import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
from sklearn.externals import joblib
import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv, dnn_pool
from lib import activations
from lib import updates
from lib import inits
from lib.vis import color_grid_vis
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout, l2normalize
from lib.metrics import nnc_score, nnd_score
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data, center_crop, patch
from lasagne.layers import InverseLayer
from scipy.misc import imread
from glob import glob
def transform(X):
X = [center_crop(x, npx) for x in X]
return floatX(X).transpose(0, 3, 1, 2)/127.5 - 1.
def inverse_transform(X):
X = (X.reshape(-1, nc, npx, npx).transpose(0, 2, 3, 1)+1.)/2.
return X
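# Quick round-trip sanity check for the two transforms above (illustrative
# only; assumes lib.data_utils.center_crop crops HWC uint8 images to npx x npx,
# with npx and nc as defined just below):
#   dummy = np.zeros((2, npx + 10, npx + 10, nc), dtype='uint8')
#   assert inverse_transform(transform(dummy)).shape == (2, npx, npx, nc)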
k = 1 # # of discrim updates for each gen update
l2 = 1e-5 # l2 weight decay
nvis = 196 # # of samples to visualize during training
b1 = 0.5 # momentum term of adam
nc = 3 # # of channels in image
nbatch = 100 # # of examples in batch
npx = 64 # # of pixels width/height of images
nz = 100 # # of dim for Z
ngf = 128 # # of gen filters in first conv layer
ndf = 64 # # of discrim filters in first conv layer
nx = npx*npx*nc # # of dimensions in X
niter = 20 # # of iter at starting learning rate
niter_decay = 0 # # of iter to linearly decay learning rate to zero
t = time()
npzfiles = glob(os.path.join('/data/dilin/dataset/lsun/', 'pickle', '*.npz'))
desc = 'steingan_mix'
model_dir = 'models/%s' % desc
samples_dir = 'samples/%s' % desc
dir_list = [model_dir, samples_dir]
for dir in dir_list:
if not os.path.exists(dir):
os.makedirs(dir)
print desc
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()
bce = T.nnet.binary_crossentropy
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
gain_ifn = inits.Normal(loc=1., scale=0.02)
bias_ifn = inits.Constant(c=0.)
gw = gifn((nz, ngf*8*4*4), 'gw')
gg = gain_ifn((ngf*8*4*4), 'gg')
gb = bias_ifn((ngf*8*4*4), 'gb')
gw2 = gifn((ngf*8, ngf*4, 5, 5), 'gw2')
gg2 = gain_ifn((ngf*4), 'gg2')
gb2 = bias_ifn((ngf*4), 'gb2')
gw3 = gifn((ngf*4, ngf*2, 5, 5), 'gw3')
gg3 = gain_ifn((ngf*2), 'gg3')
gb3 = bias_ifn((ngf*2), 'gb3')
gw4 = gifn((ngf*2, ngf, 5, 5), 'gw4')
gg4 = gain_ifn((ngf), 'gg4')
gb4 = bias_ifn((ngf), 'gb4')
gwx = gifn((ngf, nc, 5, 5), 'gwx')
aew1 = difn((ndf, nc, 3, 3), 'aew1')
aew2 = difn((ndf*2, ndf, 5, 5), 'aew2')
aew3 = difn((ndf*2, ndf*2, 3, 3), 'aew3')
aew4 = difn((ndf*4, ndf*2, 5, 5), 'aew4')
aew5 = difn((ndf*4, ndf*4, 3, 3), 'aew5')
aew6 = difn((ndf*4, ndf*4, 4, 4), 'aew6')
aeg2 = gain_ifn((ndf*2), 'aeg2')
aeb2 = bias_ifn((ndf*2), 'aeb2')
aeg3 = gain_ifn((ndf*2), 'aeg3')
aeb3 = bias_ifn((ndf*2), 'aeb3')
aeg4 = gain_ifn((ndf*4), 'aeg4')
aeb4 = bias_ifn((ndf*4), 'aeb4')
aeg5 = gain_ifn((ndf*4), 'aeg5')
aeb5 = bias_ifn((ndf*4), 'aeb5')
aeg6 = gain_ifn((ndf*4), 'aeg6')
aeb6 = bias_ifn((ndf*4), 'aeb6')
aeg6t = gain_ifn((ndf*4), 'aeg6t')
aeb6t = bias_ifn((ndf*4), 'aeb6t')
aeg5t = gain_ifn((ndf*4), 'aeg5t')
aeb5t = bias_ifn((ndf*4), 'aeb5t')
aeg4t = gain_ifn((ndf*2), 'aeg4t')
aeb4t = bias_ifn((ndf*2), 'aeb4t')
aeg3t = gain_ifn((ndf*2), 'aeg3t')
aeb3t = bias_ifn((ndf*2), 'aeb3t')
aeg2t = gain_ifn((ndf), 'aeg2t')
aeb2t = bias_ifn((ndf), 'aeb2t')
gen_params = [gw, gg, gb, gw2, gg2, gb2, gw3, gg3, gb3, gw4, gg4, gb4, gwx]
discrim_params = [ aew1, aew2, aew3, aew4, aew5, aew6, aeg2, aeb2, aeg3, aeb3, aeg4, aeb4, aeg5, aeb5, aeg6, aeb6, aeg2t, aeb2t, aeg3t, aeb3t, aeg4t, aeb4t, aeg5t, aeb5t, aeg6t, aeb6t]
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
h = relu(batchnorm(T.dot(Z, w), g=g, b=b))
h = h.reshape((h.shape[0], ngf*8, 4, 4))
h2 = relu(batchnorm(deconv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
h4 = relu(batchnorm(deconv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
x = tanh(deconv(h4, wx, subsample=(2, 2), border_mode=(2, 2)))
return x
def discrim(X):
current_input = dropout(X, 0.3)
### encoder ###
cv1 = relu(dnn_conv(current_input, aew1, subsample=(1,1), border_mode=(1,1)))
cv2 = relu(batchnorm(dnn_conv(cv1, aew2, subsample=(4,4), border_mode=(2,2)), g=aeg2, b=aeb2))
cv3 = relu(batchnorm(dnn_conv(cv2, aew3, subsample=(1,1), border_mode=(1,1)), g=aeg3, b=aeb3))
cv4 = relu(batchnorm(dnn_conv(cv3, aew4, subsample=(4,4), border_mode=(2,2)), g=aeg4, b=aeb4))
cv5 = relu(batchnorm(dnn_conv(cv4, aew5, subsample=(1,1), border_mode=(1,1)), g=aeg5, b=aeb5))
cv6 = relu(batchnorm(dnn_conv(cv5, aew6, subsample=(4,4), border_mode=(0,0)), g=aeg6, b=aeb6))
### decoder ###
dv6 = relu(batchnorm(deconv(cv6, aew6, subsample=(4,4), border_mode=(0,0)), g=aeg6t, b=aeb6t))
dv5 = relu(batchnorm(deconv(dv6, aew5, subsample=(1,1), border_mode=(1,1)), g=aeg5t, b=aeb5t))
dv4 = relu(batchnorm(deconv(dv5, aew4, subsample=(4,4), border_mode=(2,2)), g=aeg4t, b=aeb4t))
dv3 = relu(batchnorm(deconv(dv4, aew3, subsample=(1,1), border_mode=(1,1)), g=aeg3t, b=aeb3t))
dv2 = relu(batchnorm(deconv(dv3, aew2, subsample=(4,4), border_mode=(2,2)), g=aeg2t, b=aeb2t))
dv1 = tanh(deconv(dv2, aew1, subsample=(1,1), border_mode=(1,1)))
rX = dv1
mse = T.sqrt(T.sum(T.abs_(T.flatten(X-rX, 2)),axis=1)) + T.sqrt(T.sum(T.flatten((X-rX)**2, 2), axis=1))  # per-example reconstruction error: sqrt of the L1 residual plus its L2 norm (kept under the historical name "mse")
return T.flatten(cv6, 2), rX, mse
def rbf_kernel(X0):
XY = T.dot(X0, X0.transpose())
x2 = T.reshape(T.sum(T.square(X0), axis=1), (X0.shape[0], 1))
X2e = T.repeat(x2, X0.shape[0], axis=1)
H = T.sub(T.add(X2e, X2e.transpose()), 2 * XY)
V = H.flatten()
# median distance
h = T.switch(T.eq((V.shape[0] % 2), 0),
# if even vector
T.mean(T.sort(V)[ ((V.shape[0] // 2) - 1) : ((V.shape[0] // 2) + 1) ]),
# if odd vector
T.sort(V)[V.shape[0] // 2])
h = T.sqrt(0.5 * h / T.log(X0.shape[0].astype('float32') + 1.0)) / 2.
Kxy = T.exp(-H / h ** 2 / 2.0)
neighbors = T.argsort(H, axis=1)[:, 1]
return Kxy, neighbors, h
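# Minimal NumPy sketch of the same median-heuristic RBF kernel, useful for
# sanity-checking the Theano graph above on a small 2-D array. The name
# rbf_kernel_np is illustrative and is not used by the training loop.
def rbf_kernel_np(X0_np):
    sq_dists = np.sum((X0_np[:, None, :] - X0_np[None, :, :]) ** 2, axis=-1)
    h = np.median(sq_dists)
    h = np.sqrt(0.5 * h / np.log(X0_np.shape[0] + 1.0)) / 2.
    return np.exp(-sq_dists / h ** 2 / 2.0)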
def svgd_gradient(X0):
hidden, _, mse = discrim(X0)
grad = -1.0 * T.grad( mse.sum(), X0)
kxy, neighbors, h = rbf_kernel(hidden) #TODO
coff = T.exp( - T.sum((hidden[neighbors] - hidden)**2, axis=1) / h**2 / 2.0 )
v = coff.dimshuffle(0, 'x') * (-hidden[neighbors] + hidden) / h**2
X1 = X0[neighbors]
hidden1, _, _ = discrim(X1)
dxkxy = T.Lop(hidden1, X1, v)
#svgd_grad = (T.dot(kxy, T.flatten(grad, 2)).reshape(dxkxy.shape) + dxkxy) / T.sum(kxy, axis=1).dimshuffle(0, 'x', 'x', 'x')
svgd_grad = grad + dxkxy / 2.
return grad, svgd_grad, dxkxy
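# For reference: svgd_grad above is a simplified variant of the SVGD transport
# direction phi(x_i) = E_j[ k(x_j, x_i) grad_{x_j} log p(x_j) + grad_{x_j} k(x_j, x_i) ],
# where log p is (up to a constant) the negative discriminator energy, the
# kernel is evaluated on discriminator features, and the repulsive term is
# approximated with a single nearest neighbour per particle.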
X = T.tensor4() # data
X0 = T.tensor4() # vgd samples
deltaX = T.tensor4() #vgd gradient
Z = T.matrix()
epsilon = T.tensor4()
### define discriminative cost ###
_, rX_data, mse_data = discrim(X)
_, rX_vgd, mse_vgd = discrim(X0)
balance_weight = sharedX(0.3)
d_cost = T.mean(mse_data - balance_weight * mse_vgd)
gX = gen(Z, *gen_params)
g_cost = -1 * T.sum(T.sum(T.mul(gX, deltaX), axis=1))  # update the generator so that gX follows the SVGD transport direction deltaX
d_lr = 5e-4
g_lr = 1e-3
d_lrt = sharedX(d_lr)
g_lrt = sharedX(g_lr)
d_updater = updates.Adam(lr=d_lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
g_updater = updates.Adam(lr=g_lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
d_updates = d_updater(discrim_params, d_cost)
g_updates = g_updater(gen_params, g_cost)
print 'COMPILING'
t = time()
_gen = theano.function([Z], gX)
_discrim = theano.function([X], discrim(X))
_train_d = theano.function([X, X0], d_cost, updates=d_updates)
_train_g = theano.function([Z, deltaX], g_cost, updates=g_updates)
_ae = theano.function(inputs=[X], outputs=rX_data)
_svgd_gradient = theano.function([X0], svgd_gradient(X0))
_reconstruction_cost = theano.function([X], T.mean(mse_data))
print '%.2f seconds to compile theano functions'%(time()-t)
sample_zmb = floatX(np_rng.uniform(-1., 1., size=(nvis, nz)))
n_updates = 0
t = time()
for epoch in range(1, niter+1):
for filename in npzfiles:
print filename
batch_data = shuffle(np.load(filename)['images'].astype(theano.config.floatX))
for idx in tqdm(xrange(0, batch_data.shape[0] // nbatch)):
imb = transform(batch_data[idx*nbatch:(idx+1)*nbatch])
zmb = floatX(np_rng.uniform(-1., 1., size=(imb.shape[0], nz)))
samples = floatX(_gen(zmb))
grad, vgd_grad, dxkxy = _svgd_gradient(samples)
_train_g(zmb, floatX(vgd_grad))
_train_d(imb, samples)
n_updates += 1
cost_batch_vgd = _reconstruction_cost(floatX(samples))
cost_batch_data = _reconstruction_cost(imb)
if n_updates % 50 == 0:
print desc, cost_batch_data, cost_batch_vgd
if cost_batch_data > cost_batch_vgd:
d_lrt.set_value(5e-4)
else:
d_lrt.set_value(1e-4)
color_grid_vis(inverse_transform(_ae(imb)), (10, 10), 'samples/%s/ae-%d.png'%(desc, epoch))
samples = np.asarray(_gen(sample_zmb))
color_grid_vis(inverse_transform(samples), (14, 14), 'samples/%s/gan-%d.png' % (desc, epoch))
if epoch % 2 == 0:
joblib.dump([p.get_value() for p in gen_params], 'models/%s/%d_gen_params.jl'%(desc, epoch))
joblib.dump([p.get_value() for p in discrim_params], 'models/%s/%d_discrim_params.jl'%(desc, epoch))
print '%.2f seconds to train the generative model' % (time()-t)
print 'DONE'
#!/usr/bin/python
logo = """
_________ .__ __ __
\_ ___ \_______|__|/ |__/ |_ ___________________
/ \ \/\_ __ \ \ __\ __\/ __ \_ __ \___ /
\ \____| | \/ || | | | \ ___/| | \// /
\______ /|__| |__||__| |__| \___ >__| /_____ |
\/ \/ \/
Input: text file with hash, ip, or domain on each line
Output: JSON CRITS response, Fidelis Feed Format, and CRITS URLs
Opens CRITS browser tabs unless -q is specified. You must have a valid CRITS session logged into your browser.
"""
print logo
import json
import datetime
import optparse
import webbrowser
import time
import urllib
import urllib2
import os
try:
import simplejson
except ImportError:
print "Couldnt import simplejson. \n Try: 'sudo pip install simplejson'"
try:
import requests
except ImportError:
print "Couldnt import requests. \n Try: 'sudo pip install requests and sudo pip install requests[security]'"
dict = {}
vSleep = 1
#FUNCTIONS
def cDomain(i, src, buck):
try:
url = 'https://' + SERVER + '/api/v1/domains/'
data = {
'api_key': APIKEY,
'username': USERNAME,
'domain': i,
'source': src,
'bucket_list': buck,
}
r = requests.post(url, data=data, verify=False, timeout=10)
time.sleep(vSleep)
params = {
'api_key': APIKEY,
'username': USERNAME,
'c-domain': i,
}
r = requests.get(url, params=params, verify=False, timeout=10)
parsed = json.loads(r.content)
print json.dumps(parsed, indent=4, sort_keys=True)
except Exception as e:
print 'Exception:', e.message
def cIP(i, src, buck):
try:
url = 'https://' + SERVER + '/api/v1/ips/'
data = {
'api_key': APIKEY,
'username': USERNAME,
'ip': i,
'ip_type': "Address - ipv4-addr",
'source': src,
'bucket_list': buck,
}
r = requests.post(url, data=data, verify=False, timeout=10)
time.sleep(vSleep)
params = {
'api_key': APIKEY,
'username': USERNAME,
'c-ip': i,
}
r = requests.get(url, params=params, verify=False, timeout=10)
parsed = json.loads(r.content)
print json.dumps(parsed, indent=4, sort_keys=True)
except Exception as e:
print 'Exception:', e.message
def cmd5(i, src, buck):
try:
url = 'https://' + SERVER + '/api/v1/samples/'
data = {
'api_key': APIKEY,
'username': USERNAME,
'md5': i,
'upload_type': "metadata",
'source': src,
'bucket_list': buck,
}
r = requests.post(url, data=data, verify=False, timeout=10)
time.sleep(vSleep)
params = {
'api_key': APIKEY,
'username': USERNAME,
'c-md5': i,
}
r = requests.get(url, params=params, verify=False, timeout=10)
parsed = json.loads(r.content)
print json.dumps(parsed, indent=4, sort_keys=True)
except Exception as e:
print 'Exception:', e.message
def cCampaign(camp, desc, buck):
try:
url = 'https://' + SERVER + '/api/v1/campaigns/'
data = {
'api_key': APIKEY,
'username': USERNAME,
'name': camp,
'description': desc,
'bucket_list': buck,
}
r = requests.post(url, data=data, verify=False, timeout=10)
time.sleep(vSleep)
params = {
'api_key': APIKEY,
'username': USERNAME,
'c-campaign.name': camp,
}
r = requests.get(url, params=params, verify=False, timeout=10)
parsed = json.loads(r.content)
print json.dumps(parsed, indent=4, sort_keys=True)
except Exception as e:
print 'Exception:', e.message
def cRelationship(ltype, lid, rtype, rid, reltype):
try:
url = 'https://' + SERVER + '/api/v1/relationships/'
data = {
'api_key': APIKEY,
'username': USERNAME,
'left_type': ltype,
'left_id': lid,
'right_type': rtype,
'right_id': rid,
'rel_type': reltype,
}
print str(data)
r = requests.post(url, data=data, verify=False, timeout=10)
time.sleep(vSleep)
except Exception as e:
print 'Exception:', e.message
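#EXAMPLE (illustrative only; the indicators below are documentation
#placeholders, and SERVER/USERNAME/APIKEY must be set before calling):
#  cDomain("example.com", "TRT", "EvilMalware")
#  cIP("203.0.113.10", "TRT", "EvilMalware")
#  cmd5("d41d8cd98f00b204e9800998ecf8427e", "TRT", "EvilMalware")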
#MAIN
def main():
parser = optparse.OptionParser('usage python critterz.py <-m mode> <-f filename> [-s intelSource] [-d feedDescription] [-b bucket] [ -q ] \n\n example: python critterz.py -m ip -f ip.txt -s TRT -d "Evil related" -b EvilMalware -c EvilCampaign')
parser.add_option('-m', '--mode', dest='mode', type='string', help='required specify CRITs mode of either < domain | ip | hash >')
parser.add_option('-f', '--filename', dest='filename', type='string', help='required specify filename')
parser.add_option('-s', '--source', dest='source', type='string', help='optional specify source of intel (Defaults to TRT)')
parser.add_option('-d', '--description', dest='description', type='string', help='optional specify feed description (Defaults to bucket entry or Unknown if bucket is empty)')
parser.add_option('-b', '--bucket', dest='bucket', type='string', help='optional specify CRITs bucket (Defaults to Unknown)')
parser.add_option("-q", action="store_true", dest="quiet", help="optional quiet mode will not open browswer tabs")
#parser.add_option('-c', '--campaign', dest='campaign', type='string', help='optional specify CRITs campaign (Defaults to Unknown)')
(options, args) = parser.parse_args()
if options.mode is None or options.filename is None:
print parser.usage
exit (0)
cm = options.mode
fn = options.filename
if (options.source == None):
src = "TRT"
else:
src = options.source
if (options.description == None):
if (options.bucket != None):
desc = options.bucket
else:
desc = "Unknown"
else:
desc = options.description
if (options.bucket == None):
buck = "Unknown"
else:
buck = options.bucket
if (options.quiet == True):
quiet = "True"
else:
quiet = "False"
requests.packages.urllib3.disable_warnings()
fn = open(fn)
dt = datetime.date.today().strftime('%m/%d/%Y')
new = 2
'''
if (options.campaign == None):
camp = "Unknown"
else:
camp = options.campaign
if camp != "Unknown":
cCampaign(camp,desc,buck)
'''
global SERVER
global USERNAME
global APIKEY
if not os.path.exists("crits.conf"):
cfn = open("crits.conf", "w+")
SERVER = str(raw_input('Enter Server: ')) + "\n"
USERNAME = str(raw_input('Enter Username: ')) + "\n"
APIKEY = str(raw_input('Enter API Key: ')) + "\n"
cfn.write(SERVER)
cfn.write(USERNAME)
cfn.write(APIKEY)
cfn.close()
cfn = open("crits.conf")
SERVER = cfn.readline().rstrip("\n")
USERNAME = cfn.readline().rstrip("\n")
APIKEY = cfn.readline().rstrip("\n")
cfn.close()
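# crits.conf is expected to hold exactly three lines, in this order
# (the values shown here are placeholders):
#   crits.example.com
#   analyst
#   0123456789abcdef0123456789abcdef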
if cm == "domain":
a = []
for i in fn:
i = i.rstrip("\n")
cDomain(i,src,buck)
a.append(i)
time.sleep(vSleep)
print "\n *** Feed Syntax *** \n"
for c in range(len(a)):
print 'url,' + a[c] + ',' + desc + ',' + dt + ',,'
print "\n *** CRITs URLs *** \n"
for b in range(len(a)):
url = 'https://' + SERVER + '/domains/details/'+ a[b] + '/#analysis_button'
print url
if quiet == "False":
webbrowser.open(url,new=new)
time.sleep(vSleep)
elif cm == "ip":
a = []
for i in fn:
i = i.rstrip("\n")
cIP(i,src,buck)
a.append(i)
time.sleep(vSleep)
print "\n *** Feed Syntax *** \n"
for c in range(len(a)):
print 'ip,' + a[c] + ',' + desc + ',' + dt + ',,'
print "\n *** CRITs URLs *** \n"
for b in range(len(a)):
url = 'https://' + SERVER + '/ips/details/'+ a[b] + '/#analysis_button'
print url
if quiet == "False":
webbrowser.open(url,new=new)
time.sleep(vSleep)
elif cm == "hash":
a = []
ftype = str(raw_input('Enter filetype [pe]: '))
malname = str(raw_input('Enter malware name [Trojan.Win.EvilName.fss1]: '))
maltype = str(raw_input('Enter malware type [Trojan]: '))
alast = str(raw_input('Enter analyst last name [ProtectorOfTheRealm]: '))
for i in fn:
i = i.rstrip("\n")
cmd5(i,src,buck)
a.append(i)
time.sleep(vSleep)
print "\n *** Feed Syntax *** \n"
for c in range(len(a)):
print a[c] + ',,' + ftype + ',' + malname + ',' + maltype + ',4,' + desc + ',' + alast
print "\n *** CRITs URLs *** \n"
for b in range(len(a)):
url = 'https://' + SERVER + '/samples/details/'+ a[b] + '/#analysis_button'
print url
if quiet == "False":
webbrowser.open(url,new=new)
time.sleep(vSleep)
else:
print parser.usage
if __name__ == '__main__':
main()
from __future__ import division
import requests
import json
import pandas as pd
import numpy as np
from itertools import chain
from shapely.geometry import LineString, Point
"""
If you wish to retrieve route data directly into a DataFrame
df['travel_time_valhalla'], df['valhalla_route'] = zip(*
df.apply(route_valhalla, args=(origin_tuple), axis=1))
"""
def query_route_valhalla(
key,
start,
end,
costing,
language="en_US",
out_format="json",
direction_params=None,
costing_params=None):
"""
Query a Valhalla instance for a route
Returns travel time and list of route geometries
key: Valhalla API key
start: route start coords as a lon, lat iterable
end: route end coords as a lon, lat iterable
Not all options have been implemented here.
See: https://github.com/valhalla/valhalla-docs/blob/gh-pages/api-reference.md#inputs-of-a-valhalla-route
"""
allowed = ('pedestrian', 'bicycle', 'bus', 'auto', 'auto_shorter')
if costing not in allowed:
raise Exception(
"Unknown travel method. Must be one of %s. Christ." % ', '.join(allowed))
# build routing JSON
initial_route = {
"locations": [{"lat":start[1] ,"lon": start[0]}, {"lat":end[1] ,"lon":end[0]}],
"costing": costing,
"language": language,
"out_format": out_format
}
route = initial_route.copy()
if direction_params:
route.update({'directions_options': direction_params})
if costing_params:
route.update({'costing_options': costing_params})
endpoint = "https://valhalla.mapzen.com/route"
params = {"json": json.dumps(route), "api_key": key}
req = requests.get(endpoint, params=params)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
return (np.nan, np.nan)
if req.json()['trip']['status'] == 207:
return (np.nan, np.nan)
return req.json()['trip']['summary']['time'], [leg['shape'] for leg in req.json()['trip']['legs']][0]
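# Hedged usage sketch (the API key and coordinates below are placeholders):
#   secs, shape = query_route_valhalla("VALHALLA_KEY", (-0.1276, 51.5072),
#                                      (-0.1426, 51.5007), "bicycle")
#   route_ls = decode_polyline(shape)  # Valhalla/OSRM shapes use 1e-6 precision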
def query_route_osrm(start, end, method):
"""
Get a route back from MapZen's OSRM
start, end: lon, lat tuples
method: foot, car, bicycle
returns encoded Polyline
TODO: bounds checking for coords
"""
allowed = ('foot', 'car', 'bicycle')
if method not in allowed:
raise Exception(
"Unknown method. Must be one of %s. Christ." % ', '.join(allowed))
endpoint = 'http://osrm.mapzen.com'
method = '/{m}/viaroute'.format(m=method)
# should be properly encoding second loc, but dict keys are unique!
# reverse lon, lat because ugh
params = {'loc': '{1},{0}&loc={3},{2}'.format(*chain(start, end))}
req = requests.get(endpoint + method, params=params)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
return (np.nan, np.nan)
if req.json().get('status') == 207:
return np.nan, np.nan
return req.json()['route_summary']['total_time'], req.json()['route_geometry']
def query_route_gmaps(start, end, method, key):
""" retrieve a bike route from GMaps """
url = "https://maps.googleapis.com/maps/api/directions/json"
params = {
"origin": "%s, %s" % (start[1], start[0]),
"destination": "%s, %s" % (end[1], end[0]),
"mode": method,
"units": "metric",
"region": "uk",
"key": key
}
req = requests.get(url, params=params)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
return (np.nan, np.nan)
# currently one route, containing one leg
try:
route = req.json()['routes'][0]
leg = req.json()['routes'][0]['legs'][0]
duration = sum([step['duration']['value'] for step in leg['steps']])
overview_polyline = route['overview_polyline']['points']
except (KeyError, IndexError):
return (np.nan, np.nan)
return duration, overview_polyline
def query_elevation(polyline, key):
""" retrieve elevations for a polyline from GMaps """
url = "https://maps.googleapis.com/maps/api/elevation/json"
params = {
"locations": "enc:%s" % polyline,
"key": key
}
req = requests.get(url, params=params)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
return np.nan
try:
elevations = req.json()['results']
except (KeyError, IndexError):
return np.nan
return elevations
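# NB: the DataFrame.apply wrappers below reference module-level api_key and
# gmaps_key variables that are assumed to be defined elsewhere (e.g. in the
# calling notebook) before use.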
def route_valhalla(df, start):
return query_route_valhalla(api_key, start, (df['lon'], df['lat']), 'bicycle')
def route_gmaps(df, start):
return query_route_gmaps(start, (df['lon'], df['lat']), 'bicycling', gmaps_key)
def route_osrm(df, start):
return query_route_osrm(start, (df['lon'], df['lat']), 'bicycle')
def decode_polyline(point_str, gmaps=False):
"""
Decodes a polyline that has been encoded using Google's algorithm
http://code.google.com/apis/maps/documentation/polylinealgorithm.html
This is a generic method that returns a list of (lon, lat)
tuples, which are used as input to a Shapely LineString
point_str: encoded polyline string
returns: LineString instance
"""
# some coordinate offsets are represented by 4 to 5 binary chunks
if pd.isnull(point_str):
return np.nan
coord_chunks = [[]]
for char in point_str:
# convert each character to decimal from ascii
value = ord(char) - 63
# values that have a chunk following have an extra 1 on the left
split_after = not (value & 0x20)
value &= 0x1F
coord_chunks[-1].append(value)
if split_after:
coord_chunks.append([])
del coord_chunks[-1]
coords = []
for coord_chunk in coord_chunks:
coord = 0
for i, chunk in enumerate(coord_chunk):
coord |= chunk << (i * 5)
# there is a 1 on the right if the coord is negative
if coord & 0x1:
coord = ~coord #invert
coord >>= 1
# https://github.com/Project-OSRM/osrm-backend/issues/713
# (OSRM returns higher-precision coordinates)
# NB this is not the case for Google Directions Polylines
# they only need coord /= 100000.
if not gmaps:
coord /= 1000000.
else:
coord /= 100000.
coords.append(coord)
# convert the 1d list to a 2d list & offsets to actual values
points = []
prev_x = 0
prev_y = 0
for i in xrange(0, len(coords) - 1, 2):
if coords[i] == 0 and coords[i + 1] == 0:
continue
prev_x += coords[i + 1]
prev_y += coords[i]
# rounding to 6 digits ensures that the floats are the same as when
# they were encoded
points.append((round(prev_x, 6), round(prev_y, 6)))
if len(points) > 1:
return LineString(points)
else:
return np.nan
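# Usage sketch: pass gmaps=True for Google Directions polylines (1e-5
# precision) and keep the default for OSRM/Valhalla shapes (1e-6 precision),
# e.g. df['route_ls'] = df['valhalla_route'].apply(decode_polyline)
# (the column names mirror the module docstring and are assumptions).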
def project_linestring(ls, m, inverse=False):
""" return a linestring projected into map coordinates """
if not pd.isnull(ls):
return LineString(zip(*m(*zip(*ls.coords))))
else:
return np.nan
def similarity(ls1, ls2):
"""
Calculate LineString similarity percentage
& is intersection, | is union
"""
set1 = set(ls1.coords)
set2 = set(ls2.coords)
return (len(set1 & set2) / len(set1 | set2)) * 100
def pairs(line):
""" yield line segment start and end points for input line """
for i in xrange(1, len(line)):
yield line[i - 1], line[i]
def segmentise(line):
""" returns a list of linestring sub-segments for the input """
return [
LineString([Point(seg_start).coords[0], Point(seg_end).coords[0]])
for seg_start, seg_end in pairs(line.coords)
]
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experiments for paper on private sampling sketches."""
import collections
import math
import os
import pickle
import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.integrate as integrate
from private_sampling import private_sampling
# Set matplotlib parameters
plt.rc("font", size=13)
plt.rcParams["text.usetex"] = True # Let TeX do the typsetting
plt.rcParams["text.latex.preamble"] = [
r"\usepackage{sansmath}", r"\sansmath"
] # Force sans-serif math mode (for axes labels)
plt.rcParams["font.family"] = "sans-serif" # ... for regular text
DEFAULT_DIR_FOR_PRECOMPUTED = "precomputed_pickle_files"
class PrecomputePrivateThresholdSampling(object):
"""Precomputes and stores values to avoid recomputation each time a plot is generated."""
SAVE_EVERY = 1000
def __init__(self,
threshold,
eps,
delta,
sampling_method,
pickle_dir,
print_progress=True):
self.sample = private_sampling.PrivateThresholdSampleWithFrequencies(
threshold, eps, delta, sampling_method, store_every=1)
file_name = "private_sampling_%s_t%s_e%s_d%s" % (sampling_method.__name__,
threshold, eps, 1 / delta)
self.file_path = os.path.join(pickle_dir, file_name)
self.print_progress = print_progress
if os.path.exists(self.file_path):
self._load()
def _load(self):
"""Writeme."""
f = open(self.file_path, "rb")
precomputed = pickle.load(f)
if (precomputed["threshold"] != self.sample.threshold or
precomputed["eps"] != self.sample.eps or
precomputed["delta"] != self.sample.delta or
precomputed["sampling_method"] != self.sample.sampling_method.__name__):
raise Exception("Tried to load precomputed values for wrong parameters")
# pylint: disable=protected-access
self.sample._reported_weight_dist = precomputed["reported_weight_dist"]
self.sample._mle_estimators = precomputed["mle_estimators"]
self.sample._biased_down_estimators = precomputed["biased_down_estimators"]
# pylint: enable=protected-access
f.close()
def _save(self):
"""Writeme."""
precomputed = {}
precomputed["threshold"] = self.sample.threshold
precomputed["eps"] = self.sample.eps
precomputed["delta"] = self.sample.delta
precomputed["sampling_method"] = self.sample.sampling_method.__name__
precomputed["timestamp"] = time.time()
# pylint: disable=protected-access
precomputed["reported_weight_dist"] = self.sample._reported_weight_dist
precomputed["mle_estimators"] = self.sample._mle_estimators
precomputed["biased_down_estimators"] = self.sample._biased_down_estimators
# pylint: enable=protected-access
f = open(self.file_path, "wb")
pickle.dump(precomputed, f)
f.close()
def precompute(self, max_freq):
"""Writeme."""
start_time = time.time()
for i in range(1, max_freq + 1, self.SAVE_EVERY):
self.sample.compute_reported_frequency_dist(i)
for j in range(i, min(i + self.SAVE_EVERY, max_freq + 1)):
self.sample.mle_estimator(j)
self.sample.biased_down_estimator(j)
self._save()
if self.print_progress:
print(
"Finished %d, time: %f" %
(min(i + self.SAVE_EVERY - 1, max_freq), time.time() - start_time))
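# Illustrative use of the precompute helper (parameters mirror main_precompute
# below; the pickle directory must already exist):
#   pre = PrecomputePrivateThresholdSampling(
#       0.01, 0.5, 0.5**20, private_sampling.PrioritySamplingMethod,
#       DEFAULT_DIR_FOR_PRECOMPUTED)
#   pre.precompute(1000)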
def inclusion_prob_vec_for_private_sampling_keys_only(max_freq, threshold, eps,
delta, sampling_method):
"""Computes the vector of inclusion probabilities for private sampling."""
s = private_sampling.PrivateThresholdSampleKeysOnly(
threshold, eps, delta, sampling_method, store_every=1)
s.compute_inclusion_prob(max_freq)
return s._inclusion_prob.copy() # pylint: disable=protected-access
# Functions used to compute the inclusion probability, bias, and MSE when first
# generating a private histogram and then sampling.
# Auxiliary functions: pdf and cdf of the Laplace distribution, integrals
def laplace_pdf(x, b, mean=0):
return 0.5 * math.exp(-1.0 * abs(x - mean) / b) / b
def laplace_cdf(x, b, mean=0):
if x <= mean:
return 0.5 * math.exp((x - mean) / b)
return 1 - 0.5 * math.exp((mean - x) / b)
def indef_int_x_exp_minus_epsx(eps, x):
"""Integral of x * exp(-eps * x)dx."""
return -1.0 * (1.0 / eps**2) * math.exp(-1.0 * eps * x) * (eps * x + 1)
def indef_int_x_exp_epsx(eps, x):
"""Integral of x * exp(eps * x)dx."""
return (1.0 / eps**2) * math.exp(eps * x) * (eps * x - 1)
def eps_indef_int_x_exp_minus_epsx(eps, x, add_to_exp=0.0):
"""Integral of x * exp(-eps * x)dx times eps * exp(add_to_exp)."""
return -1.0 * (1.0 / eps) * math.exp(-1.0 * eps * x + add_to_exp) * (
eps * x + 1)
def eps_indef_int_x_exp_epsx(eps, x, add_to_exp=0.0):
"""Integral of x * exp(eps * x)dx times eps * exp(add_to_exp)."""
return (1.0 / eps) * math.exp(eps * x + add_to_exp) * (eps * x - 1)
def eps_indef_int_x_sqr_exp_epsx(eps, x, add_to_exp=0.0):
"""Integral of x^2 * exp(eps * x)dx times eps * exp(add_to_exp)."""
return (1.0 / eps**2) * math.exp(eps * x + add_to_exp) * (
(eps * x)**2 - 2 * eps * x + 2)
def eps_indef_int_x_sqr_exp_minus_epsx(eps, x, add_to_exp=0.0):
"""Integral of x^2 * exp(-eps * x)dx times eps * exp(add_to_exp)."""
return (-1.0 / eps**2) * math.exp(-1.0 * eps * x + add_to_exp) * (
(eps * x)**2 + 2 * eps * x + 2)
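# Optional spot check (illustrative, not used by the experiments): each closed
# form above can be compared against numerical integration, e.g. for eps = 2,
#   integrate.quad(lambda x: x * math.exp(-2.0 * x), 1.0, 3.0)[0]
# should agree with
#   indef_int_x_exp_minus_epsx(2.0, 3.0) - indef_int_x_exp_minus_epsx(2.0, 1.0)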
def inclusion_prob_using_private_histogram_numerical(freq,
threshold,
eps,
delta,
sampling_method,
err=10**-6):
"""Computes the inclusion probability of a key when sampling from a private histogram."""
# This function uses numerical integration.
# TODO(ofirg): for some sampling methods, we can solve the integral and
# compute exactly.
laplace_param_b = 1.0 / eps
histogram_inclusion_threshold = (1.0 / eps) * np.log(1.0 / delta) + 1
# To make the integration output stable
max_noise = max(-1.0 * laplace_param_b * np.log(2 * err), 100)
def derivative(x):
return (laplace_pdf(x, laplace_param_b) *
sampling_method.inclusion_prob(freq + x, threshold))
return integrate.quad(
derivative, max(histogram_inclusion_threshold - freq, -1 * max_noise),
max_noise)[0]
def inclusion_prob_using_private_histogram(freq, threshold, eps, delta,
sampling_method):
"""Computes the inclusion probability of a key when sampling from a private histogram."""
laplace_param_b = 1.0 / eps
histogram_inclusion_threshold = (1.0 / eps) * np.log(1.0 / delta) + 1
if sampling_method == private_sampling.AlwaysIncludeSamplingMethod:
return 1.0 - laplace_cdf(histogram_inclusion_threshold - freq,
laplace_param_b)
elif sampling_method == private_sampling.PpsworSamplingMethod:
if freq <= histogram_inclusion_threshold:
return 1.0 - laplace_cdf(
histogram_inclusion_threshold - freq,
laplace_param_b) - (eps / (2 * (eps + threshold))) * math.exp(
freq * eps - (eps + threshold) * histogram_inclusion_threshold)
else:
est = 1.0 - laplace_cdf(histogram_inclusion_threshold - freq,
laplace_param_b)
est -= (eps / (2 * (eps + threshold))) * math.exp(-1.0 * threshold * freq)
if eps != threshold:
est -= (eps / (2 * (eps - threshold))) * (
math.exp(-1.0 * threshold * freq) -
math.exp(histogram_inclusion_threshold *
(eps - threshold) - freq * eps))
else:
est -= 0.5 * eps * (freq - histogram_inclusion_threshold) * math.exp(
-1.0 * freq * eps)
return est
elif sampling_method == private_sampling.PrioritySamplingMethod:
if histogram_inclusion_threshold >= 1.0 / threshold:
return 1.0 - laplace_cdf(histogram_inclusion_threshold - freq,
laplace_param_b)
if freq <= histogram_inclusion_threshold:
add_to_exp = eps * freq
return 0.5 * threshold * (
eps_indef_int_x_exp_minus_epsx(eps, 1.0 / threshold, add_to_exp) -
eps_indef_int_x_exp_minus_epsx(eps, histogram_inclusion_threshold,
add_to_exp)) + 1.0 - laplace_cdf(
1.0 / threshold - freq,
laplace_param_b)
elif freq >= 1 / threshold:
add_to_exp = -1.0 * eps * freq
return 0.5 * threshold * (
eps_indef_int_x_exp_epsx(eps, 1.0 / threshold, add_to_exp) -
eps_indef_int_x_exp_epsx(eps, histogram_inclusion_threshold,
add_to_exp)) + 1.0 - laplace_cdf(
1.0 / threshold - freq, laplace_param_b)
else:
add_to_exp = eps * freq
part_one = 0.5 * threshold * (
eps_indef_int_x_exp_minus_epsx(eps, 1.0 / threshold, add_to_exp) -
eps_indef_int_x_exp_minus_epsx(eps, freq, add_to_exp))
add_to_exp = -1.0 * eps * freq
part_two = 0.5 * threshold * (
eps_indef_int_x_exp_epsx(eps, freq, add_to_exp) -
eps_indef_int_x_exp_epsx(eps, histogram_inclusion_threshold,
add_to_exp))
return part_one + part_two + 1.0 - laplace_cdf(1.0 / threshold - freq,
laplace_param_b)
raise Exception("Unknown sampling method")
def mse_always_sample(freq, eps, delta):
"""MSE when there is no sampling (inclusion probability = 1.0)."""
histogram_inclusion_threshold = (1.0 / eps) * np.log(1.0 / delta) + 1
est_of_freq = expected_estimator_using_private_histogram(freq, eps, delta)
# inc_prob = inclusion_prob_using_private_histogram(
# freq, 1.0, eps, delta, private_sampling.AlwaysIncludeSamplingMethod)
if freq >= histogram_inclusion_threshold:
int_of_sq_range1 = 0.5 * (freq**2) + freq / eps + 1 / (eps**2)
int_of_sq_range2 = 0.5 * math.exp(-1.0 * eps * freq) * (
eps_indef_int_x_sqr_exp_epsx(eps, freq) -
eps_indef_int_x_sqr_exp_epsx(eps, histogram_inclusion_threshold))
int_of_sq = int_of_sq_range1 + int_of_sq_range2
else:
int_of_sq = 0.5 * (1 / eps**2) * math.exp(
(freq - histogram_inclusion_threshold) * eps) * (
(histogram_inclusion_threshold * eps)**2 +
2 * eps * histogram_inclusion_threshold + 2)
return int_of_sq - 2 * freq * est_of_freq + (freq**2) # * inc_prob
def mse_priority_sampling(freq, eps, delta, tau):
"""MSE when using priority sampling on a private histogram."""
est_of_freq = expected_estimator_using_private_histogram(freq, eps, delta)
parts_except_int_of_sq = -2 * freq * est_of_freq + (freq**2)
histogram_inclusion_threshold = (1.0 / eps) * np.log(1.0 / delta) + 1
if freq <= histogram_inclusion_threshold:
if 1.0 / tau <= histogram_inclusion_threshold:
return parts_except_int_of_sq - 0.5 * eps_indef_int_x_sqr_exp_minus_epsx(
eps, histogram_inclusion_threshold, eps * freq)
return parts_except_int_of_sq + (0.5 / tau) * (
eps_indef_int_x_exp_minus_epsx(eps, 1.0 / tau, eps * freq) -
eps_indef_int_x_exp_minus_epsx(eps, histogram_inclusion_threshold,
eps * freq)
) - 0.5 * eps_indef_int_x_sqr_exp_minus_epsx(eps, 1.0 / tau, eps * freq)
# freq > histogram_inclusion_threshold
if 1.0 / tau <= histogram_inclusion_threshold:
return parts_except_int_of_sq - 0.5 * eps_indef_int_x_sqr_exp_minus_epsx(
eps, freq, eps * freq) + 0.5 * eps_indef_int_x_sqr_exp_epsx(
eps, freq, -1.0 * freq * eps) - 0.5 * eps_indef_int_x_sqr_exp_epsx(
eps, histogram_inclusion_threshold, -1.0 * freq * eps)
elif 1.0 / tau <= freq:
int1 = -0.5 * eps_indef_int_x_sqr_exp_minus_epsx(eps, freq, eps * freq)
int2 = (0.5 / tau) * (
eps_indef_int_x_exp_epsx(eps, 1.0 / tau, -1.0 * freq * eps) -
eps_indef_int_x_exp_epsx(eps, histogram_inclusion_threshold,
-1.0 * freq * eps))
int3 = 0.5 * (
eps_indef_int_x_sqr_exp_epsx(eps, freq, -1.0 * freq * eps) -
eps_indef_int_x_sqr_exp_epsx(eps, 1.0 / tau, -1.0 * freq * eps))
return parts_except_int_of_sq + int1 + int2 + int3
int1 = -0.5 * eps_indef_int_x_sqr_exp_minus_epsx(eps, 1.0 / tau, eps * freq)
int2 = (0.5 / tau) * (
eps_indef_int_x_exp_minus_epsx(eps, 1.0 / tau, eps * freq) -
eps_indef_int_x_exp_minus_epsx(eps, freq, eps * freq))
int3 = (0.5 / tau) * (
eps_indef_int_x_exp_epsx(eps, freq, -1.0 * eps * freq) -
eps_indef_int_x_exp_epsx(eps, histogram_inclusion_threshold,
-1.0 * eps * freq))
return parts_except_int_of_sq + int1 + int2 + int3
def mse_using_private_histogram(
freq,
eps,
delta,
sampling_method=private_sampling.AlwaysIncludeSamplingMethod,
threshold=1.0):
"""Computes the MSE when sampling from a private histogram using various sampling methods."""
if sampling_method == private_sampling.AlwaysIncludeSamplingMethod:
return mse_always_sample(freq, eps, delta)
if sampling_method == private_sampling.PrioritySamplingMethod:
return mse_priority_sampling(freq, eps, delta, threshold)
raise NotImplementedError("Unsupported sampling method (for MSE computation)")
def inclusion_probability_priority(i, tau, eps, delta):
"""Computes the inclusion probability when using priority sampling on a private histogram using explicit/simplified expressions."""
T = (1.0 / eps) * math.log(1.0 / delta) + 1 # pylint: disable=invalid-name
if T >= 1.0 / tau:
if i >= T:
return 1.0 - (0.5 / delta) * math.exp(-1.0 * (i - 1) * eps)
return 0.5 * delta * math.exp(eps * (i - 1))
if i <= T:
return 0.5 * tau * ((T + 1 / eps) * math.exp(
(i - T) * eps) - (1 / eps) * math.exp((i - 1 / tau) * eps))
if i >= 1.0 / tau:
return 1.0 - 0.5 * (tau / eps) * math.exp(
eps * (1 / tau - i)) - 0.5 * tau * (T - 1 / eps) * math.exp(eps *
(T - i))
return tau * (
i - (0.5 / eps) * math.exp(eps * (i - 1 / tau)) - 0.5 *
(T - 1 / eps) * math.exp(eps * (T - i)))
def bias_and_variance_using_private_histogram_on_freq_vector(
freq_vec,
eps,
delta,
sampling_method=private_sampling.AlwaysIncludeSamplingMethod,
threshold=1.0):
"""Computes the bias and variance on an entire dataset/frequency distribution when sampling from a private histogram."""
var_sum = 0.0
bias_sum = 0.0
bias_and_mse_by_freq = {}
for freq in freq_vec:
if freq in bias_and_mse_by_freq:
bias, mse = bias_and_mse_by_freq[freq]
else:
bias = bias_using_private_histogram(freq, eps, delta)
mse = mse_using_private_histogram(freq, eps, delta, sampling_method,
threshold)
bias_and_mse_by_freq[freq] = (bias, mse)
bias_sum += bias
var_sum += mse - (bias**2)
return bias_sum, var_sum
def bias_and_variance_using_precomputed_sample_on_freq_vector(
freq_vec, sample, estimator_func):
"""Computes the bias and variance on an entire dataset/frequency distribution using a precomputed private weighted sample."""
var_sum = 0.0
bias_sum = 0.0
bias_and_mse_by_freq = {}
for freq in freq_vec:
if freq not in bias_and_mse_by_freq:
bias_and_mse_by_freq[freq] = sample.bias_and_mean_square_error(
freq, estimator_func)
bias, mse = bias_and_mse_by_freq[freq]
bias_sum += bias
var_sum += mse - (bias**2)
return bias_sum, var_sum
def expected_estimator_using_private_histogram(freq, eps, delta):
"""Computes the expected estimator when sampling a key with a given (non-private) frequency from a private histogram."""
# laplace_param_b = 1.0 / eps
histogram_inclusion_threshold = (1.0 / eps) * np.log(1.0 / delta) + 1
if freq >= histogram_inclusion_threshold:
# est = 0.5 * eps * (math.exp(-1.0 * freq * eps) *
# (indef_int_x_exp_epsx(eps, freq) -
# indef_int_x_exp_epsx(eps, histogram_inclusion_threshold)) - \
# math.exp(freq * eps) * indef_int_x_exp_minus_epsx(eps, freq))
# if abs(est - freq + (0.5 / delta) * math.exp((1 - freq) * eps) *
# (histogram_inclusion_threshold - (1.0 / eps))) > 0.1**10:
# raise Exception("Incorrect expected estimate",
# freq - (0.5 / delta) * math.exp((1 - freq) * eps) *
# (histogram_inclusion_threshold - (1.0 / eps)) - est)
return freq - (0.5 / delta) * math.exp((1 - freq) * eps) * (
histogram_inclusion_threshold - (1.0 / eps))
# est = -0.5 * eps * math.exp(eps * freq) *
# indef_int_x_exp_minus_epsx(eps, histogram_inclusion_threshold)
# if abs(est + 0.5 * eps * math.exp(eps * freq) *
# indef_int_x_exp_minus_epsx(eps, histogram_inclusion_threshold))
# > 0.1**10:
# raise Exception("Incorrect expected estimate",
# -0.5 * eps * math.exp(eps * freq) *
# indef_int_x_exp_minus_epsx(eps, histogram_inclusion_threshold) - est)
return 0.5 * delta * math.exp((freq - 1) * eps) * (
histogram_inclusion_threshold + (1.0 / eps))
def bias_using_private_histogram(freq, eps, delta):
"""Computes the bias of the estimator of a key with a given (non-private) frequency when sampling from a private histogram."""
return expected_estimator_using_private_histogram(freq, eps, delta) - freq
def inclusion_prob_vec_using_private_histogram(max_freq, threshold, eps, delta,
sampling_method):
"""Computes the inclusion probability for each frequency when sampling from a private histogram."""
return [
inclusion_prob_using_private_histogram(i, threshold, eps, delta,
sampling_method)
for i in range(1, int(max_freq) + 1)
]
# Functions used to produce plots
def plot_inclusion_prob_using_precompute(max_freq, sample, output_path):
"""Inclusion probability plots."""
eps = sample.eps
delta = sample.delta
sampling_method = sample.sampling_method
threshold = sample.threshold
log_threshold = math.log10(threshold)
if int(log_threshold) == log_threshold:
log_threshold = int(log_threshold)
plt.clf()
log1_delta = math.log10(delta)
if log1_delta == int(log1_delta):
log1_delta = int(log1_delta)
include_non_private = True
if sampling_method == private_sampling.AlwaysIncludeSamplingMethod or (
sampling_method == private_sampling.PrioritySamplingMethod and
threshold == 1.0):
include_non_private = False
title = ("Inclusion Probability: No Sampling, $\\varepsilon=%s, "
"\\delta=10^{%s}$") % (eps, log1_delta)
elif sampling_method == private_sampling.PrioritySamplingMethod:
title = ("Inclusion Probability: Priority Sampling $\\tau=10^{%s}, "
"\\varepsilon=%s, \\delta=10^{%s}$") % (log_threshold, eps,
log1_delta)
elif sampling_method == private_sampling.PpsworSamplingMethod:
title = ("Inclusion Probability: PPSWOR $\\tau=10^{%s}, \\varepsilon=%s, "
"\\delta=10^{%s}$") % (log_threshold, eps, log1_delta)
else:
raise NotImplementedError("Sampling method not supported")
plt.xlabel("Frequency")
plt.ylabel("Inclusion Probability")
# plt.yscale("log", basey=10)
# prob_vec_our = [1.0 - sample.compute_reported_frequency_dist(i)[0]
# for i in range(1, max_freq + 1)]
sample = private_sampling.PrivateThresholdSampleKeysOnly(
threshold, eps, delta, sampling_method)
prob_vec_our = [
sample.compute_inclusion_prob(i) for i in range(1, max_freq + 1)
]
prob_vec_histogram = inclusion_prob_vec_using_private_histogram(
max_freq, threshold, eps, delta, sampling_method)
if include_non_private:
plt.loglog(
range(1, max_freq + 1), [
sampling_method.inclusion_prob(i, threshold)
for i in range(1,
int(max_freq) + 1)
],
color="tab:green",
label="Non-private",
marker="d",
markevery=0.25)
plt.loglog(
range(1, max_freq + 1),
prob_vec_our,
color="tab:blue",
label="PWS",
marker="s",
markevery=0.25)
plt.loglog(
range(1, max_freq + 1),
prob_vec_histogram,
color="tab:orange",
label="SbH",
marker=".",
markevery=0.25)
plt.title(title)
plt.legend()
plt.savefig(output_path)
MARKERS = ("d", "s", "v", "^", "D", "<", ">")
COLORS = [
"tab:blue", "tab:green", "tab:red", "tab:purple", "tab:brown", "tab:pink",
"tab:gray", "tab:olive", "tab:cyan"
]
def plot_bias_using_precompute_and_private_histogram(max_freq, samples,
output_path):
"""Bias plots."""
eps = samples[0].eps
delta = samples[0].delta
sampling_method = samples[0].sampling_method
if len(samples) > len(MARKERS) or len(samples) > len(COLORS):
raise ValueError("Tried to plot more samples than colors")
plt.clf()
for sample, color, marker in zip(samples, COLORS, MARKERS):
if sample.sampling_method != sampling_method or sample.eps != eps or sample.delta != delta:
raise ValueError("Mismatch in sample parameters")
if sampling_method == private_sampling.PrioritySamplingMethod and sample.threshold == 1.0:
label = "MLE, no sampling"
else:
log_threshold = math.log10(sample.threshold)
if int(log_threshold) == log_threshold:
log_threshold = int(log_threshold)
label = "MLE, $\\tau = 10^{%s}$" % log_threshold
bias_and_mse_mle_est = [
sample.bias_and_mean_square_error(
i, sample.mle_estimator)
for i in range(1, max_freq + 1)
]
plt.plot(
range(1, max_freq + 1),
[x[0] / (i + 1) for i, x in enumerate(bias_and_mse_mle_est)],
color=color,
label=label,
marker=marker,
markevery=0.25)
plt.xscale("log", basex=2)
plt.xlabel("Frequency")
plt.ylabel("Bias / Frequency")
# bias_and_mse_biased_down_est = [sample.bias_and_mean_square_error(
# i, lambda x, y: sample.biased_down_estimator(x, y))
# for i in range(1, max_freq + 1)]
# plt.plot(range(1, max_freq + 1),
# [x[0] / (i + 1) for i, x in enumerate(bias_and_mse_biased_down_est)],
# color="tab:blue", label="Biased Down", marker = 'd', markevery= [0, -1])
private_histogram_bias = [
bias_using_private_histogram(i, eps, delta)
for i in range(1, max_freq + 1)
]
plt.plot(
range(1, max_freq + 1),
[x / (i + 1) for i, x in enumerate(private_histogram_bias)],
color="tab:orange",
label="SbH",
marker=".",
markevery=0.25)
log1_delta = math.log2(delta)
if log1_delta == int(log1_delta):
log1_delta = int(log1_delta)
plt.title("Normalized Bias: %s, $\\varepsilon=%s, \\delta=2^{%s}$" %
(sampling_method.__name__.replace("SamplingMethod", "") + " Sampling",
eps, log1_delta))
plt.legend()
plt.savefig(output_path)
def plot_variance_using_precompute_and_private_histogram(
max_freq, sample, output_path, normalized=True, short_name=False):
"""Variance plots."""
eps = sample.eps
delta = sample.delta
sampling_method = sample.sampling_method
threshold = sample.threshold
# bias_and_mse_biased_down_est = [sample.bias_and_mean_square_error(
# i, lambda x, y: sample.biased_down_estimator(x, y))
# for i in range(1, max_freq + 1)]
bias_and_mse_mle_est = [
sample.bias_and_mean_square_error(i, sample.mle_estimator)
for i in range(1, max_freq + 1)
]
private_histogram_expected = [
expected_estimator_using_private_histogram(i, eps, delta)
for i in range(1, max_freq + 1)
]
private_histogram_bias = [
bias_using_private_histogram(i, eps, delta)
for i in range(1, max_freq + 1)
]
non_private_var = [
non_private_variance(i, sampling_method, threshold)
for i in range(1, max_freq + 1)
]
include_non_private = True
if sampling_method == private_sampling.AlwaysIncludeSamplingMethod or (
sampling_method == private_sampling.PrioritySamplingMethod and
threshold == 1.0):
private_histogram_mse = [
mse_always_sample(i, eps, delta) for i in range(1, max_freq + 1)
]
include_non_private = False
method_text = "No Sampling"
elif sampling_method == private_sampling.PrioritySamplingMethod:
private_histogram_mse = [
mse_priority_sampling(i, eps, delta, threshold)
for i in range(1, max_freq + 1)
]
log_threshold = math.log10(sample.threshold)
if int(log_threshold) == log_threshold:
log_threshold = int(log_threshold)
method_text = "Priority Sampling $\\tau=10^{%s}$" % log_threshold
if short_name:
method_text = method_text.replace(" Sampling", "")
else:
raise NotImplementedError("Sampling method not supported")
plt.clf()
plt.tight_layout()
plt.xlabel("Frequency")
log1_delta = math.log2(delta)
if log1_delta == int(log1_delta):
log1_delta = int(log1_delta)
if normalized:
plt.ylabel("Variance / Frequency$^2$")
plt.yscale("log", basey=10)
# plt.plot(range(1, max_freq + 1),
# [(mse - bias ** 2) / ((i + 1) ** 2)
# for i, (bias, mse) in enumerate(bias_and_mse_biased_down_est)],
# color="tab:blue", label="Biased Down", marker = 'd', markevery= [0, -1])
if include_non_private:
plt.plot(
range(1, max_freq + 1),
[x / ((i + 1)**2) for i, x in enumerate(non_private_var)],
color="tab:green",
label="Non-private",
marker="d",
markevery=0.25)
plt.plot(
range(1, max_freq + 1),
[(mse - bias**2) / ((i + 1)**2)
for i, (bias, mse) in enumerate(bias_and_mse_mle_est)],
color="tab:blue",
label="MLE",
marker="s",
markevery=0.25)
plt.plot(
range(1, max_freq + 1),
[(mse - bias**2) / ((i + 1)**2) for i, (bias, mse) in enumerate(
zip(private_histogram_bias, private_histogram_mse))],
color="tab:orange",
label="SbH",
marker=".",
markevery=0.25)
plt.title("Normalized Variance: %s, $\\varepsilon=%s, \\delta=2^{%s}$" %
(method_text, eps, log1_delta))
else:
plt.ylabel("Variance")
# plt.plot(range(1, max_freq + 1),
# [mse - bias ** 2 for bias, mse in bias_and_mse_biased_down_est],
# color="tab:blue", label="Biased Down", marker = 'd',
# markevery= [0, -1])
if include_non_private:
plt.plot(
range(1, max_freq + 1),
non_private_var,
color="tab:green",
label="Non-private",
marker="d",
markevery=0.25)
plt.plot(
range(1, max_freq + 1),
[mse - bias**2 for bias, mse in bias_and_mse_mle_est],
color="tab:blue",
label="MLE",
marker="s",
markevery=0.25)
plt.plot(
range(1, max_freq + 1), [
mse - bias**2
for bias, mse in zip(private_histogram_bias, private_histogram_mse)
],
color="tab:orange",
label="SbH",
marker=".",
markevery=0.25)
plt.title("Variance: %s, $\\varepsilon=%s, \\delta=2^{%s}$" %
(method_text, eps, log1_delta))
plt.legend()
plt.savefig(output_path)
def non_private_variance(freq, sampling_method, threshold):
"""The variance of non-private sampling."""
return freq * freq * (
(1.0 / sampling_method.inclusion_prob(freq, threshold)) - 1)
def plot_nrmse_on_freq_vector(
freq_vec,
eps,
delta,
thresholds,
output_path,
dataset_name="",
sampling_method=private_sampling.PrioritySamplingMethod,
precomputed_pickle_dir=DEFAULT_DIR_FOR_PRECOMPUTED):
"""Plots of the error on an entire dataset/frequency vector."""
sum_of_freq = sum(freq_vec)
max_freq = max(freq_vec)
thresholds = sorted(thresholds)
nrmse_non_private = []
nrmse_sbh = []
nrmse_mle = []
# nrmse_biased_down = []
for t in thresholds:
var_non_private = sum(
[non_private_variance(x, sampling_method, t) for x in freq_vec])
nrmse_non_private.append((var_non_private**0.5) / sum_of_freq)
bias_sbh, var_sbh = bias_and_variance_using_private_histogram_on_freq_vector(
freq_vec, eps, delta, sampling_method, t)
nrmse_sbh.append(((var_sbh + bias_sbh**2)**0.5) / sum_of_freq)
pre = PrecomputePrivateThresholdSampling(
t,
eps,
delta,
sampling_method,
precomputed_pickle_dir,
print_progress=False)
pre.precompute(max_freq)
sample = pre.sample
bias_mle, var_mle = bias_and_variance_using_precomputed_sample_on_freq_vector(
freq_vec, sample, sample.mle_estimator)
nrmse_mle.append(((var_mle + bias_mle**2)**0.5) / sum_of_freq)
# bias_biased_down, var_biased_down =
# bias_and_variance_using_precomputed_sample_on_freq_vector(
# freq_vec, sample, lambda x, y: sample.biased_down_estimator(x, y))
# nrmse_biased_down.append(
# ((var_biased_down + bias_biased_down ** 2) ** 0.5) / sum_of_freq)
plt.clf()
plt.xlabel("Sampling Threshold")
plt.xscale("log", basex=10)
plt.ylabel("NRMSE")
# plt.yscale("log", basey=10)
log1_delta = math.log2(delta)
if log1_delta == int(log1_delta):
log1_delta = int(log1_delta)
plt.title("%s on %s: $\\varepsilon=%s, \\delta=2^{%s}$" %
(sampling_method.__name__.replace("SamplingMethod", "") + " Sampling",
dataset_name, eps, log1_delta))
plt.plot(
thresholds,
nrmse_sbh,
color="tab:orange",
label="SbH",
marker=".",
markevery=0.25)
plt.plot(
thresholds,
nrmse_mle,
color="tab:blue",
label="MLE",
marker="s",
markevery=0.25)
# plt.plot(thresholds, nrmse_biased_down, color="tab:blue",
# label="Biased Down", marker = 'd', markevery=0.25)
plt.plot(
thresholds,
nrmse_non_private,
color="tab:green",
label="Non-private",
marker="^",
markevery=0.25)
plt.legend()
plt.savefig(output_path)
def compute_fraction_reported_non_private(freq_vec, sampling_method, threshold):
"""For a given vector of key frequencies, computes the expected number of keys to reported in a non-private sample."""
expected_sample = 0.0
for freq in freq_vec:
expected_sample += sampling_method.inclusion_prob(freq, threshold)
return expected_sample / len(freq_vec)
def compute_fraction_reported_pws(
freq_vec,
eps,
delta,
sampling_method=private_sampling.AlwaysIncludeSamplingMethod,
threshold=1.0):
"""For a given vector of key frequencies, computes the expected number of keys to reported in a private weighted sample."""
s = private_sampling.PrivateThresholdSampleKeysOnly(threshold, eps, delta,
sampling_method)
expected_sample = 0.0
for freq in freq_vec:
expected_sample += s.compute_inclusion_prob(freq)
return expected_sample / len(freq_vec)
def compute_fraction_reported_sbh(
freq_vec,
eps,
delta,
sampling_method=private_sampling.AlwaysIncludeSamplingMethod,
threshold=1.0):
"""For a given vector of key frequencies, computes the expected number of keys to reported when sampling from a stability-based histogram."""
expected_sample = 0.0
for freq in freq_vec:
expected_sample += inclusion_prob_using_private_histogram(
freq, threshold, eps, delta, sampling_method)
return expected_sample / len(freq_vec)
def plot_gains_by_delta(
dataset_name,
freq_vec,
eps,
deltas,
output_path,
sampling_method=private_sampling.AlwaysIncludeSamplingMethod,
threshold=1.0):
"""Plots the fraction of reported keys (comparing PWS and SbH) for different delta values."""
plt.clf()
pws_fraction_reported = [
compute_fraction_reported_pws(freq_vec, eps, delta, sampling_method,
threshold) for delta in deltas
]
sbh_fraction_reported = [
compute_fraction_reported_sbh(freq_vec, eps, delta, sampling_method,
threshold) for delta in deltas
]
plt.loglog(
deltas, pws_fraction_reported, label="PWS", marker="d", markevery=[0, -1])
plt.loglog(
deltas, sbh_fraction_reported, label="SbH", marker=".", markevery=[0, -1])
plt.xlabel("$\\delta$", fontsize=18)
plt.ylabel("Fraction", fontsize=18)
plt.title(
"Keys reported: %s, $\\varepsilon=$%s" % (dataset_name, eps), fontsize=18)
plt.legend(prop={"size": 20})
plt.savefig(output_path)
def plot_gains_by_tau(dataset_name,
freq_vec,
eps,
delta,
output_path,
thresholds,
sampling_method=private_sampling.PpsworSamplingMethod):
"""Plots the fraction of reported keys (comparing PWS, SbH, and non-private) for different sampling threshold values."""
plt.clf()
pws_fraction_reported = [
compute_fraction_reported_pws(freq_vec, eps, delta, sampling_method,
threshold) for threshold in thresholds
]
sbh_fraction_reported = [
compute_fraction_reported_sbh(freq_vec, eps, delta, sampling_method,
threshold) for threshold in thresholds
]
non_private = [
compute_fraction_reported_non_private(freq_vec, sampling_method,
threshold)
for threshold in thresholds
]
plt.loglog(
thresholds,
non_private,
label="Non-private",
marker="o",
markersize=11,
color="red",
markevery=[0, -1])
plt.loglog(
thresholds,
pws_fraction_reported,
label="PWS",
marker="d",
markevery=[0, -1])
plt.loglog(
thresholds,
sbh_fraction_reported,
label="SbH",
marker=".",
markevery=[0, -1])
plt.xlabel("$\\tau$", fontsize=18)
plt.ylabel("Fraction", fontsize=18)
plt.title(
"Keys reported: %s, %s, (%s,%s)" %
(dataset_name, sampling_method.__name__.replace("SamplingMethod", ""), eps,
delta),
fontsize=18)
plt.legend(prop={"size": 20})
plt.savefig(output_path)
def plot_gain_ratio_by_delta(
datasets,
eps,
deltas,
output_path,
sampling_method=private_sampling.AlwaysIncludeSamplingMethod,
threshold=1.0,
markers=MARKERS):
"""Plots the gain in the number of reported keys (the ratio of PWS/SbH) for different delta values."""
plt.clf()
for (dataset_name, freq_vec), marker in zip(datasets, markers):
pws_fraction_reported = [
compute_fraction_reported_pws(freq_vec, eps, delta, sampling_method,
threshold) for delta in deltas
]
sbh_fraction_reported = [
compute_fraction_reported_sbh(freq_vec, eps, delta, sampling_method,
threshold) for delta in deltas
]
ratio = [
x / y for x, y in zip(pws_fraction_reported, sbh_fraction_reported)
]
plt.semilogx(
deltas, ratio, label=dataset_name, marker=marker, markevery=[0, -1])
plt.xlabel("$\\delta$", fontsize=18)
plt.ylabel("$\\times$Gain", fontsize=18)
plt.title(
"Reporting gain: PWS/SbH, $\\varepsilon=$" + str(eps) + " ", fontsize=18)
plt.legend(prop={"size": 20})
plt.savefig(output_path)
def plot_gain_ratio_by_tau(
datasets,
eps,
delta,
output_path,
thresholds,
sampling_method=private_sampling.PpsworSamplingMethod,
markers=MARKERS):
"""Plots the gain in the number of reported keys (the ratio of PWS/SbH) for different sampling threshold values."""
plt.clf()
for (dataset_name, freq_vec), marker in zip(datasets, markers):
pws_fraction_reported = [
compute_fraction_reported_pws(freq_vec, eps, delta, sampling_method,
threshold) for threshold in thresholds
]
sbh_fraction_reported = [
compute_fraction_reported_sbh(freq_vec, eps, delta, sampling_method,
threshold) for threshold in thresholds
]
ratio = [
x / y for x, y in zip(pws_fraction_reported, sbh_fraction_reported)
]
plt.semilogx(
thresholds, ratio, label=dataset_name, marker=marker, markevery=[0, -1])
log1_delta = math.log10(delta)
if log1_delta == int(log1_delta):
log1_delta = int(log1_delta)
plt.xlabel("$\\tau$", fontsize=18)
plt.ylabel("$\\times$Gain", fontsize=18)
plt.title(
"Reporting Gain: PWS/SbH, %s, $\\varepsilon=$%s, $\\delta=10^{%s}$" %
(sampling_method.__name__.replace("SamplingMethod", ""), eps, log1_delta),
fontsize=18)
plt.legend(prop={"size": 20})
plt.savefig(output_path)
# Main functions used to generate plots for the paper
def main_precompute():
"""Precomputes and stores values."""
# pylint: disable=invalid-name
EPS_LIST = [1.0, 0.5, 0.25, 0.1]
DELTA = 0.5**20
SAMPLING_METHODS_AND_THRESHOLDS = [
(private_sampling.PpsworSamplingMethod, 0.1),
(private_sampling.PpsworSamplingMethod, 0.01),
(private_sampling.PpsworSamplingMethod, 0.001),
(private_sampling.PpsworSamplingMethod, 0.0001),
(private_sampling.PrioritySamplingMethod, 0.1),
(private_sampling.PrioritySamplingMethod, 0.01),
(private_sampling.PrioritySamplingMethod, 0.001),
(private_sampling.PrioritySamplingMethod, 0.0001),
(private_sampling.PrioritySamplingMethod, 10**-5),
(private_sampling.PrioritySamplingMethod, 10**-6),
(private_sampling.AlwaysIncludeSamplingMethod, 1.0),
]
# MAX_FREQ = 10000
# pylint: enable=invalid-name
for sampling_method, threshold in SAMPLING_METHODS_AND_THRESHOLDS:
for eps in EPS_LIST:
pre = PrecomputePrivateThresholdSampling(threshold, eps, DELTA,
sampling_method,
DEFAULT_DIR_FOR_PRECOMPUTED)
pre.precompute(10 * int((1 / eps) * np.log(1.0 / DELTA) + 1))
def main_plot_bias_using_precompute():
"""Generates bias plots."""
# pylint: disable=invalid-name
EPS_LIST = [1.0, 0.5, 0.25, 0.1]
DELTA = 0.5**20
SAMPLING_METHODS_AND_THRESHOLDS = [
# (private_sampling.PpsworSamplingMethod, 0.1),
# (private_sampling.PpsworSamplingMethod, 0.01),
# (private_sampling.PpsworSamplingMethod, 0.001),
# (private_sampling.PpsworSamplingMethod, 0.0001),
(private_sampling.PrioritySamplingMethod, 1.0),
(private_sampling.PrioritySamplingMethod, 0.1),
(private_sampling.PrioritySamplingMethod, 0.01),
(private_sampling.PrioritySamplingMethod, 0.001),
(private_sampling.PrioritySamplingMethod, 0.0001),
(private_sampling.PrioritySamplingMethod, 10**-5),
(private_sampling.PrioritySamplingMethod, 10**-6),
# (private_sampling.AlwaysIncludeSamplingMethod, 1.0),
]
# pylint: enable=invalid-name
for eps in EPS_LIST:
samples = []
for sampling_method, threshold in SAMPLING_METHODS_AND_THRESHOLDS:
pre = PrecomputePrivateThresholdSampling(
threshold,
eps,
DELTA,
sampling_method,
DEFAULT_DIR_FOR_PRECOMPUTED,
print_progress=False)
pre.precompute(10 * int((1 / eps) * np.log(1.0 / DELTA) + 1))
samples.append(pre.sample)
output_path = "norm_bias_e%s_full.pdf" % eps
plot_bias_using_precompute_and_private_histogram(
10 * int((1 / eps) * np.log(1.0 / DELTA) + 1), samples, output_path)
output_path = "norm_bias_e%s.pdf" % eps
plot_bias_using_precompute_and_private_histogram(
10 * int((1 / eps) * np.log(1.0 / DELTA) + 1), samples[::2],
output_path)
def main_plot_variance():
"""Generates variance plots."""
# pylint: disable=invalid-name
EPS_LIST = [1.0, 0.5, 0.25, 0.1]
DELTA = 0.5**20
# MAX_FREQ = 1000
SAMPLING_METHODS_AND_THRESHOLDS = [
(private_sampling.PrioritySamplingMethod, 0.1),
(private_sampling.PrioritySamplingMethod, 0.01),
(private_sampling.PrioritySamplingMethod, 0.001),
(private_sampling.PrioritySamplingMethod, 0.0001),
(private_sampling.PrioritySamplingMethod, 10**-5),
(private_sampling.PrioritySamplingMethod, 10**-6),
(private_sampling.AlwaysIncludeSamplingMethod, 1.0),
]
# pylint: enable=invalid-name
for sampling_method, threshold in SAMPLING_METHODS_AND_THRESHOLDS:
for eps in EPS_LIST:
pre = PrecomputePrivateThresholdSampling(
threshold,
eps,
DELTA,
sampling_method,
DEFAULT_DIR_FOR_PRECOMPUTED,
print_progress=False)
pre.precompute(4 * int((1 / eps) * np.log(1.0 / DELTA) + 1))
output_path = ("variance_%s_t%s_e%s" %
(sampling_method.__name__, threshold, eps)).replace(
".", "") + ".pdf"
plot_variance_using_precompute_and_private_histogram(
2 * int((1 / eps) * np.log(1.0 / DELTA) + 1),
pre.sample,
"norm_" + output_path,
normalized=True)
plot_variance_using_precompute_and_private_histogram(
2 * int((1 / eps) * np.log(1.0 / DELTA) + 1),
pre.sample,
output_path,
normalized=False,
short_name=True)
def main_plot_inclusion_probability():
"""Generates plots of the inclusion probability."""
# pylint: disable=invalid-name
EPS_LIST = [1.0, 0.5, 0.25, 0.1]
DELTA = 0.5**20
SAMPLING_METHODS_AND_THRESHOLDS = [
(private_sampling.PrioritySamplingMethod, 0.1),
(private_sampling.PrioritySamplingMethod, 0.01),
(private_sampling.PrioritySamplingMethod, 0.001),
(private_sampling.PrioritySamplingMethod, 0.0001),
(private_sampling.PrioritySamplingMethod, 10**-5),
(private_sampling.PrioritySamplingMethod, 10**-6),
(private_sampling.AlwaysIncludeSamplingMethod, 1.0),
(private_sampling.PpsworSamplingMethod, 0.1),
(private_sampling.PpsworSamplingMethod, 0.01),
(private_sampling.PpsworSamplingMethod, 0.001),
(private_sampling.PpsworSamplingMethod, 0.0001),
]
# pylint: enable=invalid-name
for sampling_method, threshold in SAMPLING_METHODS_AND_THRESHOLDS:
for eps in EPS_LIST:
pre = PrecomputePrivateThresholdSampling(
threshold,
eps,
DELTA,
sampling_method,
DEFAULT_DIR_FOR_PRECOMPUTED,
print_progress=False)
pre.precompute(2 * int((1 / eps) * np.log(1.0 / DELTA) + 1))
output_path = "inclusion_prob_%s_t%s_e%s.pdf" % (sampling_method.__name__,
threshold, eps)
plot_inclusion_prob_using_precompute(
2 * int((1 / eps) * np.log(1.0 / DELTA) + 1), pre.sample, output_path)
def int_zipf_distribution(size, a=1.0, mult=1):
"""Generates a synthetic dataset according to the Zipf distribution."""
# integer entries and minimum equal to 1
d = [(i + 1)**(-a) for i in range(size)]
return [int(mult * d[i] / d[size - 1]) for i in range(size)]
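# Worked example (added sketch, not part of the original module):
#   >>> int_zipf_distribution(2, a=1.0, mult=10)
#   [20, 10]
# Each entry is mult * d[i] / d[size - 1] truncated by int(), so the smallest
# entry always equals mult; for non-dyadic weight ratios the truncation can
# shave an entry down by one.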
def main_plot_nrmse_on_dist_by_threshold():
# pylint: disable=invalid-name
"""Plots the error of the various methods on synthethic datasets."""
EPS_LIST = [0.5, 0.25, 0.1]
DELTA = 0.5**20
THRESHOLDS = [1.0, 0.1, 0.01, 0.001, 0.0001, 10**-5, 10**-6]
THRESHOLDS_BY_EPS = collections.defaultdict(lambda: THRESHOLDS)
THRESHOLDS_BY_EPS[0.5] = [1.0, 0.1, 0.01, 0.001, 0.0001, 10**-5]
SAMPLING_METHOD = private_sampling.PrioritySamplingMethod
datasets = []
UNIFORM_PARAMS = [(200, 1000)]
for max_freq, mult in UNIFORM_PARAMS:
datasets.append((list(range(1, max_freq + 1)) * mult,
"$[1,\\ldots,%d] \\cdot %d$" % (max_freq, mult),
"uniform_range%d_mult%d" % (max_freq, mult)))
ZIPF_PARAMS = []
# pylint: enable=invalid-name
for support_size, alpha, mult in ZIPF_PARAMS:
dist = int_zipf_distribution(support_size, alpha, mult)
dataset_name = "Zipf($10^%d$, %s, %s)" % (math.log10(support_size), alpha,
mult)
dataset_filename = "zipf_%s_%s_%s" % (support_size, alpha, mult)
    datasets.append((dist, dataset_name, dataset_filename))
for dist, dataset_name, dataset_filename in datasets:
for eps in EPS_LIST:
output_name = "nrmse_on_%s_e%s.pdf" % (dataset_filename, eps)
plot_nrmse_on_freq_vector(dist, eps, DELTA, THRESHOLDS_BY_EPS[eps],
output_name, dataset_name, SAMPLING_METHOD,
DEFAULT_DIR_FOR_PRECOMPUTED)
if __name__ == "__main__":
main_precompute()
main_plot_bias_using_precompute()
main_plot_variance()
main_plot_nrmse_on_dist_by_threshold()
main_plot_inclusion_probability()
|
|
import re
import logging
from indra.resources import load_resource_json
logger = logging.getLogger(__name__)
identifiers_url = 'https://identifiers.org'
# These are just special cases of namespaces where the mapping from INDRA to
# identifiers.org is not a question of simple capitalization.
identifiers_mappings = {
'UP': 'uniprot',
'UPPRO': 'uniprot.chain',
'UPISO': 'uniprot.isoform',
'REFSEQ_PROT': 'refseq',
'PF': 'pfam',
'IP': 'interpro',
'ECCODE': 'ec-code',
'NONCODE': 'noncodev4.rna',
'LNCRNADB': 'rnacentral',
'MIRBASEM': 'mirbase.mature',
'EGID': 'ncbigene',
    'NCBI': 'ncbigene',
'HGNC_GROUP': 'hgnc.genefamily',
'LINCS': 'lincs.smallmolecule',
'PUBCHEM': 'pubchem.compound',
'CHEMBL': 'chembl.compound',
'CTD': 'ctd.chemical',
'CVCL': 'cellosaurus',
}
# These are namespaces used by INDRA that don't have corresponding
# identifiers.org entries
non_registry = {
'SDIS', 'SCHEM', 'SFAM', 'SCOMP', 'SIGNOR', 'HMS-LINCS', 'NXPFA',
'OMIM', 'LSPCI', 'UPLOC', 'BFO', 'CCLE'
}
# These are namespaces that can appear in db_refs but are actually not
# representing grounding.
non_grounding = {
'TEXT', 'TEXT_NORM'
}
# These are reverse mappings from identifiers.org namespaces to INDRA
# namespaces
identifiers_reverse = {
v: k for k, v in identifiers_mappings.items()
}
# We have to patch this one because it is ambiguous
identifiers_reverse['ncbigene'] = 'EGID'
# These are only the URLs that are strictly prefixes and not more complicated
# patterns. This is because some downstream code uses these as prefixes
# rather than arbitrary patterns.
url_prefixes = {
# Biology namespaces
'NXPFA': 'https://www.nextprot.org/term/FA-',
'SIGNOR': 'https://signor.uniroma2.it/relation_result.php?id=',
'LSPCI': 'https://labsyspharm.github.io/lspci/',
# WM namespaces
'UN': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'WDI': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'FAO': 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/',
'HUME': ('https://github.com/BBN-E/Hume/blob/master/resource/ontologies'
'/hume_ontology/'),
'CWMS': 'http://trips.ihmc.us/',
'SOFIA': 'http://cs.cmu.edu/sofia/',
}
def get_ns_from_identifiers(identifiers_ns):
""""Return a namespace compatible with INDRA from an identifiers namespace.
For example, this function can be used to map 'uniprot' to 'UP'.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
Returns
-------
str or None
The namespace compatible with INDRA's internal representation or
None if the given namespace isn't an identifiers.org standard.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
if not reg_entry:
return None
mapping = identifiers_reverse.get(identifiers_ns.lower())
if mapping:
return mapping
else:
return identifiers_ns.upper()
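# Example behavior (added sketch; assumes 'uniprot' and 'pubchem.compound'
# are entries in identifiers_patterns.json, as the mappings above suggest):
#   >>> get_ns_from_identifiers('uniprot')
#   'UP'                        # found via identifiers_reverse
#   >>> get_ns_from_identifiers('pubchem.compound')
#   'PUBCHEM'
#   >>> get_ns_from_identifiers('not-a-namespace') is None  # not registered
#   True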
def get_ns_id_from_identifiers(identifiers_ns, identifiers_id):
"""Return a namespace/ID pair compatible with INDRA from identifiers.
Parameters
----------
identifiers_ns : str
An identifiers.org standard namespace.
identifiers_id : str
An identifiers.org standard ID in the given namespace.
Returns
-------
(str, str)
A namespace and ID that are valid in INDRA db_refs.
"""
reg_entry = identifiers_registry.get(identifiers_ns.lower())
db_ns = get_ns_from_identifiers(identifiers_ns)
if db_ns is None:
return None, None
db_id = identifiers_id
if reg_entry['namespace_embedded']:
if not identifiers_id.startswith(identifiers_ns.upper()):
db_id = '%s:%s' % (identifiers_ns.upper(), identifiers_id)
return db_ns, db_id
def get_identifiers_ns(db_name):
"""Map an INDRA namespace to an identifiers.org namespace when possible.
Example: this can be used to map 'UP' to 'uniprot'.
Parameters
----------
db_name : str
An INDRA namespace to map to identifiers.org
Returns
-------
str or None
An identifiers.org namespace or None if not available.
"""
mapped_db_name = identifiers_mappings.get(db_name, db_name.lower())
if mapped_db_name not in identifiers_registry:
return None
return mapped_db_name
def get_url_prefix(db_name):
"""Return the URL prefix for a given namespace."""
identifiers_ns = get_identifiers_ns(db_name)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if not identifiers_entry['namespace_embedded']:
return '%s/%s:' % (identifiers_url, identifiers_ns.lower())
else:
return '%s/' % identifiers_url
else:
if db_name in url_prefixes:
return url_prefixes[db_name]
return None
def get_identifiers_url(db_name, db_id):
"""Return an identifiers.org URL for a given database name and ID.
Parameters
----------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc.
db_id : str
An identifier in the given database.
Returns
-------
url : str
An identifiers.org URL corresponding to the given database name and ID.
"""
# This is the case where we have a prefix that we can simply attach the
# db_id to to get the desired URL.
if db_name == 'CHEMBL':
db_id = ensure_chembl_prefix(db_id)
elif db_name == 'CHEBI':
db_id = ensure_chebi_prefix(db_id)
prefix = get_url_prefix(db_name)
if prefix:
return '%s%s' % (prefix, db_id)
# Otherwise, we have to handle some special cases
bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
if db_name == 'LINCS':
if db_id.startswith('LSM-'): # Lincs Small Molecule ID
url = identifiers_url + '/lincs.smallmolecule:%s' % db_id
elif db_id.startswith('LCL-'): # Lincs Cell Line ID
url = identifiers_url + '/lincs.cell:%s' % db_id
else: # Assume LINCS Protein
url = identifiers_url + '/lincs.protein:%s' % db_id
elif db_name == 'CHEMBL':
if not db_id.startswith('CHEMBL'):
db_id = 'CHEMBL%s' % db_id
url = identifiers_url + '/chembl.compound:%s' % db_id
elif db_name == 'HMS-LINCS':
url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
# Special cases with no identifiers entry
elif db_name == 'SCHEM':
url = bel_scai_url + 'selventa-legacy-chemicals/' + \
'selventa-legacy-chemicals-20150601.belns'
elif db_name == 'SCOMP':
url = bel_scai_url + 'selventa-named-complexes/' + \
'selventa-named-complexes-20150601.belns'
elif db_name == 'SFAM':
url = bel_scai_url + 'selventa-protein-families/' + \
'selventa-protein-families-20150601.belns'
elif db_name == 'TEXT' or db_name == 'TEXT_NORM':
return None
else:
logger.warning('Unhandled name space %s' % db_name)
url = None
return url
def parse_identifiers_url(url):
"""Retrieve database name and ID given the URL.
Parameters
----------
url : str
An identifiers.org URL to parse.
Returns
-------
db_name : str
An internal database name: HGNC, UP, CHEBI, etc. corresponding to the
given URL.
db_id : str
An identifier in the database.
"""
# Try matching by string pattern
db_ns, db_id = None, None
    url_pattern = \
        r'(?:https?)://identifiers\.org/([A-Za-z0-9.-]+)(/|:)([A-Za-z0-9:_.-]+)'
match = re.match(url_pattern, url)
if match is not None:
g = match.groups()
if len(g) == 3:
pattern_ns, pattern_id = g[0], g[2]
db_ns, db_id = get_ns_id_from_identifiers(pattern_ns, pattern_id)
if db_ns == 'HGNC':
if db_id.startswith('HGNC:'):
db_id = db_id[5:]
# If we got UP and UPPRO, return UPPRO
if db_ns == 'UP' and '#PRO_' in url:
db_ns = 'UPPRO'
db_id = url[url.find('PRO_'):]
if db_ns and db_id:
return db_ns, db_id
for ns, prefix in url_prefixes.items():
if url.startswith(prefix):
return ns, url[len(prefix):]
# Handle other special cases
for part in ['/lincs.smallmolecule', '/lincs.cell', '/lincs.protein']:
if part in url:
return 'LINCS', url[(url.find(part) + len(part) + 1):]
if '/chembl.compound' in url:
return 'CHEMBL', url[
(url.find('/chembl.compound') + len('/chembl.compound:')):]
if 'lincs.hms.harvard.edu' in url:
return 'HMS-LINCS', url[len('http://lincs.hms.harvard.edu/db/sm/'):-4]
if 'selventa-legacy-chemicals/' in url:
return 'SCHEM', None
if 'selventa-named-complexes/' in url:
return 'SCOMP', None
if 'selventa-protein-families/' in url:
return 'SFAM', None
else:
logger.warning('Could not parse URL %s' % url)
return None, None
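# Round-trip sketch (added; assumes the registry marks 'uniprot' as not
# namespace-embedded, in line with identifiers.org conventions):
#   >>> parse_identifiers_url('https://identifiers.org/uniprot:P12345')
#   ('UP', 'P12345')
#   >>> get_identifiers_url('UP', 'P12345')
#   'https://identifiers.org/uniprot:P12345'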
def namespace_embedded(db_ns: str) -> bool:
"""Return true if this namespace requires IDs to have namespace embedded.
This function first maps the given namespace to an identifiers.org
namespace and then checks the registry to see if namespaces need
to be embedded in IDs. If yes, it returns True. If not, or the ID can't
    be mapped to identifiers.org, it returns False.
Parameters
----------
db_ns :
The namespace to check.
Returns
-------
:
True if the namespace is known to be embedded in IDs of this
namespace. False if unknown or known not to be embedded.
"""
identifiers_ns = get_identifiers_ns(db_ns)
if identifiers_ns:
identifiers_entry = identifiers_registry.get(identifiers_ns)
if identifiers_entry['namespace_embedded']:
return True
return False
def ensure_prefix_if_needed(db_ns: str, db_id: str) -> str:
"""Return an ID ensuring a namespace prefix if known to be needed.
Parameters
----------
db_ns :
The namespace associated with the identifier.
db_id :
The original identifier.
Returns
-------
:
The identifier with namespace embedded if needed.
"""
if namespace_embedded(db_ns):
return ensure_prefix(db_ns, db_id)
return db_id
def ensure_prefix(db_ns, db_id, with_colon=True):
"""Return a valid ID that has the given namespace embedded.
This is useful for namespaces such as CHEBI, GO or BTO that require
the namespace to be part of the ID. Note that this function always
ensures that the given db_ns is embedded in the ID and can handle the
    case when it's already present.
Parameters
----------
db_ns : str
A namespace.
db_id : str
An ID within that namespace which should have the namespace
as a prefix in it.
with_colon: Optional[bool]
If True, the namespace prefix is followed by a colon in the ID (e.g.,
CHEBI:12345). Otherwise, no colon is added (e.g., CHEMBL1234).
Default: True
"""
if db_id is None:
return None
colon = ':' if with_colon else ''
if not db_id.startswith(f'{db_ns}{colon}'):
return f'{db_ns}{colon}{db_id}'
return db_id
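# Behavior sketch (added; follows directly from the checks above):
#   >>> ensure_prefix('CHEBI', '1234')
#   'CHEBI:1234'
#   >>> ensure_prefix('CHEBI', 'CHEBI:1234')  # already prefixed, unchanged
#   'CHEBI:1234'
#   >>> ensure_prefix('CHEMBL', '25', with_colon=False)
#   'CHEMBL25'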
def ensure_chebi_prefix(chebi_id):
"""Return a valid CHEBI ID that has the appropriate CHEBI: prefix."""
return ensure_prefix('CHEBI', chebi_id)
def ensure_chembl_prefix(chembl_id):
"""Return a valid CHEMBL ID that has the appropriate CHEMBL prefix."""
return ensure_prefix('CHEMBL', chembl_id, with_colon=False)
def _load_identifiers_registry():
identifiers_registry = load_resource_json('identifiers_patterns.json')
# Override pattern otherwise patterns like 1.1 can't be used
identifiers_registry['ec-code']['pattern'] = '^\\d{1,2}(\\.\\d{0,3}){0,3}$'
return identifiers_registry
identifiers_registry = _load_identifiers_registry()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
class Test_compute_implicit_line(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import clipping
return clipping.compute_implicit_line(nodes)
def test_no_rounding(self):
nodes = np.asfortranarray([[1.0, 5.0], [2.0, 2.0]])
coeff_a, coeff_b, coeff_c = self._call_function_under_test(nodes)
self.assertEqual(coeff_a, 0.0)
self.assertEqual(coeff_b, 4.0)
self.assertEqual(coeff_c, -8.0)
def test_rational_length(self):
nodes = np.asfortranarray([[3.0, 7.0], [2.0, 5.0]])
coeff_a, coeff_b, coeff_c = self._call_function_under_test(nodes)
self.assertEqual(coeff_a, -3.0)
self.assertEqual(coeff_b, 4.0)
self.assertEqual(coeff_c, 1.0)
def test_irrational_length(self):
nodes = np.asfortranarray([[4.0, 5.0], [7.0, 8.0]])
coeff_a, coeff_b, coeff_c = self._call_function_under_test(nodes)
self.assertEqual(coeff_a, -1.0)
self.assertEqual(coeff_b, 1.0)
self.assertEqual(coeff_c, -3.0)
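# Convention assumed by the tests above (added sketch, not a definitive
# statement of the library's API): for a segment from (x0, y0) to (x1, y1),
# the implicit line a*x + b*y + c = 0 uses the unnormalized normal
# (a, b) = (-(y1 - y0), x1 - x0) and c = -(a * x0 + b * y0). For the nodes
# [[3.0, 7.0], [2.0, 5.0]] (x-row, y-row) this gives (-3.0, 4.0, 1.0),
# matching ``test_rational_length``.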
class Test_compute_fat_line(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes):
from bezier.hazmat import clipping
return clipping.compute_fat_line(nodes)
def test_line(self):
nodes = np.asfortranarray([[1.0, 5.0], [2.0, 2.0]])
result = self._call_function_under_test(nodes)
coeff_a, coeff_b, coeff_c, d_min, d_max = result
self.assertEqual(coeff_a, 0.0)
self.assertEqual(coeff_b, 4.0)
self.assertEqual(coeff_c, -8.0)
self.assertEqual(d_min, 0.0)
self.assertEqual(d_max, 0.0)
def test_quadratic(self):
nodes = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 1.0, 2.0]])
result = self._call_function_under_test(nodes)
coeff_a, coeff_b, coeff_c, d_min, d_max = result
self.assertEqual(coeff_a, -2.0)
self.assertEqual(coeff_b, 0.0)
self.assertEqual(coeff_c, 0.0)
self.assertEqual(d_min, -2.0)
self.assertEqual(d_max, 0.0)
def test_many_interior(self):
nodes = np.asfortranarray(
[[0.0, 1.0, 2.0, 3.0, 4.0], [0.0, 4.0, -4.0, 2.0, 0.0]]
)
result = self._call_function_under_test(nodes)
coeff_a, coeff_b, coeff_c, d_min, d_max = result
self.assertEqual(coeff_a, 0.0)
self.assertEqual(coeff_b, 4.0)
self.assertEqual(coeff_c, 0.0)
self.assertEqual(d_min, -16.0)
self.assertEqual(d_max, 16.0)
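# The fat-line tests extend the same convention with signed control-point
# distances (added sketch): each control point (xi, yi) contributes
# d_i = a*xi + b*yi + c, and (d_min, d_max) bound these distances. In
# ``test_quadratic`` the line through (0, 0) and (0, 2) gives
# (a, b, c) = (-2, 0, 0); the interior control point (1, 1) contributes
# d = -2, hence d_min = -2 and d_max = 0.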
class Test__update_parameters(unittest.TestCase):
@staticmethod
def _call_function_under_test(s_min, s_max, start0, end0, start1, end1):
from bezier.hazmat import clipping
return clipping._update_parameters(
s_min, s_max, start0, end0, start1, end1
)
def test_parallel(self):
from bezier.hazmat import clipping
start0 = np.asfortranarray([0.0, 0.0])
end0 = np.asfortranarray([1.0, 0.0])
start1 = np.asfortranarray([0.0, -1.0])
end1 = np.asfortranarray([1.0, -1.0])
with self.assertRaises(NotImplementedError) as exc_info:
self._call_function_under_test(
None, None, start0, end0, start1, end1
)
expected_args = (clipping.NO_PARALLEL,)
self.assertEqual(exc_info.exception.args, expected_args)
def test_t_outside(self):
start0 = np.asfortranarray([0.0, -1.0])
end0 = np.asfortranarray([1.0, -1.0])
start1 = np.asfortranarray([0.5, 0.0])
end1 = np.asfortranarray([1.0, 2.0])
s_min, s_max = self._call_function_under_test(
2.0, -1.0, start0, end0, start1, end1
)
self.assertEqual(s_min, 2.0)
self.assertEqual(s_max, -1.0)
def _update_helper(self, s_min, s_max):
start0 = np.asfortranarray([0.0, 2.0])
end0 = np.asfortranarray([1.0, 2.0])
start1 = np.asfortranarray([0.0, 1.0])
end1 = np.asfortranarray([0.5, 3.0])
return self._call_function_under_test(
s_min, s_max, start0, end0, start1, end1
)
def test_update_both_unset(self):
s_min, s_max = self._update_helper(1.0, 0.0)
self.assertEqual(s_min, 0.25)
self.assertEqual(s_max, 0.25)
def test_update_s_max(self):
s_min, s_max = self._update_helper(0.125, -1.0)
self.assertEqual(s_min, 0.125)
self.assertEqual(s_max, 0.25)
def test_s_not_updated(self):
s_min, s_max = self._update_helper(0.125, 0.5)
self.assertEqual(s_min, 0.125)
self.assertEqual(s_max, 0.5)
class Test_clip_range(unittest.TestCase):
@staticmethod
def _call_function_under_test(nodes1, nodes2):
from bezier.hazmat import clipping
return clipping.clip_range(nodes1, nodes2)
def test_simple(self):
nodes1 = np.asfortranarray([[0.0, 1.0, 2.0], [0.0, 2.0, 0.0]])
nodes2 = np.asfortranarray([[0.0, 2.0, 0.0], [-1.0, 1.0, 3.0]])
start_s, end_s = self._call_function_under_test(nodes1, nodes2)
self.assertEqual(start_s, 0.25)
self.assertEqual(end_s, 0.75)
def test_parallel(self):
from bezier.hazmat import clipping
nodes1 = np.asfortranarray([[0.0, 1.0, 2.0], [1.0, 3.0, 1.0]])
nodes2 = np.asfortranarray(
[[0.0, 0.5, 1.0, 1.5, 2.0], [0.0, 4.0, 4.0, 4.0, 0.0]]
)
with self.assertRaises(NotImplementedError) as exc_info:
self._call_function_under_test(nodes1, nodes2)
expected_args = (clipping.NO_PARALLEL,)
self.assertEqual(exc_info.exception.args, expected_args)
def test_intersect_left_side(self):
        # Due to a previous bug in ``_update_parameters``, this failed to set
        # ``s_max`` at one of the intersections.
nodes1 = np.asfortranarray(
[[2.0, 4.5, 2.5, 5.0], [0.0, 1.0, 3.0, 4.0]]
)
nodes2 = np.asfortranarray(
[[2.34375, 4.15625, 6.84375], [1.125, 0.875, 3.125]]
)
start_s, end_s = self._call_function_under_test(nodes1, nodes2)
self.assertEqual(start_s, 0.0)
self.assertEqual(end_s, 0.75)
def test_intersect_right_side(self):
nodes1 = np.asfortranarray(
[[2.0, 4.5, 2.5, 5.0], [0.0, 1.0, 3.0, 4.0]]
)
nodes2 = np.asfortranarray(
[[0.34375, 4.15625, 5.34375], [1.125, 0.875, 3.125]]
)
start_s, end_s = self._call_function_under_test(nodes1, nodes2)
self.assertEqual(start_s, 0.09375)
self.assertEqual(end_s, 1.0)
def test_fully_disjoint(self):
nodes1 = np.asfortranarray(
[[2.0, 4.5, 2.5, 5.0], [0.0, 1.0, 3.0, 4.0]]
)
nodes2 = np.asfortranarray(
[[0.34375, 2.15625, 2.59375], [3.125, 2.875, 4.125]]
)
start_s, end_s = self._call_function_under_test(nodes1, nodes2)
self.assertEqual(start_s, 1.0)
self.assertEqual(end_s, 0.0)
|
|
# coding=utf-8
"""Text encoding UTF-8"""
class SystemVariables:
"""SystemVariables(String, String, String, String, Boolean, [String],
(String), [String], String)
Class for constructing required system variables for the bot.
SystemVariables.prefix_qualifier
String
Command qualifier.
        Intended to be the first character of all command prefixes.
SystemVariables.prefix_question
String
Command prefix for question related commands.
SystemVariables.prefix_information
String
Command prefix for information related commands.
SystemVariables.prefix_debug
String
Command prefix for debug related commands.
SystemVariables.test_mode
Boolean
Determines if the bot will start in test mode or not.
SystemVariables.allowed_testing
[String]
Entries of server ID allowed when test mode is enabled.
SystemVariables.ATSUI
(String)
Entries of client ID allowed for debug prefix commands.
SystemVariables.trigger_exclude
[String]
Entries of server ID excluded from having trigger commands.
SystemVariables.previous_playing_message
String
Entry of previous playing message.
For playing message storage when testing mode is turned on and off."""
def __init__(self, prefix_qualifier, prefix, test_mode, allowed_testing, atsui, trigger_exclude,
previous_playing_message, forbidden_eval, token_status,
custom_filename_status, custom_filename_path, eval_error_message):
self.prefix_qualifier = prefix_qualifier
self.prefix = prefix
self.test_mode = test_mode
self.allowed_testing = allowed_testing
self.ATSUI = atsui
self.trigger_exclude = trigger_exclude
self.previous_playing_message = previous_playing_message
self.forbidden_eval = forbidden_eval
self.token_status = token_status
self.custom_filename_status = custom_filename_status
self.custom_filename_path = custom_filename_path
self.eval_error_message = eval_error_message
self.eval_error_length = len(eval_error_message)
self.ping_information = []
async def command_help(system, sophia, message):
trigger_status = True
if message.server.id in system.trigger_exclude:
trigger_status = False
if trigger_status:
await sophia.send_message(message.channel, 'Here are the commands I recognize at the moment:\n\n' +
'Question commands (starts with `' + system.prefix_question + '`)\n' +
'`about`, `help`, `command`, `botversion`, `infocheck`, `tunnelcheck`\n\n' +
'Information commands (starts with `' + system.prefix_information + '`)\n' +
'`tunnellink`, `tunnelenable`, `tunnelmode`, `tunnelleave`, `tunnelcreate`, `tunneldelete`\n'+
'`hello`, `invite`, `ping` (`pong`), `triggertoggle`\n\n' +
'Trigger commands\n' +
':coffee:, :tea:, `cawfee`, `gween tea`, ' +
'`\u0028\u256f\u00b0\u25a1\u00b0\uff09\u256f\ufe35 \u253b\u2501\u253b`, ' +
'`\u252c\u2500\u252c\ufeff \u30ce\u0028 \u309c\u002d\u309c\u30ce\u0029`\n' +
'...with 14 secret commands! \n\n' +
                                  'For information on individual commands, please enter `' + system.prefix_question +
'command `*`command`*.')
else:
await sophia.send_message(message.channel, 'Here are the commands I recognize at the moment:\n\n' +
'Question commands (starts with `' + system.prefix_question + '`)\n' +
'`about`, `help`, `command`, `botversion`, `infocheck`, `tunnelcheck`\n\n' +
'Information commands (starts with `' + system.prefix_information + '`)\n' +
'`tunnellink`, `tunnelenable`, `tunnelmode`, `tunnelleave`, `tunnelcreate`, `tunneldelete`\n'+
'`ping` (`pong`), `hello`, `invite`, `triggertoggle`\n' +
'...with 14 secret commands! \n\n' +
                                  'For information on individual commands, please enter `' + system.prefix_question +
'command `*`command`*.')
async def individual_command_help(system, sophia, message):
message_qualifier = ' '
space_position = message.content.find(message_qualifier, 0)
if space_position != -1:
message_content = message.content[space_position + 1:]
if message_content == 'about':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'about`\n\n' +
'Allows Sophia to greet herself.')
elif message_content == 'help':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'help`\n\n' +
'Displays current command list and current amount of secret commands.')
elif message_content == 'command':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'command `*`command`*\n\n' +
'Displays detailed help information for individual command.')
elif message_content == 'botversion':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'botversion`\n\n' +
'Displays the bot\'s current bot version and last update date.')
elif message_content == 'infocheck':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'infocheck`\n\n' +
'Displays the author\'s current discord name, discrim number and ID.\n' +
'Also displays server ID and channel ID.')
elif message_content == 'tunnelcheck':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'tunnelcheck `*`tunnel_id`*\n\n' +
'Displays the tunnel information for the specified tunnel ID.')
elif message_content == 'roomcheck':
await sophia.send_message(message.channel, 'Category: Question\n' +
'Command format: `' + system.prefix_question + 'roomcheck `*`room_id`*\n\n' +
'Displays the room information for the specified room ID.')
elif message_content == 'tunnellink':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'tunnellink `*`room_id room_password`*\n' +
'Required user permission(s): *Administrator* or *Manage Server* or *Manage Channel*.\n\n'+
'Links the current channel to a tunnel room.')
elif message_content == 'tunnelenable':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information +
'tunnelenable `*`room_id option room_password`* \n' +
'Required user permission(s): *Administrator* or *Manage Server* or *Manage Channel*.\n' +
'Note: Requires tunnel room manager (currently first channel in the tunnel room list).\n\n' +
'Toggles the current room\'s tunnel enable option.')
elif message_content == 'tunnelmode':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'tunnelmode `*`option`* \n' +
'Required user permission(s): *Administrator* or *Manage Server* or *Manage Channel*.\n' +
'Available option(s):\n' +
'`3` or `all` - Sets the channel to both send and receive messages.\n' +
'`2` or `receive` - Sets the channel to only receive messages.\n' +
'`1` or `send` - Sets the channel to only send messages.\n' +
'`0` or `none` - Sets the channel to not receive nor send any messages.\n\n'
'Changes the current tunnel mode.')
elif message_content == 'tunnelleave':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'tunnelleave `*`room_password`* \n' +
'Required user permission(s): *Administrator* or *Manage Server* or *Manage Channel*.\n\n' +
'Leave the current tunnel room.')
elif message_content == 'tunnelcreate':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'tunnelcreate `*`room_name room_password`*\n'
'Note: *`room_password`* is optional. Room ID is autogenerated.\n\n' +
'Creates a tunnel room with user specified room name.')
elif message_content == 'tunneldelete':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'tunneldelete `*`room_password`*\n' +
'Required user permission(s): *Administrator* or *Manage Server* or *Manage Channel*.\n\n' +
'Note: Requires tunnel room manager (currently first channel in the tunnel room list).\n\n'
'Deletes a tunnel room.')
elif message_content == 'hello':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'hello`\n\n' +
'Allows Sophia to say hello to the user.')
elif message_content == 'invite':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'invite`\n\n' +
'Displays Sophia\'s invite link and server link.')
elif message_content == 'ping' or message_content == 'pong':
await sophia.send_message(message.channel, 'Category: Information\n' +
'Command format: `' + system.prefix_information + 'ping` or `' +
system.prefix_information + 'pong`\n\n' +
'Ping! Pong!')
elif message_content == 'roomcreate':
await sophia.send_message(message.channel, 'Category: ???\n' +
'Command format: `' + system.prefix_information + 'roomcreate `*`room_name room_password`*\n\n' +
'Creates a minigame room.')
elif message_content == 'roomjoin':
await sophia.send_message(message.channel, 'Category: ???\n' +
'Command format: `' + system.prefix_information + 'roomjoin `*`room_id room_password`*\n\n' +
'Join a minigame room.')
elif message_content == 'roomcheck':
await sophia.send_message(message.channel, 'Category: ???\n' +
'Command format: `' + system.prefix_information + 'roomcheck `*`room_id`*\n\n' +
'Check room information for the specified room ID.')
elif message_content == 'triggertoggle':
await sophia.send_message(message.channel, 'Category: Information\n' +
                                  'Command format: `' + system.prefix_information + 'triggertoggle `*`option`*\n'
'Required user permission(s): *Administrator* or *Manage Server* or *Manage Channel*.\n' +
'Note: This toggle is server wide.\n\n' +
'Toggles trigger command.')
elif message_content == 'sara' or message_content == 'sarachan':
await sophia.send_message(message.channel, 'Category: ???\n' +
'Command format: `' + system.prefix_information + 'sara` or `' +
system.prefix_information + 'sarachan`\n\n' +
'Be-Music Source (BMS) meme. You have found a secret!')
else:
await sophia.send_message(message.channel, 'The command you have specified is invalid or missing ' +
                                      'help information.')
else:
await sophia.send_message(message.channel, 'Unable to show command help since the command you want is ' +
'not specified.\n' +
'Usage: `' + system.prefix_question + 'command `*`command`*')
async def info_check(sophia, message):
await sophia.send_message(message.channel, '`Author`: ' + str(message.author) +
' `' + str(message.author.id) + '`\n' +
# '`Bot`: ' + str(message.author.bot) + '\n' +
# '`MessLen`: ' + str(len(message.content)) + '\n' +
'`Channel`: ' + str(message.channel) + ' `' + str(message.channel.id) + '`\n' +
'`Server`: ' + str(message.server.name) + ' `' + str(message.server.id) + '`')
async def server_invite(sophia, message):
await sophia.send_message(message.channel,
'You can take me to your discord server by clicking the link below.' + '\n' +
'https://discordapp.com/oauth2/authorize?client_id=229134725569183745&scope=bot&permissions=0' + '\n\n' +
'Interested in joining my discord guild? You can visit it by using the invite link below!' + '\n' +
'https://discord.gg/SpTWKDd')
async def detailed_ping(system, sophia, message, ping_message):
channel_id = message.channel.id
timestamp_value = message.timestamp.hour * 3600000 + \
message.timestamp.minute * 60000 + \
message.timestamp.second * 1000 + \
int(message.timestamp.microsecond / 1000)
system.ping_information.append((channel_id, timestamp_value))
await sophia.send_message(message.channel, ping_message)
async def detailed_ping_edit(system, sophia, message):
if message.channel.id == system.ping_information[0][0]:
message_content = message.content
timestamp_value = message.timestamp.hour * 3600000 + message.timestamp.minute * 60000 + \
message.timestamp.second * 1000 + (message.timestamp.microsecond // 1000)
timestamp_difference = timestamp_value - system.ping_information[0][1]
        if timestamp_difference < 0:
            # The edit happened after midnight relative to the ping message;
            # add a full day in milliseconds to recover the true latency.
            timestamp_difference += 86400000
await sophia.edit_message(message, message_content + ' ' + str(timestamp_difference) + '`ms`')
del system.ping_information[0]
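# Minimal sketch of the latency math above (added; ``_ms_of_day`` is a
# hypothetical helper, not part of the original bot):
def _ms_of_day(ts):
    """Milliseconds elapsed since midnight for a datetime-like timestamp."""
    return (ts.hour * 3600000 + ts.minute * 60000 + ts.second * 1000
            + ts.microsecond // 1000)
# The edit-minus-send difference goes negative when the two timestamps
# straddle midnight; adding a full day (86400000 ms) recovers the latency.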
async def testing_mode(system, discord, sophia, message, message_low):
message_qualifier = ' '
message_start = message_low.find(message_qualifier, 0)
testing_mode_parameter = str(message_low)[message_start + 1:]
# await sophia.send_message(message.channel, testing_mode_parameter)
if testing_mode_parameter == 'yes' or testing_mode_parameter == '1':
system.test_mode = True
await sophia.change_presence(game=discord.Game(name='\u26A0 TEST MODE \u26A0'))
await sophia.send_message(message.channel, 'Testing mode enabled')
elif testing_mode_parameter == 'no' or testing_mode_parameter == '0':
system.test_mode = False
await sophia.change_presence(game=discord.Game(name=system.previous_playing_message))
await sophia.send_message(message.channel, 'Testing mode disabled')
async def prefix_change(system, sophia, message):
"""Changes the bot's prefix.
This command alters the following variables:
SystemVariables.prefix_qualifier
SystemVariables.prefix"""
message_split = message.content.split(' ', maxsplit=2)
    # str.split() never yields None entries; guard against a short message
    # (fewer than three space-separated parts) instead.
    if len(message_split) > 2 and message_split[2].startswith(message_split[1]):
        exception_check = False
    else:
        exception_check = True
if exception_check:
await sophia.send_message(message.channel, 'Prefix change failed')
else:
system.prefix_qualifier = message_split[1]
system.prefix = message_split[2]
await sophia.send_message(message.channel, 'Prefix change success')
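# Usage sketch (added): a debug message such as '<prefix>prefix ! !c' splits
# into ['<prefix>prefix', '!', '!c']; because '!c' starts with '!', the
# qualifier becomes '!' and the full command prefix becomes '!c'.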
async def change_name(sophia, message):
find_qualifier = ' '
name_position = message.content.find(find_qualifier, 0)
name_change = message.content[name_position + 1:]
# await sophia.send_message(message.channel, str(len(username)) + username)
await sophia.edit_profile(password='', username=name_change)
await sophia.send_message(message.channel, 'Bot name has successfully changed.')
async def change_avatar(sophia, message):
find_qualifier = ' '
filename_position = message.content.find(find_qualifier, 0)
filename_change = message.content[filename_position + 1:]
# await sophia.send_message(message.channel, filename_change)
try:
file_point = open(filename_change, 'rb')
except FileNotFoundError:
await sophia.send_message(message.channel, 'Image not found.')
else:
processed_file = file_point.read()
try:
await sophia.edit_profile(password='', avatar=processed_file)
        except Exception:
            # A string after ``except`` is a TypeError in Python 3; the
            # precise exception would be discord.InvalidArgument, but the
            # discord module is not imported in this file.
            await sophia.send_message(message.channel, 'Avatar change failed due to missing or bad image file.')
else:
await sophia.send_message(message.channel, 'Bot avatar has successfully changed.')
async def trigger_toggle(system, sophia, message, message_low, permission):
find_qualifier = ' '
option_position = message.content.find(find_qualifier, 0)
if permission:
        if option_position != -1:
option_message = message_low[option_position + 1:]
if option_message == 'disable' or option_message == 'no' or option_message == '0':
if message.server.id in system.trigger_exclude:
await sophia.send_message(message.channel, 'The trigger command has already ' +
'been disabled for this server.')
else:
system.trigger_exclude.append(message.server.id)
await sophia.send_message(message.channel, 'Trigger command is now disabled for this server.')
elif option_message == 'enable' or option_message == 'yes' or option_message == '1':
if message.server.id in system.trigger_exclude:
system.trigger_exclude.remove(message.server.id)
await sophia.send_message(message.channel, 'Trigger command is now enabled for this server.')
else:
await sophia.send_message(message.channel, 'The trigger command has already ' +
'been enabled for this server.')
else:
await sophia.send_message(message.channel, 'Unable to change trigger command settings ' +
'due to invalid option.')
else:
await sophia.send_message(message.channel,
'Unable to change trigger command settings due to missing option.')
else:
await sophia.send_message(message.channel,
'Unable to change trigger command since you do not have sufficient role permissions.')
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import random
import SignedAPICall
import XenAPI
from solidfire.factory import ElementFactory
from util import sf_util
# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
# Import Integration Libraries
# base - contains all resources as entities and defines create, delete, list operations on them
from marvin.lib.base import Account, Cluster, ServiceOffering, Snapshot, StoragePool, User, VirtualMachine, Volume
# common - commonly used methods for all tests are listed here
from marvin.lib.common import get_domain, get_template, get_zone, list_hosts, list_volumes
# utils - utility classes for common cleanup, external library wrappers, etc.
from marvin.lib.utils import cleanup_resources
# Prerequisites:
# Only one zone
# Only one pod
# Two clusters
#
# Running the tests:
# If using XenServer, verify the "xen_server_hostname" variable is correct.
# Set the Global Setting "max.number.managed.clustered.file.systems" equal to 2.
#
# Note:
# Verify that TestData.clusterId and TestData.clusterId2 are set properly.
class TestData():
# constants
account = "account"
allocationstate = "allocationstate"
capacityBytes = "capacitybytes"
capacityIops = "capacityiops"
clusterId = "clusterId"
clusterId2 = "clusterId2"
computeOffering = "computeoffering"
domainId = "domainId"
email = "email"
firstname = "firstname"
hypervisor = "hypervisor"
lastname = "lastname"
mvip = "mvip"
name = "name"
password = "password"
port = "port"
primaryStorage = "primarystorage"
provider = "provider"
scope = "scope"
solidFire = "solidfire"
storageTag = "SolidFire_SAN_1"
tags = "tags"
url = "url"
user = "user"
username = "username"
xenServer = "xenserver"
zoneId = "zoneId"
hypervisor_type = xenServer
xen_server_hostname = "XenServer-6.5-1"
def __init__(self):
self.testdata = {
TestData.solidFire: {
TestData.mvip: "10.117.78.225",
TestData.username: "admin",
TestData.password: "admin",
TestData.port: 443,
TestData.url: "https://10.117.78.225:443"
},
TestData.xenServer: {
TestData.username: "root",
TestData.password: "solidfire"
},
TestData.account: {
TestData.email: "test@test.com",
TestData.firstname: "John",
TestData.lastname: "Doe",
TestData.username: "test",
TestData.password: "test"
},
TestData.user: {
TestData.email: "user@test.com",
TestData.firstname: "Jane",
TestData.lastname: "Doe",
TestData.username: "testuser",
TestData.password: "password"
},
TestData.primaryStorage: {
TestData.name: "SolidFire-%d" % random.randint(0, 100),
TestData.scope: "ZONE",
TestData.url: "MVIP=10.117.78.225;SVIP=10.117.94.225;" +
"clusterAdminUsername=admin;clusterAdminPassword=admin;" +
"clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
"clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
TestData.provider: "SolidFire",
TestData.tags: TestData.storageTag,
TestData.capacityIops: 4500000,
TestData.capacityBytes: 2251799813685248,
TestData.hypervisor: "Any"
},
TestData.computeOffering: {
TestData.name: "SF_CO_1",
"displaytext": "SF_CO_1 (Min IOPS = 300; Max IOPS = 600)",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
"storagetype": "shared",
"customizediops": False,
"miniops": "300",
"maxiops": "600",
"hypervisorsnapshotreserve": 200,
TestData.tags: TestData.storageTag
},
TestData.zoneId: 1,
TestData.clusterId: 1,
TestData.clusterId2: 6,
TestData.domainId: 1,
TestData.url: "10.117.40.114"
}
class TestManagedClusteredFilesystems(cloudstackTestCase):
_should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
_volume_should_have_failed_to_attach_to_vm = "The volume should have failed to attach to the VM."
@classmethod
def setUpClass(cls):
# Set up API client
testclient = super(TestManagedClusteredFilesystems, cls).getClsTestClient()
cls.apiClient = testclient.getApiClient()
cls.configData = testclient.getParsedTestDataConfig()
cls.dbConnection = testclient.getDbConnection()
cls.testdata = TestData().testdata
sf_util.set_supports_resign(True, cls.dbConnection)
cls._connect_to_hypervisor()
# Set up SolidFire connection
solidfire = cls.testdata[TestData.solidFire]
cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])
# Get Resources from Cloud Infrastructure
cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
# Create test account
cls.account = Account.create(
cls.apiClient,
cls.testdata["account"],
admin=1
)
# Set up connection to make customized API calls
cls.user = User.create(
cls.apiClient,
cls.testdata["user"],
account=cls.account.name,
domainid=cls.domain.id
)
url = cls.testdata[TestData.url]
api_url = "http://" + url + ":8080/client/api"
userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)
cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
primarystorage = cls.testdata[TestData.primaryStorage]
cls.primary_storage = StoragePool.create(
cls.apiClient,
primarystorage,
scope=primarystorage[TestData.scope],
zoneid=cls.zone.id,
provider=primarystorage[TestData.provider],
tags=primarystorage[TestData.tags],
capacityiops=primarystorage[TestData.capacityIops],
capacitybytes=primarystorage[TestData.capacityBytes],
hypervisor=primarystorage[TestData.hypervisor]
)
cls.compute_offering = ServiceOffering.create(
cls.apiClient,
cls.testdata[TestData.computeOffering]
)
# Resources that are to be destroyed
cls._cleanup = [
cls.compute_offering,
cls.user,
cls.account
]
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiClient, cls._cleanup)
cls.primary_storage.delete(cls.apiClient)
sf_util.purge_solidfire_volumes(cls.sfe)
except Exception as e:
logging.debug("Exception in tearDownClass(cls): %s" % e)
def setUp(self):
self.cleanup = []
def tearDown(self):
cleanup_resources(self.apiClient, self.cleanup)
# Only two 'permanent' SRs per cluster
#
# Disable the second cluster
#
# Create VM
# Create VM
# Create VM (should fail)
# Take snapshot of first root disk
# Create a volume from this snapshot
# Attach new volume to second VM (should fail)
#
# Enable the second cluster
#
# Attach new volume to second VM (should fail)
# Create VM (should end up in new cluster)
# Delete first VM (this should free up one SR in the first cluster)
# Attach new volume to second VM
# Detach new volume from second VM
# Attach new volume to second VM
# Create a volume from the snapshot
# Attach this new volume to the second VM (should fail)
# Attach this new volume to the first VM in the new cluster
def test_managed_clustered_filesystems_limit(self):
args = { "id": self.testdata[TestData.clusterId2], TestData.allocationstate: "Disabled" }
Cluster.update(self.apiClient, **args)
virtual_machine_names = {
"name": "TestVM1",
"displayname": "Test VM 1"
}
virtual_machine_1 = self._create_vm(virtual_machine_names)
list_volumes_response = list_volumes(
self.apiClient,
virtualmachineid=virtual_machine_1.id,
listall=True
)
sf_util.check_list(list_volumes_response, 1, self, TestManagedClusteredFilesystems._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
virtual_machine_names = {
"name": "TestVM2",
"displayname": "Test VM 2"
}
virtual_machine_2 = self._create_vm(virtual_machine_names)
virtual_machine_names = {
"name": "TestVM3",
"displayname": "Test VM 3"
}
class VMStartedException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
try:
# The VM should fail to be created as there should be an insufficient number of clustered filesystems
# remaining in the compute cluster.
self._create_vm(virtual_machine_names)
raise VMStartedException("The VM should have failed to start.")
except VMStartedException:
raise
except Exception:
pass
vol_snap = Snapshot.create(
self.apiClient,
volume_id=vm_1_root_volume.id
)
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap.id, services, account=self.account.name, domainid=self.domain.id)
class VolumeAttachedException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
try:
# The volume should fail to be attached as there should be an insufficient number of clustered filesystems
# remaining in the compute cluster.
virtual_machine_2.attach_volume(
self.apiClient,
volume_created_from_snapshot_1
)
raise VolumeAttachedException(TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
except VolumeAttachedException:
raise
except Exception:
pass
args = { "id": self.testdata[TestData.clusterId2], TestData.allocationstate: "Enabled" }
Cluster.update(self.apiClient, **args)
try:
# The volume should fail to be attached as there should be an insufficient number of clustered filesystems
# remaining in the compute cluster.
virtual_machine_2.attach_volume(
self.apiClient,
volume_created_from_snapshot_1
)
raise VolumeAttachedException(TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
except VolumeAttachedException:
raise
except Exception:
pass
virtual_machine_names = {
"name": "TestVMA",
"displayname": "Test VM A"
}
virtual_machine_a = self._create_vm(virtual_machine_names)
host_for_vm_1 = list_hosts(self.apiClient, id=virtual_machine_1.hostid)[0]
host_for_vm_a = list_hosts(self.apiClient, id=virtual_machine_a.hostid)[0]
self.assertTrue(
host_for_vm_1.clusterid != host_for_vm_a.clusterid,
"VMs 1 and VM a should be in different clusters."
)
virtual_machine_1.delete(self.apiClient, True)
volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
self.apiClient,
volume_created_from_snapshot_1
)
virtual_machine_2.detach_volume(self.apiClient, volume_created_from_snapshot_1)
volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
self.apiClient,
volume_created_from_snapshot_1
)
services = {"diskname": "Vol-2", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
volume_created_from_snapshot_2 = Volume.create_from_snapshot(self.apiClient, vol_snap.id, services, account=self.account.name, domainid=self.domain.id)
try:
# The volume should fail to be attached as there should be an insufficient number of clustered filesystems
# remaining in the compute cluster.
virtual_machine_2.attach_volume(
self.apiClient,
volume_created_from_snapshot_2
)
raise VolumeAttachedException(TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
except VolumeAttachedException:
raise
except Exception:
pass
virtual_machine_a.attach_volume(
self.apiClient,
volume_created_from_snapshot_2
)
def _create_vm(self, virtual_machine_names):
return VirtualMachine.create(
self.apiClient,
virtual_machine_names,
accountid=self.account.name,
zoneid=self.zone.id,
serviceofferingid=self.compute_offering.id,
templateid=self.template.id,
domainid=self.domain.id,
startvm=True
)
@classmethod
def _connect_to_hypervisor(cls):
host_ip = "https://" + \
list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name=TestData.xen_server_hostname)[0].ipaddress
cls.xen_session = XenAPI.Session(host_ip)
xen_server = cls.testdata[TestData.xenServer]
cls.xen_session.xenapi.login_with_password(xen_server[TestData.username], xen_server[TestData.password])
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v1 import volume_metadata
from cinder.api.v1 import volumes
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v1 import stubs
CONF = cfg.CONF
def return_create_volume_metadata_max(context, volume_id, metadata, delete):
return stub_max_volume_metadata()
def return_create_volume_metadata(context, volume_id, metadata, delete):
return stub_volume_metadata()
def return_new_volume_metadata(context, volume_id, metadata, delete):
return stub_new_volume_metadata()
def return_create_volume_metadata_insensitive(context, volume_id,
                                              metadata, delete):
return stub_volume_metadata_insensitive()
def return_volume_metadata(context, volume_id):
if not isinstance(volume_id, str) or not len(volume_id) == 36:
msg = 'id %s must be a uuid in return volume metadata' % volume_id
raise Exception(msg)
return stub_volume_metadata()
def return_empty_volume_metadata(context, volume_id):
return {}
def return_empty_container_metadata(context, volume_id, metadata, delete):
return {}
def delete_volume_metadata(context, volume_id, key):
pass
def stub_volume_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_new_volume_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_volume_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_max_volume_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_volume(context, volume_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'metadata': {}}
def return_volume_nonexistent(context, volume_id):
raise exception.VolumeNotFound('bogus test message')
def fake_update_volume_metadata(self, context, volume, diff):
pass
class volumeMetaDataTest(test.TestCase):
def setUp(self):
super(volumeMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(cinder.db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(self.volume_api, 'update_volume_metadata',
fake_update_volume_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.volume_controller = volumes.VolumeController(self.ext_mgr)
self.controller = volume_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v1/fake/volumes/%s/metadata' % self.req_id
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"metadata": {}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.volume_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.index, req, self.req_id)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_delete',
delete_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v1/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3", }}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
        # If the same key is supplied in both uppercase and lowercase,
        # the response should contain the entries the server actually stored.
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_empty_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v1/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get',
return_volume_nonexistent)
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank('/v1/volume_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'volume_metadata_get',
return_create_volume_metadata)
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_new_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get', return_volume_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_volume(self):
self.stubs.Set(cinder.db, 'volume_get',
return_volume_nonexistent)
req = fakes.HTTPRequest.blank('/v1.1/fake/volumes/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'volume_metadata_update',
return_create_volume_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
        # Test for a long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
        # Test for a long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
        # Test for an empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
|
|
"""
Empirical likelihood inference on descriptive statistics
This module conducts hypothesis tests and constructs confidence
intervals for the mean, variance, skewness, kurtosis and correlation.
If matplotlib is installed, this module can also generate multivariate
confidence region plots as well as mean-variance contour plots.
See _OptFuncts docstring for technical details and optimization variable
definitions.
General References:
------------------
Owen, A. (2001). "Empirical Likelihood." Chapman and Hall
"""
from __future__ import division
import numpy as np
from scipy import optimize
from scipy.stats import chi2, skew, kurtosis
from statsmodels.base.optimizer import _fit_newton
import itertools
from statsmodels.graphics import utils
def DescStat(endog):
"""
Returns an instance to conduct inference on descriptive statistics
via empirical likelihood. See DescStatUV and DescStatMV for more
information.
Parameters
----------
endog : ndarray
Array of data
    Returns
    -------
    DescStatUV or DescStatMV instance
        If endog has a single column, the function returns a univariate
        instance, DescStatUV. If it has more than one column, the function
        returns a multivariate instance, DescStatMV.
"""
if endog.ndim == 1:
endog = endog.reshape(len(endog), 1)
if endog.shape[1] == 1:
return DescStatUV(endog)
if endog.shape[1] > 1:
return DescStatMV(endog)
class _OptFuncts(object):
"""
A class that holds functions that are optimized/solved.
The general setup of the class is simple. Any method that starts with
_opt_ creates a vector of estimating equations named est_vect such that
    np.dot(p, est_vect) = 0, where p is the weight on each
observation as a 1 x n array and est_vect is n x k. Then _modif_Newton is
called to determine the optimal p by solving for the Lagrange multiplier
(eta) in the profile likelihood maximization problem. In the presence
of nuisance parameters, _opt_ functions are optimized over to profile
out the nuisance parameters.
Any method starting with _ci_limits calculates the log likelihood
ratio for a specific value of a parameter and then subtracts a
pre-specified critical value. This is solved so that llr - crit = 0.
"""
def __init__(self, endog):
pass
def _log_star(self, eta, est_vect, weights, nobs):
"""
Transforms the log of observation probabilities in terms of the
Lagrange multiplier to the log 'star' of the probabilities.
Parameters
----------
eta : float
Lagrange multiplier
est_vect : ndarray (n,k)
Estimating equations vector
        weights : ndarray, (n,)
            Observation weights

        Returns
        -------
        data_star : ndarray
            The weighted log-star of the estimating equations
Notes
-----
        This function is only used as the objective handed to _fit_newton.
        Its value is not used in the optimization, and the optimal value is
        disregarded when computing the log likelihood ratio.
"""
data_star = np.log(weights) + (np.sum(weights) +\
np.dot(est_vect, eta))
idx = data_star < 1. / nobs
not_idx = ~idx
nx = nobs * data_star[idx]
data_star[idx] = np.log(1. / nobs) - 1.5 + nx * (2. - nx / 2)
data_star[not_idx] = np.log(data_star[not_idx])
return data_star
def _hess(self, eta, est_vect, weights, nobs):
"""
Calculates the hessian of a weighted empirical likelihood
problem.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
hess : m x m array
            Weighted hessian used in _modif_newton
"""
#eta = np.squeeze(eta)
data_star_doub_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_doub_prime < 1. / nobs
not_idx = ~idx
data_star_doub_prime[idx] = - nobs ** 2
data_star_doub_prime[not_idx] = - (data_star_doub_prime[not_idx]) ** -2
wtd_dsdp = weights * data_star_doub_prime
return np.dot(est_vect.T, wtd_dsdp[:, None] * est_vect)
def _grad(self, eta, est_vect, weights, nobs):
"""
Calculates the gradient of a weighted empirical likelihood
problem
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
gradient : ndarray (m,1)
            The gradient used in _modif_newton
"""
#eta = np.squeeze(eta)
data_star_prime = np.sum(weights) + np.dot(est_vect, eta)
idx = data_star_prime < 1. / nobs
not_idx = ~idx
data_star_prime[idx] = nobs * (2 - nobs * data_star_prime[idx])
data_star_prime[not_idx] = 1. / data_star_prime[not_idx]
return np.dot(weights * data_star_prime, est_vect)
def _modif_newton(self, eta, est_vect, weights):
"""
Modified Newton's method for maximizing the log 'star' equation. This
function calls _fit_newton to find the optimal values of eta.
Parameters
----------
eta : ndarray, (1,m)
Lagrange multiplier in the profile likelihood maximization
est_vect : ndarray, (n,k)
Estimating equations vector
weights : 1darray
Observation weights
Returns
-------
params : 1xm array
Lagrange multiplier that maximizes the log-likelihood
"""
nobs = len(est_vect)
f = lambda x0: - np.sum(self._log_star(x0, est_vect, weights, nobs))
grad = lambda x0: - self._grad(x0, est_vect, weights, nobs)
hess = lambda x0: - self._hess(x0, est_vect, weights, nobs)
kwds = {'tol': 1e-8}
eta = eta.squeeze()
res = _fit_newton(f, grad, eta, (), kwds, hess=hess, maxiter=50, \
disp=0)
return res[0]
def _find_eta(self, eta):
"""
        Finding the root of sum((x_i - mu0) / (1 + eta * (x_i - mu0))) = 0
        solves for eta when computing the ELR for the univariate mean.
Parameters
----------
eta : float
Lagrange multiplier in the empirical likelihood maximization
Returns
-------
        f : float
            Value of the estimating equation at eta; its root gives the
            optimal Lagrange multiplier
"""
return np.sum((self.endog - self.mu0) / \
(1. + eta * (self.endog - self.mu0)))
def _ci_limits_mu(self, mu):
"""
Calculates the difference between the log likelihood of mu_test and a
specified critical value.
Parameters
----------
mu : float
Hypothesized value of the mean.
Returns
-------
diff : float
The difference between the log likelihood value of mu0 and
a specified value.
"""
return self.test_mean(mu)[0] - self.r0
def _find_gamma(self, gamma):
"""
        Finds gamma that satisfies
        -2 * sum(log(nobs * w(gamma))) - r0 = 0

        Used for confidence intervals for the mean.
Parameters
----------
gamma : float
Lagrange multiplier when computing confidence interval
Returns
-------
diff : float
            The difference between the log-likelihood when the Lagrange
multiplier is gamma and a pre-specified value
"""
denom = np.sum((self.endog - gamma) ** -1)
new_weights = (self.endog - gamma) ** -1 / denom
return -2 * np.sum(np.log(self.nobs * new_weights)) - \
self.r0
def _opt_var(self, nuisance_mu, pval=False):
"""
This is the function to be optimized over a nuisance mean parameter
to determine the likelihood ratio for the variance
Parameters
----------
nuisance_mu : float
Value of a nuisance mean parameter
Returns
-------
llr : float
Log likelihood of a pre-specified variance holding the nuisance
parameter constant
"""
endog = self.endog
nobs = self.nobs
sig_data = ((endog - nuisance_mu) ** 2 \
- self.sig2_0)
mu_data = (endog - nuisance_mu)
est_vect = np.column_stack((mu_data, sig_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
if pval: # Used for contour plotting
return chi2.sf(-2 * llr, 1)
return -2 * llr
def _ci_limits_var(self, var):
"""
        Used to determine the confidence intervals for the variance.
        It calls test_var and, when called by an optimizer, finds the value
        of sig2_0 where the log-likelihood ratio equals
        chi2.ppf(1 - significance level, 1)

        Parameters
        ----------
        var : float
Hypothesized value of the variance
Returns
-------
diff : float
The difference between the log likelihood ratio at var_test and a
pre-specified value.
"""
return self.test_var(var)[0] - self.r0
def _opt_skew(self, nuis_params):
"""
Called by test_skew. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
The log likelihood ratio of a pre-specified skewness holding
the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = ((((endog - nuis_params[0]) ** 3) /
(nuis_params[1] ** 1.5))) - self.skew0
est_vect = np.column_stack((mu_data, sig_data, skew_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _opt_kurt(self, nuis_params):
"""
Called by test_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
----------
nuis_params : 1darray
An array with a nuisance mean and variance parameter
Returns
-------
llr : float
            The log likelihood ratio of a pre-specified kurtosis holding the
nuisance parameters constant
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
kurt_data = (((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2))) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _opt_skew_kurt(self, nuis_params):
"""
Called by test_joint_skew_kurt. This function is optimized over
nuisance parameters mu and sigma
Parameters
        ----------
        nuis_params : 1darray
            An array with a nuisance mean and variance parameter

        Returns
        -------
        llr : float
            The log likelihood ratio of a pre-specified skewness and
kurtosis holding the nuisance parameters constant.
"""
endog = self.endog
nobs = self.nobs
mu_data = endog - nuis_params[0]
sig_data = ((endog - nuis_params[0]) ** 2) - nuis_params[1]
skew_data = ((((endog - nuis_params[0]) ** 3) / \
(nuis_params[1] ** 1.5))) - self.skew0
kurt_data = (((((endog - nuis_params[0]) ** 4) / \
(nuis_params[1] ** 2))) - 3) - self.kurt0
est_vect = np.column_stack((mu_data, sig_data, skew_data, kurt_data))
eta_star = self._modif_newton(np.array([1. / nobs,
1. / nobs,
1. / nobs,
1. / nobs]), est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1. + np.dot(eta_star, est_vect.T)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _ci_limits_skew(self, skew):
"""
Parameters
----------
        skew : float
Hypothesized value of skewness
Returns
-------
diff : float
The difference between the log likelihood ratio at skew and a
pre-specified value.
"""
return self.test_skew(skew)[0] - self.r0
def _ci_limits_kurt(self, kurt):
"""
Parameters
        ----------
        kurt : float
Hypothesized value of kurtosis
Returns
-------
diff : float
The difference between the log likelihood ratio at kurt and a
pre-specified value.
"""
return self.test_kurt(kurt)[0] - self.r0
def _opt_correl(self, nuis_params, corr0, endog, nobs, x0, weights0):
"""
Parameters
----------
nuis_params : 1darray
Array containing two nuisance means and two nuisance variances
Returns
-------
llr : float
The log-likelihood of the correlation coefficient holding nuisance
parameters constant
"""
mu1_data, mu2_data = (endog - nuis_params[::2]).T
sig1_data = mu1_data ** 2 - nuis_params[1]
sig2_data = mu2_data ** 2 - nuis_params[3]
correl_data = ((mu1_data * mu2_data) - corr0 *
(nuis_params[1] * nuis_params[3]) ** .5)
est_vect = np.column_stack((mu1_data, sig1_data,
mu2_data, sig2_data, correl_data))
eta_star = self._modif_newton(x0, est_vect, weights0)
denom = 1. + np.dot(est_vect, eta_star)
self.new_weights = 1. / nobs * 1. / denom
llr = np.sum(np.log(nobs * self.new_weights))
return -2 * llr
def _ci_limits_corr(self, corr):
return self.test_corr(corr)[0] - self.r0
class DescStatUV(_OptFuncts):
"""
A class to compute confidence intervals and hypothesis tests involving
mean, variance, kurtosis and skewness of a univariate random variable.
Parameters
----------
endog : 1darray
Data to be analyzed
Attributes
----------
endog : 1darray
Data to be analyzed
nobs : float
Number of observations
"""
def __init__(self, endog):
self.endog = np.squeeze(endog)
self.nobs = endog.shape[0]
def test_mean(self, mu0, return_weights=False):
"""
        Returns -2 x log-likelihood ratio, p-value and weights
for a hypothesis test of the mean.
Parameters
----------
mu0 : float
Mean value to be tested
return_weights : bool
            If return_weights is True the function returns
the weights of the observations under the null hypothesis.
Default is False
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of mu0
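
        Examples
        --------
        A minimal illustrative sketch (simulated data, so results vary):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> llr, p_val = el_analysis.test_mean(0.)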
"""
self.mu0 = mu0
endog = self.endog
nobs = self.nobs
eta_min = (1. - (1. / nobs)) / (self.mu0 - max(endog))
eta_max = (1. - (1. / nobs)) / (self.mu0 - min(endog))
eta_star = optimize.brentq(self._find_eta, eta_min, eta_max)
new_weights = (1. / nobs) * 1. / (1. + eta_star * (endog - self.mu0))
llr = -2 * np.sum(np.log(nobs * new_weights))
if return_weights:
return llr, chi2.sf(llr, 1), new_weights
else:
return llr, chi2.sf(llr, 1)
def ci_mean(self, sig=.05, method='gamma', epsilon=10 ** -8,
gamma_low=-10 ** 10, gamma_high=10 ** 10):
"""
Returns the confidence interval for the mean.
Parameters
----------
sig : float
significance level. Default is .05
        method : str
            Root finding method. Can be 'nested-brent' or
            'gamma'. Default is 'gamma'

            'gamma' tries to solve for the gamma parameter in the
            Lagrangian (see Owen pg 22) and then determines the weights.

            'nested-brent' uses Brent's method to find the confidence
            intervals but must maximize the likelihood ratio on every
            iteration.

            gamma is generally much faster. If the optimization does not
            converge, try expanding the gamma_high and gamma_low
            variables.
        gamma_low : float
            Lower bound for gamma when finding the lower limit.
            If the optimizer raises the error 'f(a) and f(b) must have
            different signs', consider lowering gamma_low.
        gamma_high : float
            Upper bound for gamma when finding the upper limit.
            If the optimizer raises the error 'f(a) and f(b) must have
            different signs', consider raising gamma_high.
        epsilon : float
            When using 'nested-brent', amount to decrease (increase)
            from the maximum (minimum) of the data when
            starting the search. This is to protect against the
            likelihood ratio being zero at the maximum (minimum)
            value of the data. If the data are very small in absolute
            value (< 10 ``**`` -6) consider shrinking epsilon.

            When using 'gamma', amount to decrease (increase) the
            minimum (maximum) by to start the search for gamma.
            If the optimizer raises the error 'f(a) and f(b) must have
            different signs', consider lowering epsilon.
Returns
-------
Interval : tuple
Confidence interval for the mean
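
        Examples
        --------
        An illustrative sketch (simulated data, so the interval varies):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> ci_low, ci_high = el_analysis.ci_mean()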
"""
endog = self.endog
sig = 1 - sig
if method == 'nested-brent':
self.r0 = chi2.ppf(sig, 1)
middle = np.mean(endog)
epsilon_u = (max(endog) - np.mean(endog)) * epsilon
epsilon_l = (np.mean(endog) - min(endog)) * epsilon
ulim = optimize.brentq(self._ci_limits_mu, middle,
max(endog) - epsilon_u)
llim = optimize.brentq(self._ci_limits_mu, middle,
min(endog) + epsilon_l)
return llim, ulim
if method == 'gamma':
self.r0 = chi2.ppf(sig, 1)
gamma_star_l = optimize.brentq(self._find_gamma, gamma_low,
min(endog) - epsilon)
gamma_star_u = optimize.brentq(self._find_gamma, \
max(endog) + epsilon, gamma_high)
weights_low = ((endog - gamma_star_l) ** -1) / \
np.sum((endog - gamma_star_l) ** -1)
weights_high = ((endog - gamma_star_u) ** -1) / \
np.sum((endog - gamma_star_u) ** -1)
mu_low = np.sum(weights_low * endog)
mu_high = np.sum(weights_high * endog)
return mu_low, mu_high
def test_var(self, sig2_0, return_weights=False):
"""
        Returns -2 x log-likelihood ratio and the p-value for the
hypothesized variance
Parameters
----------
sig2_0 : float
Hypothesized variance to be tested
return_weights : bool
If True, returns the weights that maximize the
likelihood of observing sig2_0. Default is False
Returns
--------
test_results : tuple
The log-likelihood ratio and the p_value of sig2_0
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> random_numbers = np.random.standard_normal(1000)*100
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> hyp_test = el_analysis.test_var(9500)
"""
self.sig2_0 = sig2_0
mu_max = max(self.endog)
mu_min = min(self.endog)
llr = optimize.fminbound(self._opt_var, mu_min, mu_max, \
full_output=1)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
else:
return llr, p_val
def ci_var(self, lower_bound=None, upper_bound=None, sig=.05):
"""
Returns the confidence interval for the variance.
Parameters
----------
lower_bound : float
The minimum value the lower confidence interval can
take. The p-value from test_var(lower_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
upper_bound : float
The maximum value the upper confidence interval
can take. The p-value from test_var(upper_bound) must be lower
than 1 - significance level. Default is .99 confidence
limit assuming normality
sig : float
The significance level. Default is .05
Returns
--------
Interval : tuple
Confidence interval for the variance
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> random_numbers = np.random.standard_normal(100)
>>> el_analysis = sm.emplike.DescStat(random_numbers)
>>> el_analysis.ci_var()
(0.7539322567470305, 1.229998852496268)
>>> el_analysis.ci_var(.5, 2)
(0.7539322567469926, 1.2299988524962664)
Notes
-----
        If the function raises the error 'f(a) and f(b) must have
        different signs', consider lowering lower_bound and raising
        upper_bound.
"""
endog = self.endog
if upper_bound is None:
upper_bound = ((self.nobs - 1) * endog.var()) / \
(chi2.ppf(.0001, self.nobs - 1))
if lower_bound is None:
lower_bound = ((self.nobs - 1) * endog.var()) / \
(chi2.ppf(.9999, self.nobs - 1))
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_var, lower_bound, endog.var())
ulim = optimize.brentq(self._ci_limits_var, endog.var(), upper_bound)
return llim, ulim
def plot_contour(self, mu_low, mu_high, var_low, var_high, mu_step,
var_step,
levs=[.2, .1, .05, .01, .001]):
"""
Returns a plot of the confidence region for a univariate
mean and variance.
Parameters
----------
mu_low : float
Lowest value of the mean to plot
mu_high : float
Highest value of the mean to plot
var_low : float
Lowest value of the variance to plot
var_high : float
Highest value of the variance to plot
mu_step : float
Increments to evaluate the mean
var_step : float
            Increments to evaluate the variance
levs : list
            Significance levels at which the contour lines will be drawn.
Default is [.2, .1, .05, .01, .001]
Returns
-------
fig : matplotlib figure instance
The contour plot
"""
fig, ax = utils.create_mpl_ax()
ax.set_ylabel('Variance')
ax.set_xlabel('Mean')
mu_vect = list(np.arange(mu_low, mu_high, mu_step))
var_vect = list(np.arange(var_low, var_high, var_step))
z = []
for sig0 in var_vect:
self.sig2_0 = sig0
for mu0 in mu_vect:
z.append(self._opt_var(mu0, pval=True))
z = np.asarray(z).reshape(len(var_vect), len(mu_vect))
ax.contour(mu_vect, var_vect, z, levels=levs)
return fig
def test_skew(self, skew0, return_weights=False):
"""
Returns -2 x log-likelihood and p-value for the hypothesized
skewness.
Parameters
----------
skew0 : float
Skewness value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
--------
test_results : tuple
The log-likelihood ratio and p_value of skew0
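
        Examples
        --------
        A minimal illustrative sketch (simulated data, so results vary):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> llr, p_val = el_analysis.test_skew(0.)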
"""
self.skew0 = skew0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_skew, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def test_kurt(self, kurt0, return_weights=False):
"""
Returns -2 x log-likelihood and the p-value for the hypothesized
kurtosis.
Parameters
----------
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of kurt0
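
        Examples
        --------
        A minimal illustrative sketch (simulated data, so results vary):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> llr, p_val = el_analysis.test_kurt(0.)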
"""
self.kurt0 = kurt0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_kurt, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def test_joint_skew_kurt(self, skew0, kurt0, return_weights=False):
"""
        Returns -2 x log-likelihood and the p-value for the joint
hypothesis test for skewness and kurtosis
Parameters
----------
skew0 : float
Skewness value to be tested
kurt0 : float
Kurtosis value to be tested
return_weights : bool
If True, function also returns the weights that
maximize the likelihood ratio. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value of the joint hypothesis test.
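
        Examples
        --------
        A minimal illustrative sketch (simulated data, so results vary):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> llr, p_val = el_analysis.test_joint_skew_kurt(0., 0.)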
"""
self.skew0 = skew0
self.kurt0 = kurt0
start_nuisance = np.array([self.endog.mean(),
self.endog.var()])
llr = optimize.fmin_powell(self._opt_skew_kurt, start_nuisance,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 2)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def ci_skew(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence interval for skewness.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of skewness the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of skewness the lower limit can be.
Default is .99 confidence level assuming normality.
Returns
-------
Interval : tuple
Confidence interval for the skewness
Notes
-----
        If the function raises the error 'f(a) and f(b) must have different
        signs', consider expanding the lower and upper bounds.
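
        Examples
        --------
        An illustrative sketch (simulated data, so the interval varies):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> random_numbers = np.random.standard_normal(100)
        >>> el_analysis = sm.emplike.DescStat(random_numbers)
        >>> lower, upper = el_analysis.ci_skew()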
"""
nobs = self.nobs
endog = self.endog
if upper_bound is None:
upper_bound = skew(endog) + \
2.5 * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5
if lower_bound is None:
lower_bound = skew(endog) - \
2.5 * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_skew, lower_bound, skew(endog))
ulim = optimize.brentq(self._ci_limits_skew, skew(endog), upper_bound)
return llim, ulim
def ci_kurt(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence interval for kurtosis.
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value of kurtosis the upper limit can be.
Default is .99 confidence limit assuming normality.
lower_bound : float
Minimum value of kurtosis the lower limit can be.
Default is .99 confidence limit assuming normality.
Returns
--------
Interval : tuple
Lower and upper confidence limit
Notes
-----
For small n, upper_bound and lower_bound may have to be
provided by the user. Consider using test_kurt to find
values close to the desired significance level.
        If the function raises the error 'f(a) and f(b) must have different
        signs', consider expanding the bounds.
"""
endog = self.endog
nobs = self.nobs
if upper_bound is None:
upper_bound = kurtosis(endog) + \
(2.5 * (2. * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5) * \
(((nobs ** 2.) - 1.) / ((nobs - 3.) *\
(nobs + 5.))) ** .5)
if lower_bound is None:
lower_bound = kurtosis(endog) - \
(2.5 * (2. * ((6. * nobs * (nobs - 1.)) / \
((nobs - 2.) * (nobs + 1.) * \
(nobs + 3.))) ** .5) * \
(((nobs ** 2.) - 1.) / ((nobs - 3.) *\
(nobs + 5.))) ** .5)
self.r0 = chi2.ppf(1 - sig, 1)
llim = optimize.brentq(self._ci_limits_kurt, lower_bound, \
kurtosis(endog))
ulim = optimize.brentq(self._ci_limits_kurt, kurtosis(endog), \
upper_bound)
return llim, ulim
class DescStatMV(_OptFuncts):
"""
A class for conducting inference on multivariate means and correlation.
Parameters
----------
endog : ndarray
Data to be analyzed
Attributes
----------
endog : ndarray
Data to be analyzed
nobs : float
Number of observations
"""
def __init__(self, endog):
self.endog = endog
self.nobs = endog.shape[0]
def mv_test_mean(self, mu_array, return_weights=False):
"""
Returns -2 x log likelihood and the p-value
for a multivariate hypothesis test of the mean
Parameters
----------
mu_array : 1d array
Hypothesized values for the mean. Must have same number of
elements as columns in endog
return_weights : bool
If True, returns the weights that maximize the
likelihood of mu_array. Default is False.
Returns
-------
test_results : tuple
The log-likelihood ratio and p-value for mu_array
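
        Examples
        --------
        An illustrative sketch (simulated bivariate data, so results vary):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> two_rvs = np.random.standard_normal((50, 2))
        >>> el_analysis = sm.emplike.DescStat(two_rvs)
        >>> llr, p_val = el_analysis.mv_test_mean(np.array([0., 0.]))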
"""
endog = self.endog
nobs = self.nobs
if len(mu_array) != endog.shape[1]:
            raise Exception('mu_array must have the same number of '
                            'elements as the columns of the data.')
mu_array = mu_array.reshape(1, endog.shape[1])
means = np.ones((endog.shape[0], endog.shape[1]))
means = mu_array * means
est_vect = endog - means
start_vals = 1. / nobs * np.ones(endog.shape[1])
eta_star = self._modif_newton(start_vals, est_vect,
np.ones(nobs) * (1. / nobs))
denom = 1 + np.dot(eta_star, est_vect.T)
self.new_weights = 1 / nobs * 1 / denom
llr = -2 * np.sum(np.log(nobs * self.new_weights))
p_val = chi2.sf(llr, mu_array.shape[1])
if return_weights:
return llr, p_val, self.new_weights.T
else:
return llr, p_val
def mv_mean_contour(self, mu1_low, mu1_upp, mu2_low, mu2_upp, step1, step2,
levs=(.001, .01, .05, .1, .2), var1_name=None,
var2_name=None, plot_dta=False):
"""
Creates a confidence region plot for the mean of bivariate data
Parameters
----------
        mu1_low : float
            Minimum value of the mean for variable 1
        mu1_upp : float
            Maximum value of the mean for variable 1
mu2_low : float
Minimum value of the mean for variable 2
mu2_upp : float
Maximum value of the mean for variable 2
step1 : float
Increment of evaluations for variable 1
step2 : float
Increment of evaluations for variable 2
levs : list
Levels to be drawn on the contour plot.
Default = (.001, .01, .05, .1, .2)
plot_dta : bool
            If True, makes a scatter plot of the data on
            top of the contour plot. Default is False.
var1_name : str
Name of variable 1 to be plotted on the x-axis
var2_name : str
Name of variable 2 to be plotted on the y-axis
Notes
-----
The smaller the step size, the more accurate the intervals
will be
        If the function reports that the optimization failed, consider
        narrowing the boundaries of the plot.
Examples
--------
>>> import statsmodels.api as sm
>>> two_rvs = np.random.standard_normal((20,2))
>>> el_analysis = sm.emplike.DescStat(two_rvs)
>>> contourp = el_analysis.mv_mean_contour(-2, 2, -2, 2, .1, .1)
>>> contourp.show()
"""
if self.endog.shape[1] != 2:
raise Exception('Data must contain exactly two variables')
fig, ax = utils.create_mpl_ax()
if var2_name is None:
ax.set_ylabel('Variable 2')
else:
ax.set_ylabel(var2_name)
if var1_name is None:
ax.set_xlabel('Variable 1')
else:
ax.set_xlabel(var1_name)
x = np.arange(mu1_low, mu1_upp, step1)
y = np.arange(mu2_low, mu2_upp, step2)
pairs = itertools.product(x, y)
z = []
for i in pairs:
z.append(self.mv_test_mean(np.asarray(i))[0])
X, Y = np.meshgrid(x, y)
z = np.asarray(z)
z = z.reshape(X.shape[1], Y.shape[0])
ax.contour(x, y, z.T, levels=levs)
if plot_dta:
ax.plot(self.endog[:, 0], self.endog[:, 1], 'bo')
return fig
def test_corr(self, corr0, return_weights=0):
"""
Returns -2 x log-likelihood ratio and p-value for the
correlation coefficient between 2 variables
Parameters
----------
corr0 : float
Hypothesized value to be tested
return_weights : bool
If true, returns the weights that maximize
the log-likelihood at the hypothesized value
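
        Examples
        --------
        An illustrative sketch (simulated data; the optimization over the
        four nuisance parameters can be slow for large samples):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> two_rvs = np.random.standard_normal((50, 2))
        >>> el_analysis = sm.emplike.DescStat(two_rvs)
        >>> llr, p_val = el_analysis.test_corr(0.)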
"""
nobs = self.nobs
endog = self.endog
if endog.shape[1] != 2:
raise Exception('Correlation matrix not yet implemented')
nuis0 = np.array([endog[:, 0].mean(),
endog[:, 0].var(),
endog[:, 1].mean(),
endog[:, 1].var()])
x0 = np.zeros(5)
weights0 = np.array([1. / nobs] * int(nobs))
args = (corr0, endog, nobs, x0, weights0)
llr = optimize.fmin(self._opt_correl, nuis0, args=args,
full_output=1, disp=0)[1]
p_val = chi2.sf(llr, 1)
if return_weights:
return llr, p_val, self.new_weights.T
return llr, p_val
def ci_corr(self, sig=.05, upper_bound=None, lower_bound=None):
"""
Returns the confidence intervals for the correlation coefficient
Parameters
----------
sig : float
The significance level. Default is .05
upper_bound : float
Maximum value the upper confidence limit can be.
Default is 99% confidence limit assuming normality.
lower_bound : float
            Minimum value the lower confidence limit can be.
Default is 99% confidence limit assuming normality.
Returns
-------
interval : tuple
Confidence interval for the correlation
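
        Examples
        --------
        An illustrative sketch (simulated data, so the interval varies):

        >>> import numpy as np
        >>> import statsmodels.api as sm
        >>> two_rvs = np.random.standard_normal((50, 2))
        >>> el_analysis = sm.emplike.DescStat(two_rvs)
        >>> lower, upper = el_analysis.ci_corr()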
"""
endog = self.endog
nobs = self.nobs
self.r0 = chi2.ppf(1 - sig, 1)
point_est = np.corrcoef(endog[:, 0], endog[:, 1])[0, 1]
if upper_bound is None:
upper_bound = min(.999, point_est + \
2.5 * ((1. - point_est ** 2.) / \
(nobs - 2.)) ** .5)
if lower_bound is None:
lower_bound = max(- .999, point_est - \
2.5 * (np.sqrt((1. - point_est ** 2.) / \
(nobs - 2.))))
llim = optimize.brenth(self._ci_limits_corr, lower_bound, point_est)
ulim = optimize.brenth(self._ci_limits_corr, point_est, upper_bound)
return llim, ulim
|
|
from collections import OrderedDict
from sfsimodels.exceptions import deprecation
import numpy as np
from sfsimodels.exceptions import ModelError, AnalysisError
from sfsimodels.functions import clean_float
from sfsimodels.models.abstract_models import PhysicalObject
from sfsimodels import checking_tools as ct
from sfsimodels import functions as sf
MASS_DENSITY_WATER = 1.0e3
class Soil(PhysicalObject):
"""
An object to simulate an element of soil
"""
_id = None
name = None
base_type = "soil"
type = "soil"
stype = "soil"
# strength parameters
_phi = None
_cohesion = None
# volume and weight
_e_min = None
_e_max = None
_e_curr = None
_dilation_angle = None
_relative_density = None # [decimal]
_specific_gravity = None
_unit_dry_weight = None
_unit_sat_weight = None
_unit_moist_weight = None
_saturation = None
_tolerance = 0.0001 # consistency tolerance
_permeability = None
# deformation parameters
_g_mod = None # Shear modulus [Pa]
_bulk_mod = None # Bulk modulus [Pa]
_poissons_ratio = None
_plasticity_index = None
_liq_sg = 1
def __init__(self, pw=None, wmd=None, liq_mass_density=None, liq_sg=1, g=9.8, **kwargs):
        # Note: liq_mass_density and pw are deprecated; prefer wmd and liq_sg
self._gravity = g # m/s2
self._liq_sg = liq_sg
if liq_mass_density and wmd is None:
self._wmd = liq_mass_density / liq_sg
elif pw is not None:
if pw == 9800 and g == 9.8:
_liq_mass_density = 1.0e3
else:
_liq_mass_density = pw / self._gravity
self._wmd = _liq_mass_density / liq_sg
elif wmd is None:
self._wmd = 1000
else:
self._wmd = wmd
self.stack = [('gravity', self._gravity), ('wmd', self._wmd), ('liq_sg', self._liq_sg)]
self._extra_class_inputs = [
"id",
"name",
"base_type",
"type",
"stype",
"g_mod",
"bulk_mod",
"poissons_ratio",
"phi",
"dilation_angle",
"e_min",
"e_max",
"e_curr",
"relative_density",
"specific_gravity",
"unit_dry_weight",
"unit_sat_weight",
"saturation",
"cohesion",
"plasticity_index",
"permeability",
"gravity",
"wmd",
"liq_sg"
]
if not hasattr(self, "inputs"):
self.inputs = []
self.inputs += list(self._extra_class_inputs)
for param in kwargs:
if param in self.inputs:
setattr(self, param, kwargs[param])
@property
def ancestor_types(self):
"""View list of types from inherited objects"""
parent_ancestor_types = super(Soil, self).ancestor_types
return parent_ancestor_types + ["soil"]
def override(self, item, value):
"""
        Set a parameter to a value that may be inconsistent with existing values.

        This method sets the inconsistent value and then reapplies all existing
        values that are still consistent; all non-consistent (conflicting)
        values are removed from the object and returned as a list.
:param item: name of parameter to be set
:param value: value of the parameter to be set
:return: list, conflicting values
"""
if not hasattr(self, item):
            raise KeyError("Soil Object does not have property: %s" % item)
try:
setattr(self, item, value) # try to set using normal setter method
return []
except ModelError:
pass # if inconsistency, then need to rebuild stack
# create a new temporary stack
temp_stack = list(self.stack)
# remove item from original position in stack
temp_stack[:] = (value for value in temp_stack if value[0] != item)
# add item to the start of the stack
temp_stack.insert(0, (item, value))
# clear object, ready to rebuild
self.reset_all()
# reapply trace, one item at a time, if conflict then don't add the conflict.
conflicts = []
for item, value in temp_stack:
# catch all conflicts
try:
setattr(self, item, value)
if item in ['gravity', 'wmd', 'liq_sg']:
self._add_to_stack(item, value)
except ModelError:
conflicts.append(item)
return conflicts
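    # Illustrative sketch of `override` (values are assumed, not from the
    # source; which conflicts arise depends on what is already set):
    # >>> sl = Soil()
    # >>> sl.unit_dry_weight = 16000.
    # >>> sl.e_curr = 0.65
    # >>> conflicts = sl.override('e_curr', 0.7)  # returns dropped parameters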
def reset_all(self):
"""
Resets all parameters to None
"""
for item in self.inputs:
setattr(self, "_%s" % item, None)
self.stack = []
def _add_to_stack(self, item, value):
"""
Add a parameter-value pair to the stack of parameters that have been set.
:param item:
:param value:
:return:
"""
p_value = (item, value)
if p_value not in self.stack:
self.stack.append(p_value)
@property
def id(self):
"""Object id"""
return self._id
@property
def phi(self):
"""Internal friction angle of the soil"""
return self._phi
@property
def dilation_angle(self):
"""
Internal dilation angle of the soil
peak_angle = phi + dilation_angle
"""
return self._dilation_angle
@property
def cohesion(self):
"""Cohesive strength of the soil"""
return self._cohesion
@property
def unit_dry_weight(self):
"""The unit weight of the soil if saturation=0"""
return self._unit_dry_weight
@property
def e_curr(self):
"""The current void ratio of the soil"""
return self._e_curr
@property
def specific_gravity(self):
"""The specific gravity of the soil"""
return self._specific_gravity
@property
def pw(self):
"""Specific weight of water"""
deprecation('Soil.pw is deprecated, will be removed. Use Soil.ulw')
return self.ulw
@property
def wmd(self):
return self._wmd
@wmd.setter
def wmd(self, value):
self._wmd = value
@property
def liq_mass_density(self):
return self._wmd * self._liq_sg
@property
def gravity(self):
return self._gravity
@property
def g(self):
return self._gravity
@gravity.setter
def gravity(self, value):
self._gravity = value
@g.setter
def g(self, value):
self._gravity = value
@liq_mass_density.setter
def liq_mass_density(self, value):
        deprecation('liq_mass_density is deprecated, set liq_sg or wmd')
self._wmd = value / self.liq_sg
@property
def ulw(self):
"""Unit weight of liquid"""
return self.g * self.liq_mass_density
@property
def saturation(self):
"""The current saturation of the soil"""
return self._saturation
@property
def plasticity_index(self):
"""The plasticity index of the soil"""
return self._plasticity_index
@property
def moisture_content(self):
"""
        The moisture content of the soil: unit_moisture_weight / unit_dry_weight.
"""
try:
return self._calc_unit_moisture_weight() / self.unit_dry_weight
except TypeError:
return None
@property
def porosity(self):
"""Soil porosity"""
try:
return self.e_curr / (1 + self.e_curr)
except TypeError:
return None
@property
def unit_sat_weight(self):
"""The weight of the soil if saturation=1"""
return self._unit_sat_weight
@property
def unit_moist_weight(self):
"""The unit moist weight of the soil (accounts for saturation level)"""
return self._unit_moist_weight
@property
def unit_moist_mass(self):
"""The unit moist mass of the soil (accounts for saturation level)"""
return self._unit_moist_weight / self._gravity
@property
def unit_bouy_weight(self):
"""The unit moist weight of the soil (accounts for saturation level)"""
try:
return self._unit_sat_weight - self.ulw
except TypeError:
return None
@property
def unit_weight(self):
"""
The unit moist weight of the soil (accounts for saturation level)
:return: float
"""
if self.saturation is not None:
return self.unit_moist_weight
return self.unit_dry_weight
def get_unit_weight_or(self, alt='none'):
if self.saturation is not None:
return self.unit_moist_weight
elif alt == 'dry':
return self.unit_dry_weight
elif alt == 'sat':
return self.unit_sat_weight
return None
@property
def unit_dry_mass(self):
"""The mass of the soil in dry state"""
try:
return self._unit_dry_weight / self._gravity
except TypeError:
return None
@property
def unit_sat_mass(self):
"""The mass of the soil when fully saturated"""
try:
return self._unit_sat_weight / self._gravity
except TypeError:
return None
def get_shear_vel(self, saturated):
"""
Calculate the shear wave velocity
:param saturated: bool, if true then use saturated mass
:return:
"""
try:
if saturated:
return np.sqrt(self.g_mod / self.unit_sat_mass)
else:
return np.sqrt(self.g_mod / self.unit_dry_mass)
except TypeError:
return None
def calc_shear_vel(self, saturated):
deprecation("Use get_shear_vel")
return self.get_shear_vel(saturated)
def set_g_mod_from_shear_vel(self, shear_val, saturated):
if saturated:
self.g_mod = shear_val ** 2 * self.unit_sat_mass
else:
self.g_mod = shear_val ** 2 * self.unit_dry_mass
def get_unit_mass(self, saturated):
if saturated:
return self.unit_sat_mass
else:
return self.unit_dry_mass
@property
def permeability(self):
"""The permeability of the soil"""
return self._permeability
@property
def phi_r(self):
"""internal friction angle in radians"""
try:
return np.radians(self.phi)
except AttributeError:
return None
@property
def k_0(self):
k_0 = self.poissons_ratio / (1 - self.poissons_ratio)
# k_0 = 1 - np.sin(self.phi_r) # Jaky 1944
return k_0
@property
def g_mod(self):
"""Shear modulus of the soil"""
return self._g_mod
@property
def bulk_mod(self):
"""Bulk modulus of the soil"""
return self._bulk_mod
@property
def poissons_ratio(self):
"""Poisson's ratio of the soil"""
return self._poissons_ratio
@property
def e_min(self):
"""The minimum void ratio"""
return self._e_min
@property
def e_max(self):
"""The maximum void ratio"""
return self._e_max
@property
def relative_density(self):
"""The relative density :math (e_max - e_curr) / (.e_max - .e_min)"""
return self._relative_density
@id.setter
def id(self, value):
if value not in [None, ""]:
value = int(value)
self.stack.append(("id", value))
self._id = value
@e_curr.setter
def e_curr(self, value):
value = clean_float(value)
if value is None:
return
try:
void_ratio = self._calc_void_ratio()
if void_ratio is not None and not ct.isclose(void_ratio, value, rel_tol=self._tolerance):
raise ModelError("New void ratio (%.3f) inconsistent with one from specific_gravity (%.3f)"
% (value, void_ratio))
except TypeError:
pass
old_value = self._e_curr
self._e_curr = float(value)
try:
self.recompute_all_weights_and_void()
self._add_to_stack("e_curr", float(value))
except ModelError as e:
self._e_curr = old_value
raise ModelError(e)
@unit_dry_weight.setter
def unit_dry_weight(self, value):
value = clean_float(value)
if value is None:
return
try:
unit_dry_weight = self._calc_unit_dry_weight()
if unit_dry_weight is not None and not ct.isclose(unit_dry_weight, value, rel_tol=self._tolerance):
raise ModelError("new unit_dry_weight (%.2f) is inconsistent with calculated value (%.2f)." % (value, unit_dry_weight))
except TypeError:
pass
old_value = self.unit_dry_weight
self._unit_dry_weight = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("unit_dry_weight", value)
except ModelError as e:
self._unit_dry_weight = old_value
raise ModelError(e)
@unit_sat_weight.setter
def unit_sat_weight(self, value):
value = clean_float(value)
if value is None:
return
try:
unit_sat_weight = self._calc_unit_sat_weight()
if unit_sat_weight is not None and not ct.isclose(unit_sat_weight, value, rel_tol=self._tolerance):
raise ModelError("new unit_sat_weight (%.2f) with calculated value (%.2f)." % (value, unit_sat_weight))
except TypeError:
pass
old_value = self.unit_sat_weight
self._unit_sat_weight = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("unit_sat_weight", value)
except ModelError as e:
self._unit_sat_weight = old_value
raise ModelError(e)
@unit_moist_weight.setter
def unit_moist_weight(self, value):
value = clean_float(value)
if value is None:
return
try:
unit_moist_weight = self._calc_unit_moist_weight()
if unit_moist_weight is not None and not ct.isclose(unit_moist_weight, value, rel_tol=self._tolerance):
raise ModelError("new unit_moist_weight (%.2f) is inconsistent with calculated value (%.2f)." % (value, unit_moist_weight))
except TypeError:
pass
old_value = self.unit_moist_weight
self._unit_moist_weight = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("unit_moist_weight", value)
except ModelError as e:
self._unit_moist_weight = old_value
raise ModelError(e)
@saturation.setter
def saturation(self, value):
"""Volume of water to volume of voids"""
value = clean_float(value)
if value is None:
return
try:
unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
unit_moisture_volume = unit_moisture_weight / self.ulw
saturation = unit_moisture_volume / self._calc_unit_void_volume()
if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
raise ModelError("New saturation (%.3f) is inconsistent "
"with calculated value (%.3f)" % (value, saturation))
except TypeError:
pass
old_value = self.saturation
self._saturation = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("saturation", value)
except ModelError as e:
self._saturation = old_value
raise ModelError(e)
@relative_density.setter
def relative_density(self, value):
value = clean_float(value)
if value is None:
return
relative_density = self._calc_relative_density()
if relative_density is not None and not ct.isclose(relative_density, value, rel_tol=self._tolerance):
raise ModelError("New relative_density (%.3f) is inconsistent "
"with calculated value (%.3f)" % (value, relative_density))
old_value = self.relative_density
self._relative_density = value
try:
self.recompute_all_weights_and_void()
self._add_to_stack("relative_density", value)
except ModelError as e:
self._relative_density = old_value
raise ModelError(e)
@specific_gravity.setter
def specific_gravity(self, value):
""" Set the relative weight of the solid """
value = clean_float(value)
if value is None:
return
specific_gravity = self._calc_specific_gravity()
if specific_gravity is not None and not ct.isclose(specific_gravity, value, rel_tol=self._tolerance):
raise ModelError("specific gravity is inconsistent with set unit_dry_weight and void_ratio")
self._specific_gravity = float(value)
self.stack.append(("specific_gravity", float(value)))
self.recompute_all_weights_and_void()
@e_min.setter
def e_min(self, value):
value = clean_float(value)
if value is None:
return
self._e_min = value
self.stack.append(("e_min", value))
self.recompute_all_weights_and_void()
@e_max.setter
def e_max(self, value):
value = clean_float(value)
if value is None:
return
self._e_max = float(value)
self.stack.append(("e_max", value))
self.recompute_all_weights_and_void()
@phi.setter
def phi(self, value):
value = clean_float(value)
if value is None:
return
self._phi = value
self.stack.append(("phi", value))
@cohesion.setter
def cohesion(self, value):
value = clean_float(value)
if value is None:
return
self._cohesion = value
self.stack.append(("cohesion", value))
@porosity.setter
def porosity(self, value):
value = clean_float(value)
if value is None:
return
        self._e_curr = value / (1 - value)
        # note: the stored state variable (e_curr) goes in the stack
        self.stack.append(("e_curr", self._e_curr))
@dilation_angle.setter
def dilation_angle(self, value):
value = clean_float(value)
if value is None:
return
self._dilation_angle = value
self.stack.append(("dilation_angle", value))
@permeability.setter
def permeability(self, value):
value = clean_float(value)
if value is None:
return
self._permeability = value
self.stack.append(("permeability", value))
@g_mod.setter
def g_mod(self, value):
value = clean_float(value)
if value is None:
return
curr_g_mod = self._calc_g_mod()
if curr_g_mod is not None and not ct.isclose(curr_g_mod, value, rel_tol=0.001):
raise ModelError("New g_mod is inconsistent with current value")
old_value = self.g_mod
self._g_mod = value
try:
self.recompute_all_stiffness_parameters()
self._add_to_stack("g_mod", value)
except ModelError as e:
self._g_mod = old_value
raise ModelError(e)
@bulk_mod.setter
def bulk_mod(self, value):
value = clean_float(value)
if value is None:
return
curr_bulk_mod = self._calc_bulk_mod()
if curr_bulk_mod is not None and not ct.isclose(curr_bulk_mod, value, rel_tol=0.001):
raise ModelError("New bulk_mod is inconsistent with current value")
old_value = self.bulk_mod
self._bulk_mod = value
try:
self.recompute_all_stiffness_parameters()
self._add_to_stack("bulk_mod", value)
except ModelError as e:
self._bulk_mod = old_value
raise ModelError(e)
@poissons_ratio.setter
def poissons_ratio(self, value):
if value is None or value == "":
return
curr_poissons_ratio = self._calc_poissons_ratio()
if curr_poissons_ratio is not None and not ct.isclose(curr_poissons_ratio, value, rel_tol=0.001):
raise ModelError("New poissons_ratio (%.3f) is inconsistent "
"with current value (%.3f)" % (value, curr_poissons_ratio))
old_value = self.poissons_ratio
self._poissons_ratio = value
try:
self.recompute_all_stiffness_parameters()
self._add_to_stack("poissons_ratio", value)
except ModelError as e:
self._poissons_ratio = old_value
raise ModelError(e)
@plasticity_index.setter
def plasticity_index(self, value):
self._add_to_stack("plasticity_index", value)
self._plasticity_index = value
def _calc_void_ratio(self):
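        # Void ratio from standard phase relations (the branches below):
        #   e = G_s * gamma_w / gamma_dry - 1                          (dry)
        #   e = (G_s * gamma_w - gamma_sat) / (gamma_sat - S_liq * gamma_w)
        #   e = e_max - D_r * (e_max - e_min)             (relative density)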
try:
return self.specific_gravity * self._uww / self.unit_dry_weight - 1
except TypeError:
pass
try:
return (self.specific_gravity * self._uww - self.unit_sat_weight) / (self.unit_sat_weight - self.liq_sg * self._uww)
except TypeError:
pass
try:
return self.e_max - self.relative_density * (self.e_max - self.e_min)
except TypeError:
return None
def _calc_relative_density(self):
try:
return (self.e_max - self.e_curr) / (self.e_max - self.e_min)
except TypeError:
return None
def _calc_max_void_ratio(self):
try:
# return (self.e_curr - self.relative_density) / (1. - self.relative_density)
return (self.relative_density * self.e_min - self.e_curr) / (self.relative_density - 1)
except TypeError:
return None
def _calc_min_void_ratio(self):
try:
return (self.e_curr + (self.relative_density - 1) * self.e_max) / self.relative_density
except TypeError:
return None
@property
def _uww(self):
"""
        Unit weight of the reference water used to calculate specific gravity values
:return:
"""
return self.gravity * self.wmd
@property
def uww(self):
"""
        Unit weight of the reference water used to calculate specific gravity values
:return:
"""
return self.gravity * self.wmd
@property
def liq_sg(self):
return self._liq_sg
@liq_sg.setter
def liq_sg(self, value):
if value is None or value == '':
return
if self.liq_sg is not None and self.liq_sg != value:
raise ModelError(f"New liq_sg ({value:.3g}) is inconsistent with current value ({self.liq_sg:.3g})")
self._liq_sg = value
def _calc_specific_gravity(self):
try:
return (1 + self.e_curr) * self.unit_dry_weight / self._uww
except TypeError:
pass
try:
return (1 + self.e_curr) * self.unit_sat_weight / self._uww - self.e_curr * self.liq_sg
except TypeError:
return None
def _calc_unit_dry_weight(self):
try:
return (self.specific_gravity * self._uww) / (1 + self.e_curr) # dry relationship
except TypeError:
return None
def _calc_unit_sat_weight(self):
try:
return ((self.specific_gravity + self.e_curr * self.liq_sg) * self._uww) / (1 + self.e_curr)
except TypeError:
return None
def _calc_unit_moist_weight(self):
try:
return self._calc_unit_moisture_weight() + self.unit_dry_weight
except TypeError:
return None
def _calc_saturation(self):
try:
unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
unit_moisture_volume = unit_moisture_weight / self.ulw
return unit_moisture_volume / self._calc_unit_void_volume()
except TypeError:
return None
def _calc_g_mod(self):
try:
return 3 * self.bulk_mod * (1 - 2 * self.poissons_ratio) / (2 * (1 + self.poissons_ratio))
except TypeError:
return None
def _calc_bulk_mod(self):
try:
return 2 * self.g_mod * (1 + self.poissons_ratio) / (3 * (1 - 2 * self.poissons_ratio))
except TypeError:
return None
def _calc_poissons_ratio(self):
try:
return (3 * self.bulk_mod - 2 * self.g_mod) / (2 * (3 * self.bulk_mod + self.g_mod))
except TypeError:
return None
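# Illustrative check of the elastic relations above: with K = 40e6 Pa and
# v = 0.25, G = 3 * 40e6 * (1 - 0.5) / (2 * 1.25) = 24e6 Pa, and substituting
# G and v back into the bulk modulus relation recovers K = 40e6 Pa.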
def recompute_all_weights_and_void(self):
# TODO: catch potential inconsistency when the void ratio gets defined based on weight and then again from saturation
f_map = OrderedDict()
# voids
f_map["_e_curr"] = self._calc_void_ratio
f_map["_relative_density"] = self._calc_relative_density
f_map["_e_min"] = self._calc_min_void_ratio
f_map["_e_max"] = self._calc_max_void_ratio
# weights
f_map["_specific_gravity"] = self._calc_specific_gravity
f_map["_unit_dry_weight"] = self._calc_unit_dry_weight
f_map["_unit_sat_weight"] = self._calc_unit_sat_weight
# saturation
f_map["_unit_moist_weight"] = self._calc_unit_moist_weight
f_map["_saturation"] = self._calc_saturation
for item in f_map:
value = f_map[item]()
if value is not None:
curr_value = getattr(self, item)
if curr_value is not None and not ct.isclose(curr_value, value, rel_tol=0.001):
raise ModelError("new %s is inconsistent with current value (%.3f, %.3f)" % (item, curr_value,
value))
setattr(self, item, value)
def recompute_all_stiffness_parameters(self):
f_map = OrderedDict()
# voids
f_map["_g_mod"] = self._calc_g_mod
f_map["_bulk_mod"] = self._calc_bulk_mod
f_map["_poissons_ratio"] = self._calc_poissons_ratio
for item in f_map:
value = f_map[item]()
if value is not None:
curr_value = getattr(self, item)
if curr_value is not None and not ct.isclose(curr_value, value, rel_tol=0.001):
raise ModelError(f"new {item} is inconsistent with current value ({curr_value}, {value})")
setattr(self, item, value)
def _calc_unit_void_volume(self):
"""Return the volume of the voids for total volume equal to a unit"""
try:
return self.e_curr / (1 + self.e_curr)
except TypeError:
return None
def _calc_unit_solid_volume(self):
"""Return the volume of the solids for total volume equal to a unit"""
try:
return 1.0 - self._calc_unit_void_volume()
except TypeError:
return None
def _calc_unit_moisture_weight(self):
"""Return the weight of the voids for total volume equal to a unit"""
try:
return self.saturation * self._calc_unit_void_volume() * self.ulw
except TypeError:
return None
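# Usage sketch (illustrative; assumes a Soil object can be built with the
# default constructor defined earlier in this module):
#
#     sl = Soil()
#     sl.specific_gravity = 2.65
#     sl.e_curr = 0.65
#     # the recompute machinery then derives unit_dry_weight via
#     # _calc_unit_dry_weight: Gs * uww / (1 + e_curr)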
class CriticalSoil(Soil):
# critical state parameters
e_cr0 = 0.0
p_cr0 = 0.0
lamb_crl = 0.0
type = "critical_soil"
def __init__(self, wmd=None, liq_mass_density=None, g=9.8, **kwargs):
# run parent class initialiser function
super(CriticalSoil, self).__init__(wmd=wmd, liq_mass_density=liq_mass_density, g=g, **kwargs)
self._extra_class_inputs = ["e_cr0", "p_cr0", "lamb_crl"]
self.inputs = self.inputs + self._extra_class_inputs
for param in kwargs:
if param in self.inputs:
setattr(self, param, kwargs[param])
@property
def ancestor_types(self):
return super(CriticalSoil, self).ancestor_types + [self.type]
def e_critical(self, p):
p = float(p)
return self.e_cr0 - self.lamb_crl * np.log(p / self.p_cr0)
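# Example (illustrative): the critical-state line in e-ln(p) space.
#
#     sl = CriticalSoil()
#     sl.e_cr0 = 0.9        # void ratio at p = p_cr0
#     sl.p_cr0 = 10.0e3     # reference mean stress [Pa]
#     sl.lamb_crl = 0.04    # slope of the CSL in e-ln(p)
#     sl.e_critical(20.0e3) # = 0.9 - 0.04 * ln(2) ~= 0.872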
class StressDependentSoil(Soil):
_g0_mod = None
_p_atm = 101000.0 # Pa
type = "stress_dependent_soil"
_a = 0.5 # stress factor
_g_mod_p0 = 0.0 # shear modulus at zero confining stress
_curr_m_eff_stress = None
def __init__(self, pw=None, wmd=None, liq_mass_density=None, liq_sg=1, g=9.8, **kwargs):
super(StressDependentSoil, self).__init__(pw=pw, wmd=wmd, liq_mass_density=liq_mass_density, liq_sg=liq_sg, g=g, **kwargs)
self._extra_class_inputs = ["g0_mod", "p_atm", "a"]
self.inputs = self.inputs + self._extra_class_inputs
for param in kwargs:
if param in self.inputs:
setattr(self, param, kwargs[param])
@property
def ancestor_types(self):
return super(StressDependentSoil, self).ancestor_types + [self.type]
@property
def bulk_mod(self):
try:
return 2 * self.g_mod * (1 + self.poissons_ratio) / (3 * (1 - 2 * self.poissons_ratio))
except TypeError:
return None
@bulk_mod.setter
def bulk_mod(self, value):
raise ModelError('Do not set bulk_mod on stress dependent soil, set curr_m_eff_stress')
@property
def poissons_ratio(self):
"""Poisson's ratio of the soil"""
return self._poissons_ratio
@poissons_ratio.setter
def poissons_ratio(self, value):
value = clean_float(value)
if value is None:
return
curr_poissons_ratio = self._calc_poissons_ratio()
if curr_poissons_ratio is not None and not ct.isclose(curr_poissons_ratio, value, rel_tol=0.001):
raise ModelError("New poissons_ratio (%.3f) is inconsistent "
"with current value (%.3f)" % (value, curr_poissons_ratio))
self._poissons_ratio = value
@property
def curr_m_eff_stress(self):
return self._curr_m_eff_stress
@curr_m_eff_stress.setter
def curr_m_eff_stress(self, value):
self._curr_m_eff_stress = value
@property
def g_mod(self):
if self._curr_m_eff_stress is not None:
return self.get_g_mod_at_m_eff_stress(self._curr_m_eff_stress)
else:
return self._g_mod
@g_mod.setter
def g_mod(self, value):
deprecation("Do not set g_mod directly on a stress dependent soil, set curr_m_eff_stress")
value = clean_float(value)
self._g_mod = value
def recompute_all_stiffness_parameters(self):
return
@property
def g0_mod(self):
return self._g0_mod
@g0_mod.setter
def g0_mod(self, value):
value = clean_float(value)
self._g0_mod = value
if value is not None:
self._add_to_stack("g0_mod", float(value))
@property
def a(self):
return self._a
@a.setter
def a(self, value):
value = clean_float(value)
self._a = value
if value is not None:
self._add_to_stack("a", float(value))
@property
def g_mod_p0(self):
return self._g_mod_p0
@g_mod_p0.setter
def g_mod_p0(self, value):
value = clean_float(value)
self._g_mod_p0 = value
if value is not None:
self._add_to_stack("g_mod_p0", float(value))
@property
def p_atm(self):
return self._p_atm
@p_atm.setter
def p_atm(self, value):
value = clean_float(value)
self._p_atm = value
if value is not None:
self._add_to_stack("p_atm", float(value))
def get_g_mod_at_v_eff_stress(self, v_eff_stress, k0=None):
# k0 = 1 - np.sin(self.phi_r)
if k0 is None:
k0 = self.poissons_ratio / (1 - self.poissons_ratio)
return self.g0_mod * self.p_atm * (v_eff_stress * (1 + 2 * k0) / 3 / self.p_atm) ** self.a + self.g_mod_p0
def set_g0_mod_at_v_eff_stress(self, v_eff_stress, g_mod, g_mod_p0=None, k0=None, plane_strain=False):
if g_mod_p0 is not None:
self.g_mod_p0 = g_mod_p0
if k0 is None:
k0 = self.poissons_ratio / (1 - self.poissons_ratio)
if plane_strain:
m = self.p_atm * (v_eff_stress * (1 + k0) / 2 / self.p_atm) ** self.a
else:
m = self.p_atm * (v_eff_stress * (1 + 2 * k0) / 3 / self.p_atm) ** self.a
self.g0_mod = (g_mod - self.g_mod_p0) / m
def set_curr_m_eff_stress_from_g_mod(self, g_mod):
self._curr_m_eff_stress = ((g_mod - self.g_mod_p0) / (self.g0_mod * self.p_atm)) ** (1. / self.a) * self.p_atm
# self._curr_m_eff_stress = (g_mod - self.g_mod_p0) / self.g0_mod
def get_g_mod_at_m_eff_stress(self, m_eff_stress):
return self.g0_mod * self.p_atm * (m_eff_stress / self.p_atm) ** self.a + self.g_mod_p0
def set_g0_mod_at_m_eff_stress(self, m_eff_stress, g_mod, g_mod_p0=None):
if g_mod_p0 is not None:
self.g_mod_p0 = g_mod_p0
m = self.p_atm * (m_eff_stress / self.p_atm) ** self.a
self.g0_mod = (g_mod - self.g_mod_p0) / m
def get_shear_vel_at_v_eff_stress(self, v_eff_stress, saturated):
try:
g_mod = self.get_g_mod_at_v_eff_stress(v_eff_stress)
if saturated:
return np.sqrt(g_mod / self.unit_sat_mass)
else:
return np.sqrt(g_mod / self.unit_dry_mass)
except TypeError:
return None
def g_mod_at_v_eff_stress(self, v_eff_stress):
deprecation("Use get_g_mod_at_v_eff_stress")
return self.get_g_mod_at_v_eff_stress(v_eff_stress)
def g_mod_at_m_eff_stress(self, m_eff_stress):
deprecation("Use get_g_mod_at_m_eff_stress")
return self.get_g_mod_at_m_eff_stress(m_eff_stress)
def calc_shear_vel_at_v_eff_stress(self, saturated, v_eff_stress):
deprecation("Use get_shear_vel_at_v_eff_stress - note inputs switched")
return self.get_shear_vel_at_v_eff_stress(v_eff_stress, saturated)
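# Example (illustrative): shear stiffness grows with mean effective stress as
# g_mod = g0_mod * p_atm * (p' / p_atm) ** a + g_mod_p0.
#
#     sl = StressDependentSoil()
#     sl.g0_mod = 500.
#     sl.curr_m_eff_stress = 101000.0   # p' = p_atm
#     sl.g_mod                          # = 500 * 101000 = 50.5e6 Pa (a = 0.5, g_mod_p0 = 0)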
class SoilLayer(Soil): # not used
def __init__(self, depth=0.0, height=1000, top_total_stress=0.0, top_pore_pressure=0.0):
super(SoilLayer, self).__init__()
self.height = height # m from top of layer to bottom of layer
self.depth = depth # m from ground surface to top of layer
self.top_total_stress = top_total_stress  # total vertical stress at the top [Pa]
self.top_pore_pressure = top_pore_pressure  # pore pressure at the top [Pa]
class SoilProfile(PhysicalObject):
"""
An object to describe a soil profile
"""
_id = None
name = None
_gwl = 1e6 # Ground water level [m]
unit_weight_water = 9800. # [N/m3] # DEPRECATED
unit_water_weight = 9800. # [N/m3]
_height = None
hydrostatic = False
base_type = "soil_profile"
type = "soil_profile"
inputs = [
"id",
"name",
"gwl",
"unit_water_weight",
"layers",
"height",
"x_angles"
]
def __init__(self):
super(SoilProfile, self).__init__()  # run parent class initialiser function
self._layers = OrderedDict([(-1e6, Soil())]) # [depth to top of layer, Soil object]
self.skip_list = []
self.x_angles = [] # the slope of the top of each layer, first layer slope should be >= ground slope
self.split = OrderedDict()
def __str__(self):
return "SoilProfile id: {0}, name: {1}".format(self.id, self.name)
def add_to_dict(self, models_dict, **kwargs):
if self.base_type not in models_dict:
models_dict[self.base_type] = OrderedDict()
if "soil" not in models_dict:
models_dict["soil"] = OrderedDict()
profile_dict = self.to_dict(**kwargs)
profile_dict["layers"] = []
for layer in self.layers:
models_dict["soil"][self.layers[layer].unique_hash] = self.layers[layer].to_dict(**kwargs)
profile_dict["layers"].append({
"soil_id": str(self.layers[layer].id),
"soil_unique_hash": str(self.layers[layer].unique_hash),
"depth": float(layer)
})
models_dict["soil_profile"][self.unique_hash] = profile_dict
@property
def ancestor_types(self):
return super(SoilProfile, self).ancestor_types + ["soil_profile"]
def add_layer(self, depth, soil):
"""
Adds a soil to the SoilProfile at a set depth.
Note, the soils are automatically reordered based on depth from surface.
:param depth: depth from surface to top of soil layer
:param soil: Soil object
"""
if -1e6 in list(self._layers):
del self._layers[-1e6]
self._layers[depth] = soil
self._sort_layers()
if self.hydrostatic:
if depth >= self.gwl:
soil.saturation = 1.0
else:
li = self.get_layer_index_by_depth(depth)
layer_height = self.get_layer_height(li)
if layer_height is None:
soil.saturation = 0.0
elif depth + layer_height <= self.gwl:
soil.saturation = 0.0
else:
sat_height = depth + self.get_layer_height(li) - self.gwl
soil.saturation = sat_height / self.get_layer_height(li)
def _sort_layers(self):
"""Sort the layers by depth."""
self._layers = OrderedDict(sorted(self._layers.items(), key=lambda t: t[0]))
@property
def id(self):
"""Get the id number of the soil profile"""
return self._id
@id.setter
def id(self, value):
"""
Set the id of the soil profile
:param value: int
:return:
"""
self._id = int(value)
@property
def gwl(self):
"""Get the ground water level"""
return self._gwl
@gwl.setter
def gwl(self, value):
"""
Set the depth from the surface to the ground water level (gwl)
:param value:
:return:
"""
self._gwl = float(value)
@property
def height(self):
return self._height
@height.setter
def height(self, value):
"""
Sets the depth from the surface to the base of the soil profile
:param value: float, height
:return:
"""
self._height = float(value)
def get_layer_height(self, layer_int):
"""
Get the layer height by layer id number.
:param layer_int:
:return: float, height of the soil layer
"""
if layer_int == self.n_layers:
if self.height is None:
return None
return self.height - self.get_layer_depth(layer_int)
else:
return self.get_layer_depth(layer_int + 1) - self.get_layer_depth(layer_int)
def layer_height(self, layer_int):
return self.get_layer_height(layer_int)
def get_layer_depth(self, layer_int):
"""
Get the distance from the surface to the top of the layer by layer id number.
:param layer_int: int,
Layer index
:return: float,
Depth of the soil layer
"""
layer_int = int(layer_int)
try:
return self.depths[layer_int - 1]
except IndexError as e:
if layer_int == 0 or layer_int > self.n_layers:
raise IndexError("index={0}, but must be between 1 and {1}".format(layer_int, self.n_layers))
else:
raise e
def layer_depth(self, index):
return self.get_layer_depth(index)
def get_layer_mid_depth(self, layer_int):
"""
Get the distance from the surface to the centre of the layer by layer id number.
:param layer_int: int,
Layer index
:return: float,
Depth to middle of the soil layer
"""
return self.get_layer_depth(layer_int) + self.get_layer_height(layer_int) / 2
@property
def layers(self):
return self._layers
@property
def layer_objects(self):
return list(self._layers.values())
@layers.setter
def layers(self, layers):
for layer in layers:
layer_depth = layer["depth"]
sl = layer["soil"] # is actually a soil object
self.add_layer(layer_depth, sl)
def remove_layer_at_depth(self, depth):
try:
del self._layers[depth]
except KeyError:
raise KeyError("Depth: {0} not found in {1}".format(depth, list(self.layers.keys())))
def remove_layer(self, layer_int):
key = list(self._layers.keys())[layer_int - 1]
del self._layers[key]
def replace_layer(self, layer_int, soil):
key = list(self._layers.keys())[layer_int - 1]
self._layers[key] = soil
def move_layer(self, new_depth, layer_int, overwrite=False):
key = list(self._layers.keys())[layer_int - 1]
if new_depth != key and new_depth in self._layers.keys() and not overwrite:
raise ValueError('new_depth is already in soil profile. If you want to over write this layer then set overwrite=True')
soil = self._layers[key]
del self._layers[key]
self._layers[new_depth] = soil
self._sort_layers()
def shift_all_layers(self, delta_depth):
old_keys = self._layers.keys()
new_keys = [depth + delta_depth for depth in old_keys]
vals = self._layers.values()
self._layers = OrderedDict(zip(new_keys, vals))
def layer(self, index):
index = int(index)
if index == 0:
raise KeyError("index=%i, but must be 1 or greater." % index)
return list(self._layers.values())[index - 1]
def set_soil_ids_to_layers(self):
for i in range(1, len(self._layers) + 1):
self.layer(i).id = i
def get_layer_index_by_depth(self, depth):
for i, ld in enumerate(self.layers):
if ld > depth:
return i
return self.n_layers
def get_soil_at_depth(self, depth):
lay_index = self.get_layer_index_by_depth(depth)
return self.layer(lay_index)
def get_parameter_at_depth(self, depth, parameter):
lay_index = self.get_layer_index_by_depth(depth)
soil = self.layer(lay_index)
if hasattr(soil, parameter):
return getattr(soil, parameter)
else:
raise ModelError("%s not in soil object at depth (%.3f)." % (parameter, depth))
def get_parameters_at_depth(self, depth, parameters):
lay_index = self.get_layer_index_by_depth(depth)
soil = self.layer(lay_index)
od = OrderedDict()
for parameter in parameters:
if hasattr(soil, parameter):
od[parameter] = getattr(soil, parameter)
return od
def get_parameters_at_depths(self, depths, parameters):
od = OrderedDict()
for parameter in parameters:
od[parameter] = []
for depth in depths:
lay_index = self.get_layer_index_by_depth(depth)
soil = self.layer(lay_index)
for parameter in parameters:
if hasattr(soil, parameter):
od[parameter].append(getattr(soil, parameter))
return od
@property
def n_layers(self):
"""
Number of soil layers
:return:
"""
return len(self._layers)
@property
def depths(self):
"""
An ordered list of depths.
:return:
"""
return list(self._layers.keys())
def vertical_total_stress(self, y_c):
deprecation("Use get_v_total_stress_at_depth")
return self.get_v_total_stress_at_depth(y_c)
def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs)
def one_vertical_total_stress(self, z_c):
"""
Determine the vertical total stress at a single depth z_c.
:param z_c: depth from surface
"""
if self.gwl < 0:
total_stress = -self.gwl * self.unit_water_weight
else:
total_stress = 0.0
depths = self.depths
z_surface = 0
end = 0
for layer_int in range(1, len(depths) + 1):
l_index = layer_int - 1
if z_c > depths[layer_int - 1]:
if l_index < len(depths) - 1 and z_c > depths[l_index + 1]:
bottom_depth = depths[l_index + 1]
else:
end = 1
bottom_depth = z_c
if bottom_depth <= z_surface:
continue
height = bottom_depth - max(depths[l_index], z_surface)
if bottom_depth <= self.gwl:
total_stress += height * self.layer(layer_int).get_unit_weight_or('dry')
else:
if self.layer(layer_int).unit_sat_weight is None:
raise AnalysisError("Saturated unit weight not defined for layer %i." % layer_int)
sat_height = bottom_depth - max(self.gwl, depths[l_index])
dry_height = height - sat_height
total_stress += sat_height * self.layer(layer_int).unit_sat_weight
if dry_height > 0:
total_stress += dry_height * self.layer(layer_int).unit_dry_weight
else:
end = 1
if end:
break
return total_stress
def hydrostatic_pressure(self, y_c):
"""
Determine the hydrostatic pore pressure at a single depth y_c.
:param y_c: float, depth from surface
"""
deprecation("Use get_hydrostatic_pressure_at_depth")
return self.get_hydrostatic_pressure_at_depth(y_c)
def get_hydrostatic_pressure_at_depth(self, y_c):
return np.where(y_c < self.gwl, 0.0, (y_c - self.gwl) * self.unit_water_weight)
def vert_eff_stress(self, y_c):
"""
Determine the vertical effective stress at a single depth y_c.
:param y_c: float, depth from surface
"""
deprecation("Use get_v_eff_stress_at_depth")
return self.get_v_eff_stress_at_depth(y_c)
def get_v_eff_stress_at_depth(self, y_c):
"""
Determine the vertical effective stress at a single depth y_c.
:param y_c: float, depth from surface
"""
sigma_v_c = self.get_v_total_stress_at_depth(y_c)
pp = self.get_hydrostatic_pressure_at_depth(y_c)
sigma_veff_c = sigma_v_c - pp
return sigma_veff_c
def vertical_effective_stress(self, y_c): # deprecated function
"""Deprecated. Use get_vert_eff_stress"""
deprecation("Use get_v_eff_stress_at_depth")
return self.get_v_eff_stress_at_depth(y_c)
def get_shear_vel_at_depth(self, y_c):
"""
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
"""
sl = self.get_soil_at_depth(y_c)
if y_c <= self.gwl:
saturation = False
else:
saturation = True
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = self.get_v_eff_stress_at_depth(y_c)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.get_shear_vel(saturation)
return vs
def shear_vel_at_depth(self, y_c):
deprecation("Use get_shear_vel_at_depth")
return self.get_shear_vel_at_depth(y_c)
def split_props(self, incs=None, target=1.0, props=None):
deprecation('Use gen_split')
self.gen_split(incs=incs, target=target, props=props)
def gen_split(self, incs=None, target=1.0, props=None, pos='centre'):
if incs is None:
incs = np.ones(self.n_layers) * target
if props is None:
props = ['unit_mass', 'shear_vel']
else:
if 'thickness' in props:
props.remove('thickness')
dd = OrderedDict([('thickness', []), ('depth', [])])
for item in props:
dd[item] = []
cum_thickness = 0
for i in range(self.n_layers):
if self.layer_depth(i + 1) >= self.height:
break
sl = self.layer(i + 1)
thickness = self.get_layer_height(i + 1)
if thickness is None:
raise ValueError("thickness of layer {0} is None, check if soil_profile.height is set".format(i + 1))
if thickness <= 0: # below soil profile height
continue
n_slices = max(int(thickness / incs[i]), 1)
slice_thickness = float(thickness) / n_slices
for j in range(n_slices):
dd["thickness"].append(slice_thickness)
v_eff = None
if pos == 'centre':
centre_depth = cum_thickness + slice_thickness * 0.5
elif pos == 'bottom':
centre_depth = cum_thickness + slice_thickness
else:
centre_depth = cum_thickness
dd['depth'].append(centre_depth)
cum_thickness += slice_thickness
if centre_depth > self.gwl:
saturated = True
else:
saturated = False
# some properties require vertical effective stress or saturation
for item in props:
if item == 'v_eff':
value = self.get_v_eff_stress_at_depth(centre_depth)
elif item == 'v_total':
value = self.get_v_total_stress_at_depth(centre_depth)
else:
value = None
fn0 = "get_{0}_at_v_eff_stress".format(item) # first check for stress dependence
fn1 = "get_{0}".format(item)  # then fall back to a stress-independent getter
if hasattr(sl, fn0):
try:
v_eff = self.get_v_eff_stress_at_depth(centre_depth)
except TypeError:
raise ValueError("Cannot compute vertical effective stress at depth: {0}".format(centre_depth))
value = sf.get_value_of_a_get_method(sl, fn0, extras={"saturated": saturated,
'v_eff_stress': v_eff})
elif hasattr(sl, fn1):
value = sf.get_value_of_a_get_method(sl, fn1, extras={"saturated": saturated})
elif hasattr(sl, item):
value = getattr(sl, item)
dd[item].append(value)
for item in dd:
dd[item] = np.array(dd[item])
self.split = dd
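# Usage sketch (illustrative; sl_top and sl_base stand for configured Soil
# objects with unit weights set):
#
#     sp = SoilProfile()
#     sp.height = 10.0
#     sp.gwl = 3.0
#     sp.add_layer(0.0, sl_top)
#     sp.add_layer(5.0, sl_base)
#     sigma_v = sp.get_v_total_stress_at_depth(6.0)
#     sigma_veff = sp.get_v_eff_stress_at_depth(6.0)  # sigma_v minus hydrostatic pressure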
def discretize_soil_profile(sp, incs=None, target=1.0):
"""
Splits the soil profile into slices and stores as dictionary
:param sp: SoilProfile
:param incs: array_like, increments of depth to use for each layer
:param target: target depth increment size
:return: dict
"""
if incs is None:
incs = np.ones(sp.n_layers) * target
dd = {}
dd["thickness"] = []
dd["unit_mass"] = []
dd["shear_vel"] = []
cum_thickness = 0
for i in range(sp.n_layers):
sl = sp.layer(i + 1)
thickness = sp.get_layer_height(i + 1)
n_slices = max(int(thickness / incs[i]), 1)
slice_thickness = float(thickness) / n_slices
for j in range(n_slices):
cum_thickness += slice_thickness
if cum_thickness >= sp.gwl:
rho = sl.unit_sat_mass
saturation = True
else:
rho = sl.unit_dry_mass
saturation = False
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = sp.get_v_eff_stress_at_depth(cum_thickness)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.get_shear_vel(saturation)
dd["shear_vel"].append(vs)
dd["unit_mass"].append(rho)
dd["thickness"].append(slice_thickness)
for item in dd:
dd[item] = np.array(dd[item])
return dd
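# Usage sketch: dd = discretize_soil_profile(sp, target=0.5) returns a dict of
# equal-thickness slices with arrays of 'thickness', 'unit_mass' and
# 'shear_vel', e.g. as input for a site-response mesh.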
def get_new_soil_profile_at_x_offset(sp_ref, x, dy_surf_at_x=0):
# keep the surface layer depth fixed (ignore the slope of the ground surface)
xangs = list(sp_ref.x_angles)
xangs[0] = 0
xangs = np.array(xangs)
lays = np.array(list(sp_ref.layers)) - xangs * x + dy_surf_at_x
sp_ff = SoilProfile()
for ll in range(len(lays)):
if ll == 0:
sp_ff.add_layer(0.0, sp_ref.layer(1))
else:
sp_ff.add_layer(lays[ll], sp_ref.layer(ll + 1))
sp_ff.height = sp_ref.height + dy_surf_at_x
return sp_ff
# TODO: extend to have LiquefiableSoil
class SoilCritical(CriticalSoil):
def __init__(self, pw=9800):
"""Deprecated. Use CriticalSoil"""
deprecation("SoilCritical class is deprecated (remove in version 1.0), use CriticalSoil.")
super(SoilCritical, self).__init__(pw=pw)
class SoilStressDependent(StressDependentSoil):
def __init__(self, pw=9800):
"""Deprecated. Use StressDependentSoil"""
deprecation("SoilStressDependent class is deprecated (remove in version 1.0), use StressDependentSoil.")
super(SoilStressDependent, self).__init__(pw=pw)
|
|
# Village People, 2017
# This is the model we got our best results with.
#
# The model receives the current state, and predicts the policy, the value
# of the state and various other outputs for the auxiliary task.
#
# The model uses BatchNormalization in the first stages of training
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.utils import conv_out_dim
ENV_CAUGHT_REWARD = 25
class NextRewardPredictor(nn.Module):
"""This model is used for the prediction of the next reward."""
def __init__(self, in_size):
super(NextRewardPredictor, self).__init__()
self.predictor = nn.Linear(in_size, 1)
def forward(self, x):
x = nn.Tanh()(self.predictor(x))
return x
class NextStateDepthPrediction(nn.Module):
"""This model """
def __init__(self, in_size, out_size):
super(NextStateDepthPrediction, self).__init__()
self.act = nn.ReLU()
inter = int(in_size / 2)
self.predictor1 = nn.Linear(in_size, inter)
self.bn1 = nn.BatchNorm1d(inter)  # defined but not applied in forward
self.predictor2 = nn.Linear(inter, out_size)
def forward(self, x):
act = self.act
x = act(self.predictor1(x))
x = nn.Sigmoid()(self.predictor2(x))
return x
class PredictNextState(nn.Module):
def __init__(self, in_size):
super(PredictNextState, self).__init__()
self.act = nn.ReLU(True)
self.in_size = in_size
nc = 15  # number of output planes
self.first_cond_w = first_cond_w = 3
first_conv_depth = int(in_size // (first_cond_w ** 2))
cn_size = first_conv_depth * first_cond_w * first_cond_w
self.lin1 = nn.Linear(in_size, cn_size)
self.bnLin = nn.BatchNorm1d(cn_size)
self.cn2 = nn.ConvTranspose2d(first_conv_depth, 64, kernel_size=3,
stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(64)
self.cn3 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=1,
bias=False)
self.bn3 = nn.BatchNorm2d(32)
self.cn4 = nn.ConvTranspose2d(32, nc, kernel_size=3, stride=1,
bias=False)
self.bn4 = nn.BatchNorm2d(nc)
def forward(self, x):
act = self.act
x = act(self.bnLin(self.lin1(x)))
x = x.view(x.size(0), -1, self.first_cond_w, self.first_cond_w)
x = act(self.bn2(self.cn2(x)))
x = act(self.bn3(self.cn3(x)))
x = self.cn4(x)
x = nn.Sigmoid()(x)
return x.view(-1, 15, 9, 9)
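# Shape sketch (illustrative): three stride-1 ConvTranspose2d layers grow the
# 3x3 seed to 9x9 (3 -> 5 -> 7 -> 9), so the output is batch x 15 x 9 x 9.
#
#     m = PredictNextState(in_size=512)
#     m.eval()
#     out = m(Variable(torch.randn(4, 512)))
#     assert out.size() == (4, 15, 9, 9)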
def sampler(input_, tau=100):
"""Perturb the input with Gaussian noise of standard deviation 1/tau."""
noise = Variable(torch.randn(input_.size(0), input_.size(1))
.type_as(input_.data)) / tau
x = input_ + noise
return x
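# Example: the noise has standard deviation 1/tau, so a larger tau gives a
# policy closer to the raw softmax output.
#
#     probs = Variable(torch.ones(2, 3) / 3)
#     noisy = sampler(probs, tau=1000)  # probs perturbed by N(0, 0.001^2)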
class Policy(nn.Module):
""" PigChase Model for the 18BinaryView batch x 18 x 9 x 9.
Args:
state_dim (tuple): input dims: (channels, width, history length)
action_no (int): no of actions
hidden_size (int): size of the hidden linear layer
"""
def __init__(self, config):
super(Policy, self).__init__()
state_dim = (18, 9, 1)
action_no = 3
hidden_size = 256
dropout = 0.1
self.rnn_type = rnn_type = "GRUCell"
self.rnn_layers = rnn_layers = 2
self.rnn_nhid = rnn_nhid = hidden_size
self.activation = nn.ReLU()
self.drop = nn.Dropout(dropout)
self.drop2d = nn.Dropout2d(p=dropout)
self.in_channels, self.in_width, self.hist_len = state_dim
self.action_no = action_no
self.hidden_size = hidden_size
in_depth = self.hist_len * self.in_channels
self.conv0 = nn.Conv2d(in_depth, 64, kernel_size=1, stride=1)
self.bn0 = nn.BatchNorm2d(64)
self.conv1 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.bn3 = nn.BatchNorm2d(64)
map_width1 = conv_out_dim(self.in_width, self.conv1)
map_width2 = conv_out_dim(map_width1, self.conv2)
map_width3 = conv_out_dim(map_width2, self.conv3)
# map_width4 = conv_out_dim(map_width3, self.conv4)
lin_size = 64 * map_width3 ** 2
# self.lin1 = nn.Linear(lin_size, lin_size)
# self.bnLin1 = nn.BatchNorm1d(lin_size)
self.rnn1 = getattr(nn, rnn_type)(lin_size, rnn_nhid)
self.rnn2 = getattr(nn, rnn_type)(rnn_nhid, rnn_nhid)
self.bnRnn1 = nn.BatchNorm1d(rnn_nhid)  # retained from the early BatchNorm setup; unused in forward
self.bnRnn2 = nn.BatchNorm1d(rnn_nhid * self.rnn_layers)  # unused in forward
self.lin2 = nn.Linear(rnn_nhid * self.rnn_layers, hidden_size)
self.bnLin2 = nn.BatchNorm1d(hidden_size)
lin_size_3 = hidden_size
self.lin3 = nn.Linear(hidden_size, lin_size_3)
self.bnLin3 = nn.BatchNorm1d(lin_size_3)
self.action_head = nn.Linear(lin_size_3, action_no)
self.value_head = nn.Linear(lin_size_3, 1)
self.bn_value_head = nn.BatchNorm1d(1)
# ---- Aux tasks
self.aux_predictors = []
if "noise" in [t[0] for t in config.auxiliary_tasks]:
self.action_noise = 10000
#
if "next_reward" in [t[0] for t in config.auxiliary_tasks]:
_input_size = rnn_nhid * self.rnn_layers
self.next_reward = NextRewardPredictor(_input_size)
self.aux_predictors.append(("next_reward", self.next_reward))
if "next_state_depth" in [t[0] for t in config.auxiliary_tasks]:
_input_size = rnn_nhid * self.rnn_layers + state_dim[0]
self.next_state_depth = NextStateDepthPrediction(_input_size,
state_dim[1] ** 2)
self.aux_predictors.append(("next_state_depth",
self.next_state_depth))
def forward(self, x, hidden_states, bn=True, aux_input=None):
# `bn` is kept for API compatibility; batch norm is always applied here
act = self.activation
if aux_input is None:  # avoid a mutable default argument
aux_input = {}
x = act(self.bn0(self.conv0(x)))
x = act(self.bn1(self.conv1(x)))
x = act(self.bn2(self.conv2(x)))
x = act(self.bn3(self.conv3(x)))
out_conv = x.view(x.size(0), -1)
hidden0 = self.rnn1(out_conv, hidden_states[0])
if self.rnn_type == 'LSTMCell':
x = hidden0[0]
else:
x = hidden0
hx = [x]
hidden1 = self.rnn2(x, hidden_states[1])
if self.rnn_type == 'LSTMCell':
x = hidden1[0]
else:
x = hidden1
all_hidden = [hidden0, hidden1]
hx.append(x)
x = torch.cat(hx, 1)
aux_predictions = {}
if self.training:
for (task, predictor) in self.aux_predictors:
if task == "next_reward":
aux_predictions[task] = predictor(x)
if task == "next_state_depth" and "next_state_depth" in aux_input:
in_next = torch.cat([aux_input["next_state_depth"], x], 1)
aux_predictions[task] = predictor(in_next)
x = act(self.lin2(x))
x = act(self.lin3(x))
action_scores = self.action_head(x)
state_values = self.value_head(x)
action_prob = sampler(F.softmax(action_scores, dim=1), tau=self.action_noise)
return action_prob, state_values, all_hidden, aux_predictions
def init_hidden(self, bsz):
hidden_states = []
weight = next(self.parameters()).data
# Hidden 0
if self.rnn_type == 'LSTMCell':
hidden_states.append((Variable(weight.new(bsz, self.rnn_nhid)
.zero_()),
Variable(weight.new(bsz, self.rnn_nhid)
.zero_())))
else:
hidden_states.append(Variable(weight.new(bsz, self.rnn_nhid)
.zero_()))
# Hidden 1
if self.rnn_type == 'LSTMCell':
hidden_states.append((Variable(weight.new(bsz, self.rnn_nhid)
.zero_()),
Variable(weight.new(bsz, self.rnn_nhid)
.zero_())))
else:
hidden_states.append(Variable(weight.new(bsz, self.rnn_nhid)
.zero_()))
return hidden_states
def slice_hidden(self, hidden_state, not_done_idx):
hidden_states = []
i = 0
if self.rnn_type == 'LSTMCell':
hidden_states.append((hidden_state[i][0]
.index_select(0, Variable(not_done_idx)),
hidden_state[i][1].index_select(0, Variable(
not_done_idx))))
else:
hidden_states.append(hidden_state[i]
.index_select(0, Variable(not_done_idx)))
i = 1
if self.rnn_type == 'LSTMCell':
hidden_states.append((hidden_state[i][0]
.index_select(0, Variable(not_done_idx)),
hidden_state[i][1].index_select(0, Variable(
not_done_idx))))
else:
hidden_states.append(hidden_state[i]
.index_select(0, Variable(not_done_idx)))
return hidden_states
def get_attributes(self):
return (self.in_channels, self.hist_len, self.action_no,
self.hidden_size)
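# Usage sketch (illustrative): builds the policy with a minimal config stub
# and runs one step. `config` only needs an `auxiliary_tasks` list of
# (task_name, ...) tuples, as implied by the constructor above.
#
#     from types import SimpleNamespace
#     policy = Policy(SimpleNamespace(auxiliary_tasks=[]))
#     policy.eval()
#     state = Variable(torch.randn(4, 18, 9, 9))  # batch x channels x w x h
#     hidden = policy.init_hidden(bsz=4)
#     probs, values, hidden, aux = policy(state, hidden)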
|
|
import os
import sys
from six import print_
from ccmlib import common, repository
from ccmlib.common import ArgumentError
from ccmlib.node import Node, NodeError
from ccmlib.cluster import Cluster
from ccmlib.cmds.command import Cmd
from ccmlib.dse_cluster import DseCluster
from ccmlib.dse_node import DseNode
from ccmlib.cluster_factory import ClusterFactory
def cluster_cmds():
return [
"create",
"add",
"populate",
"list",
"switch",
"status",
"remove",
"clear",
"liveset",
"start",
"stop",
"flush",
"compact",
"stress",
"updateconf",
"updatedseconf",
"updatelog4j",
"cli",
"setdir",
"bulkload",
"setlog",
"scrub",
"verify",
"invalidatecache",
"checklogerror",
]
def parse_populate_count(v):
if v is None:
return None
tmp = v.split(':')
if len(tmp) == 1:
return int(tmp[0])
else:
return [ int(t) for t in tmp ]
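# Examples:
#   parse_populate_count("3")    -> 3         (single datacenter with 3 nodes)
#   parse_populate_count("2:3")  -> [2, 3]    (two datacenters)
#   parse_populate_count(None)   -> None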
class ClusterCreateCmd(Cmd):
def description(self):
return "Create a new cluster"
def get_parser(self):
usage = "usage: ccm create [options] cluster_name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('--no-switch', action="store_true", dest="no_switch",
help="Don't switch to the newly created cluster", default=False)
parser.add_option('-p', '--partitioner', type="string", dest="partitioner",
help="Set the cluster partitioner class")
parser.add_option('-v', "--version", type="string", dest="version",
help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
parser.add_option('-o', "--opsc", type="string", dest="opscenter",
help="Download and use provided opscenter version to install with DSE. Will have no effect on cassandra installs)", default=None)
parser.add_option("--dse", action="store_true", dest="dse",
help="Use with -v to indicate that the version being loaded is DSE")
parser.add_option("--dse-username", type="string", dest="dse_username",
help="The username to use to download DSE with", default=None)
parser.add_option("--dse-password", type="string", dest="dse_password",
help="The password to use to download DSE with", default=None)
parser.add_option("--install-dir", type="string", dest="install_dir",
help="Path to the cassandra or dse directory to use [default %default]", default="./")
parser.add_option('-n', '--nodes', type="string", dest="nodes",
help="Populate the new cluster with that number of nodes (a single int or a colon-separate list of ints for multi-dc setups)")
parser.add_option('-i', '--ipprefix', type="string", dest="ipprefix",
help="Ipprefix to use to create the ip of a node while populating")
parser.add_option('-I', '--ip-format', type="string", dest="ipformat",
help="Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)")
parser.add_option('-s', "--start", action="store_true", dest="start_nodes",
help="Start nodes added through -s", default=False)
parser.add_option('-d', "--debug", action="store_true", dest="debug",
help="If -s is used, show the standard output when starting the nodes", default=False)
parser.add_option('-b', "--binary-protocol", action="store_true", dest="binary_protocol",
help="Enable the binary protocol (starting from C* 1.2.5 the binary protocol is started by default and this option is a no-op)", default=False)
parser.add_option('-D', "--debug-log", action="store_true", dest="debug_log",
help="With -n, sets debug logging on the new nodes", default=False)
parser.add_option('-T', "--trace-log", action="store_true", dest="trace_log",
help="With -n, sets trace logging on the new nodes", default=False)
parser.add_option("--vnodes", action="store_true", dest="vnodes",
help="Use vnodes (256 tokens). Must be paired with -n.", default=False)
parser.add_option('--jvm_arg', action="append", dest="jvm_args",
help="Specify a JVM argument", default=[])
parser.add_option('--profile', action="store_true", dest="profile",
help="Start the nodes with yourkit agent (only valid with -s)", default=False)
parser.add_option('--profile-opts', type="string", action="store", dest="profile_options",
help="Yourkit options when profiling", default=None)
parser.add_option('--ssl', type="string", dest="ssl_path",
help="Path to keystore.jks and cassandra.crt files (and truststore.jks [not required])", default=None)
parser.add_option('--require_client_auth', action="store_true", dest="require_client_auth",
help="Enable client authentication (only vaid with --ssl)", default=False)
parser.add_option('--node-ssl', type="string", dest="node_ssl_path",
help="Path to keystore.jks and truststore.jks for internode encryption", default=None)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, cluster_name=True)
if options.ipprefix and options.ipformat:
parser.print_help()
parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
self.nodes = parse_populate_count(options.nodes)
if self.options.vnodes and self.nodes is None:
print_("Can't set --vnodes if not populating cluster in this command.")
parser.print_help()
exit(1)
if not options.version:
try:
common.validate_install_dir(options.install_dir)
except ArgumentError:
parser.print_help()
parser.error("%s is not a valid cassandra directory. You must define a cassandra dir or version." % options.install_dir)
def run(self):
try:
if self.options.dse or (not self.options.version and common.isDse(self.options.install_dir)):
cluster = DseCluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, dse_username=self.options.dse_username, dse_password=self.options.dse_password, opscenter=self.options.opscenter, verbose=True)
else:
cluster = Cluster(self.path, self.name, install_dir=self.options.install_dir, version=self.options.version, verbose=True)
except OSError as e:
cluster_dir = os.path.join(self.path, self.name)
import traceback
print_('Cannot create cluster: %s\n%s' % (str(e), traceback.format_exc()), file=sys.stderr)
exit(1)
if self.options.partitioner:
cluster.set_partitioner(self.options.partitioner)
if cluster.cassandra_version() >= "1.2.5":
self.options.binary_protocol = True
if self.options.binary_protocol:
cluster.set_configuration_options({ 'start_native_transport' : True })
if cluster.cassandra_version() >= "1.2" and self.options.vnodes:
cluster.set_configuration_options({ 'num_tokens' : 256 })
if not self.options.no_switch:
common.switch_cluster(self.path, self.name)
print_('Current cluster is now: %s' % self.name)
if not (self.options.ipprefix or self.options.ipformat):
self.options.ipformat = '127.0.0.%d'
if self.options.ssl_path:
cluster.enable_ssl(self.options.ssl_path, self.options.require_client_auth)
if self.options.node_ssl_path:
cluster.enable_internode_ssl(self.options.node_ssl_path)
if self.nodes is not None:
try:
if self.options.debug_log:
cluster.set_log_level("DEBUG")
if self.options.trace_log:
cluster.set_log_level("TRACE")
cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
if self.options.start_nodes:
profile_options = None
if self.options.profile:
profile_options = {}
if self.options.profile_options:
profile_options['options'] = self.options.profile_options
if cluster.start(verbose=self.options.debug_log, wait_for_binary_proto=self.options.binary_protocol, jvm_args=self.options.jvm_args, profile_options=profile_options) is None:
details = ""
if not self.options.debug_log:
details = " (you can use --debug-log for more information)"
print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterAddCmd(Cmd):
def description(self):
return "Add a new node to the current cluster"
def get_parser(self):
usage = "usage: ccm add [options] node_name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-b', '--auto-bootstrap', action="store_true", dest="bootstrap",
help="Set auto bootstrap for the node", default=False)
parser.add_option('-s', '--seeds', action="store_true", dest="is_seed",
help="Configure this node as a seed", default=False)
parser.add_option('-i', '--itf', type="string", dest="itfs",
help="Set host and port for thrift, the binary protocol and storage (format: host[:port])")
parser.add_option('-t', '--thrift-itf', type="string", dest="thrift_itf",
help="Set the thrift host and port for the node (format: host[:port])")
parser.add_option('-l', '--storage-itf', type="string", dest="storage_itf",
help="Set the storage (cassandra internal) host and port for the node (format: host[:port])")
parser.add_option('--binary-itf', type="string", dest="binary_itf",
help="Set the binary protocol host and port for the node (format: host[:port]).")
parser.add_option('-j', '--jmx-port', type="string", dest="jmx_port",
help="JMX port for the node", default="7199")
parser.add_option('-r', '--remote-debug-port', type="string", dest="remote_debug_port",
help="Remote Debugging Port for the node", default="2000")
parser.add_option('-n', '--token', type="string", dest="initial_token",
help="Initial token for the node", default=None)
parser.add_option('-d', '--data-center', type="string", dest="data_center",
help="Datacenter name this node is part of", default=None)
parser.add_option('--dse', action="store_true", dest="dse_node",
help="Add node to DSE Cluster", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, node_name=True, load_cluster=True, load_node=False)
if options.itfs is None and (options.thrift_itf is None or options.storage_itf is None or options.binary_itf is None):
print_('Missing thrift and/or storage and/or binary protocol interfaces or jmx port', file=sys.stderr)
parser.print_help()
exit(1)
used_jmx_ports = [node.jmx_port for node in self.cluster.nodelist()]
if options.jmx_port in used_jmx_ports:
print_("This JMX port is already in use. Choose another.", file=sys.stderr)
parser.print_help()
exit(1)
if options.thrift_itf is None:
options.thrift_itf = options.itfs
if options.storage_itf is None:
options.storage_itf = options.itfs
if options.binary_itf is None:
options.binary_itf = options.itfs
self.thrift = common.parse_interface(options.thrift_itf, 9160)
self.storage = common.parse_interface(options.storage_itf, 7000)
self.binary = common.parse_interface(options.binary_itf, 9042)
if self.binary[0] != self.thrift[0]:
print_('Cannot set a binary address different from the thrift one', file=sys.stderr)
exit(1)
self.jmx_port = options.jmx_port
self.remote_debug_port = options.remote_debug_port
self.initial_token = options.initial_token
def run(self):
try:
if self.options.dse_node:
node = DseNode(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
else:
node = Node(self.name, self.cluster, self.options.bootstrap, self.thrift, self.storage, self.jmx_port, self.remote_debug_port, self.initial_token, binary_interface=self.binary)
self.cluster.add(node, self.options.is_seed, self.options.data_center)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterPopulateCmd(Cmd):
def description(self):
return "Add a group of new nodes with default options"
def get_parser(self):
usage = "usage: ccm populate -n <node count> {-d}"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-n', '--nodes', type="string", dest="nodes",
help="Number of nodes to populate with (a single int or a colon-separate list of ints for multi-dc setups)")
parser.add_option('-d', '--debug', action="store_true", dest="debug",
help="Enable remote debugging options", default=False)
parser.add_option('--vnodes', action="store_true", dest="vnodes",
help="Populate using vnodes", default=False)
parser.add_option('-i', '--ipprefix', type="string", dest="ipprefix",
help="Ipprefix to use to create the ip of a node")
parser.add_option('-I', '--ip-format', type="string", dest="ipformat",
help="Format to use when creating the ip of a node (supports enumerating ipv6-type addresses like fe80::%d%lo0)")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
if options.ipprefix and options.ipformat:
parser.print_help()
parser.error("%s and %s may not be used together" % (parser.get_option('-i'), parser.get_option('-I')))
self.nodes = parse_populate_count(options.nodes)
if self.nodes is None:
parser.print_help()
parser.error("Not a valid number of nodes. Did you use -n?")
def run(self):
try:
if self.cluster.cassandra_version() >= "1.2" and self.options.vnodes:
self.cluster.set_configuration_options({ 'num_tokens' : 256 })
if not (self.options.ipprefix or self.options.ipformat):
self.options.ipformat = '127.0.0.%d'
self.cluster.populate(self.nodes, self.options.debug, use_vnodes=self.options.vnodes, ipprefix=self.options.ipprefix, ipformat=self.options.ipformat)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterListCmd(Cmd):
def description(self):
return "List existing clusters"
def get_parser(self):
usage = "usage: ccm list [options]"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args)
def run(self):
try:
current = common.current_cluster_name(self.path)
except Exception:
current = ''
for dir in os.listdir(self.path):
if os.path.exists(os.path.join(self.path, dir, 'cluster.conf')):
print_(" %s%s" % ('*' if current == dir else ' ', dir))
class ClusterSwitchCmd(Cmd):
def description(self):
return "Switch of current (active) cluster"
def get_parser(self):
usage = "usage: ccm switch [options] cluster_name"
return self._get_default_parser(usage, self.description())
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, cluster_name=True)
if not os.path.exists(os.path.join(self.path, self.name, 'cluster.conf')):
print_("%s does not appear to be a valid cluster (use ccm list to view valid clusters)" % self.name, file=sys.stderr)
exit(1)
def run(self):
common.switch_cluster(self.path, self.name)
class ClusterStatusCmd(Cmd):
def description(self):
return "Display status on the current cluster"
def get_parser(self):
usage = "usage: ccm status [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print full information on all nodes", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
self.cluster.show(self.options.verbose)
class ClusterRemoveCmd(Cmd):
def description(self):
return "Remove the current or specified cluster (delete all data)"
def get_parser(self):
usage = "usage: ccm remove [options] [cluster_name]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
self.other_cluster = None
if len(args) > 0:
# Setup to remove the specified cluster:
Cmd.validate(self, parser, options, args)
self.other_cluster = args[0]
if not os.path.exists(os.path.join(
self.path, self.other_cluster, 'cluster.conf')):
print_("%s does not appear to be a valid cluster" \
" (use ccm list to view valid clusters)" \
% self.other_cluster, file=sys.stderr)
exit(1)
else:
# Setup to remove the current cluster:
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
if self.other_cluster:
# Remove the specified cluster:
cluster = ClusterFactory.load(self.path, self.other_cluster)
cluster.remove()
# Remove CURRENT flag if the specified cluster is the current cluster:
if self.other_cluster == common.current_cluster_name(self.path):
os.remove(os.path.join(self.path, 'CURRENT'))
else:
# Remove the current cluster:
self.cluster.remove()
os.remove(os.path.join(self.path, 'CURRENT'))
class ClusterClearCmd(Cmd):
def description(self):
return "Clear the current cluster data (and stop all nodes)"
def get_parser(self):
usage = "usage: ccm clear [options]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
self.cluster.clear()
class ClusterLivesetCmd(Cmd):
def description(self):
return "Print a comma-separated list of addresses of running nodes (handful in scripts)"
def get_parser(self):
usage = "usage: ccm liveset [options]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
l = [ node.network_interfaces['storage'][0] for node in list(self.cluster.nodes.values()) if node.is_live() ]
print_(",".join(l))
class ClusterSetdirCmd(Cmd):
def description(self):
return "Set the install directory (cassandra or dse) to use"
def get_parser(self):
usage = "usage: ccm setdir [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', "--version", type="string", dest="version",
help="Download and use provided cassandra or dse version. If version is of the form 'git:<branch name>', then the specified cassandra branch will be downloaded from the git repo and compiled. (takes precedence over --install-dir)", default=None)
parser.add_option("--install-dir", type="string", dest="install_dir",
help="Path to the cassandra or dse directory to use [default %default]", default="./")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
try:
self.cluster.set_install_dir(install_dir=self.options.install_dir, version=self.options.version, verbose=True)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterClearrepoCmd(Cmd):
def description(self):
return "Cleanup downloaded cassandra sources"
def get_parser(self):
usage = "usage: ccm clearrepo [options]"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args)
def run(self):
repository.clean_all()
class ClusterStartCmd(Cmd):
def description(self):
return "Start all the non started nodes of the current cluster"
def get_parser(self):
usage = "usage: ccm cluster start [options]"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print standard output of cassandra process", default=False)
parser.add_option('--no-wait', action="store_true", dest="no_wait",
help="Do not wait for cassandra node to be ready", default=False)
parser.add_option('--wait-other-notice', action="store_true", dest="wait_other_notice",
help="Wait until all other live nodes of the cluster have marked this node UP", default=False)
parser.add_option('--wait-for-binary-proto', action="store_true", dest="wait_for_binary_proto",
help="Wait for the binary protocol to start", default=False)
parser.add_option('--jvm_arg', action="append", dest="jvm_args",
help="Specify a JVM argument", default=[])
parser.add_option('--profile', action="store_true", dest="profile",
help="Start the nodes with yourkit agent (only valid with -s)", default=False)
parser.add_option('--profile-opts', type="string", action="store", dest="profile_options",
help="Yourkit options when profiling", default=None)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
try:
profile_options = None
if self.options.profile:
profile_options = {}
if self.options.profile_options:
profile_options['options'] = self.options.profile_options
if len(self.cluster.nodes) == 0:
print_("No node in this cluster yet. Use the populate command before starting.")
exit(1)
if self.cluster.start(no_wait=self.options.no_wait,
wait_other_notice=self.options.wait_other_notice,
wait_for_binary_proto=self.options.wait_for_binary_proto,
verbose=self.options.verbose,
jvm_args=self.options.jvm_args,
profile_options=profile_options) is None:
details = ""
if not self.options.verbose:
details = " (you can use --verbose for more information)"
print_("Error starting nodes, see above for details%s" % details, file=sys.stderr)
exit(1)
except NodeError as e:
print_(str(e), file=sys.stderr)
print_("Standard error output is:", file=sys.stderr)
for line in e.process.stderr:
print_(line.rstrip('\n'), file=sys.stderr)
exit(1)
class ClusterStopCmd(Cmd):
def description(self):
return "Stop all the nodes of the cluster"
def get_parser(self):
usage = "usage: ccm cluster stop [options] name"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="Print nodes that were not running", default=False)
parser.add_option('--no-wait', action="store_true", dest="no_wait",
help="Do not wait for the node to be stopped", default=False)
parser.add_option('-g', '--gently', action="store_true", dest="gently",
help="Shut down gently (default)", default=True)
parser.add_option('--not-gently', action="store_false", dest="gently",
help="Shut down immediately (kill -9)", default=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
try:
not_running = self.cluster.stop(not self.options.no_wait, gently=self.options.gently)
if self.options.verbose and len(not_running) > 0:
sys.stdout.write("The following nodes were not running: ")
for node in not_running:
sys.stdout.write(node.name + " ")
print_("")
except NodeError as e:
print_(str(e), file=sys.stderr)
exit(1)
class _ClusterNodetoolCmd(Cmd):
def get_parser(self):
parser = self._get_default_parser(self.usage, self.description())
return parser
def description(self):
return self.descr_text
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
self.cluster.nodetool(self.nodetool_cmd)
class ClusterFlushCmd(_ClusterNodetoolCmd):
usage = "usage: ccm cluster flush [options] name"
nodetool_cmd = 'flush'
descr_text = "Flush all (running) nodes of the cluster"
class ClusterCompactCmd(_ClusterNodetoolCmd):
usage = "usage: ccm cluster compact [options] name"
nodetool_cmd = 'compact'
descr_text = "Compact all (running) node of the cluster"
class ClusterDrainCmd(_ClusterNodetoolCmd):
usage = "usage: ccm cluster drain [options] name"
nodetool_cmd = 'drain'
descr_text = "Drain all (running) node of the cluster"
class ClusterStressCmd(Cmd):
def description(self):
return "Run stress using all live nodes"
def get_parser(self):
usage = "usage: ccm stress [options] [stress_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.stress_options = parser.get_ignored() + args
def run(self):
try:
self.cluster.stress(self.stress_options)
except Exception as e:
print_(e, file=sys.stderr)
class ClusterUpdateconfCmd(Cmd):
def description(self):
return "Update the cassandra config files for all nodes"
def get_parser(self):
usage = "usage: ccm updateconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'compaction_throughput_mb_per_sec: 32'; nested options can be separated with a period like 'client_encryption_options.enabled: false'"
parser = self._get_default_parser(usage, self.description())
parser.add_option('--no-hh', '--no-hinted-handoff', action="store_false",
dest="hinted_handoff", default=True, help="Disable hinted handoff")
parser.add_option('--batch-cl', '--batch-commit-log', action="store_true",
dest="cl_batch", default=False, help="Set commit log to batch mode")
parser.add_option('--rt', '--rpc-timeout', action="store", type='int',
dest="rpc_timeout", help="Set rpc timeout")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
try:
self.setting = common.parse_settings(args)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
self.setting['hinted_handoff_enabled'] = self.options.hinted_handoff
if self.options.rpc_timeout is not None:
if self.cluster.cassandra_version() < "1.2":
self.setting['rpc_timeout_in_ms'] = self.options.rpc_timeout
else:
self.setting['read_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['range_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['write_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['truncate_request_timeout_in_ms'] = self.options.rpc_timeout
self.setting['request_timeout_in_ms'] = self.options.rpc_timeout
self.cluster.set_configuration_options(values=self.setting, batch_commitlog=self.options.cl_batch)
class ClusterUpdatedseconfCmd(Cmd):
def description(self):
return "Update the dse config files for all nodes"
def get_parser(self):
usage = "usage: ccm updatedseconf [options] [ new_setting | ... ], where new_setting should be a string of the form 'max_solr_concurrency_per_core: 2'; nested options can be separated with a period like 'cql_slow_log_options.enabled: true'"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
try:
self.setting = common.parse_settings(args)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
self.cluster.set_dse_configuration_options(values=self.setting)
#
# Class implements the functionality of updating log4j-server.properties
# on ALL nodes by copying the given config into
# ~/.ccm/name-of-cluster/nodeX/conf/log4j-server.properties
#
class ClusterUpdatelog4jCmd(Cmd):
def description(self):
return "Update the Cassandra log4j-server.properties configuration file on all nodes"
def get_parser(self):
usage = "usage: ccm updatelog4j -p <log4j config>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
parser.add_option('-p', '--path', type="string", dest="log4jpath",
help="Path to new Cassandra log4j configuration file")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
try:
self.log4jpath = options.log4jpath
if self.log4jpath is None:
raise KeyError("[Errno] -p or --path <path of new log4j congiguration file> is not provided")
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
except KeyError as e:
print_(str(e), file=sys.stderr)
exit(1)
def run(self):
try:
self.cluster.update_log4j(self.log4jpath)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
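# Hedged CLI sketch (the path below is an example):
#   ccm updatelog4j -p /path/to/log4j-server.properties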
class ClusterCliCmd(Cmd):
def description(self):
return "Launch cassandra cli connected to some live node (if any)"
def get_parser(self):
usage = "usage: ccm cli [options] [cli_options]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
parser.add_option('-x', '--exec', type="string", dest="cmds", default=None,
help="Execute the specified commands and exit")
parser.add_option('-v', '--verbose', action="store_true", dest="verbose",
help="With --exec, show cli output after completion", default=False)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.cli_options = parser.get_ignored() + args[1:]
def run(self):
self.cluster.run_cli(self.options.cmds, self.options.verbose, self.cli_options)
class ClusterBulkloadCmd(Cmd):
def description(self):
return "Bulkload files into the cluster"
def get_parser(self):
usage = "usage: ccm bulkload [options] [sstable_dir]"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.loader_options = parser.get_ignored() + args
def run(self):
self.cluster.bulkload(self.loader_options)
class ClusterScrubCmd(Cmd):
def description(self):
return "Scrub files"
def get_parser(self):
usage = "usage: ccm scrub [options] <keyspace> <cf>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.scrub_options = parser.get_ignored() + args
def run(self):
self.cluster.scrub(self.scrub_options)
class ClusterVerifyCmd(Cmd):
def description(self):
return "Verify files"
def get_parser(self):
usage = "usage: ccm verify [options] <keyspace> <cf>"
parser = self._get_default_parser(usage, self.description(), ignore_unknown_options=True)
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
self.verify_options = parser.get_ignored() + args
def run(self):
self.cluster.verify(self.verify_options)
class ClusterSetlogCmd(Cmd):
def description(self):
return "Set log level (INFO, DEBUG, ...) with/without Java class for all node of the cluster - require a node restart"
def get_parser(self):
usage = "usage: ccm setlog [options] level"
parser = self._get_default_parser(usage, self.description())
parser.add_option('-c', '--class', type="string", dest="class_name", default=None,
help="Optional java class/package. Logging will be set for only this class/package if set")
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
if len(args) == 0:
print_('Missing log level', file=sys.stderr)
parser.print_help()
exit(1)
self.level = args[0]
def run(self):
try:
self.cluster.set_log_level(self.level, self.options.class_name)
except common.ArgumentError as e:
print_(str(e), file=sys.stderr)
exit(1)
class ClusterInvalidatecacheCmd(Cmd):
def description(self):
return "Destroys ccm's local git cache."
def get_parser(self):
usage = "usage: ccm invalidatecache"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args)
def run(self):
try:
common.invalidate_cache()
except Exception as e:
print_(str(e), file=sys.stderr)
print_("Error while deleting cache. Please attempt manually.")
exit(1)
class ClusterChecklogerrorCmd(Cmd):
def description(self):
return "Check for errors in log file of each node."
def get_parser(self):
usage = "usage: ccm checklogerror"
parser = self._get_default_parser(usage, self.description())
return parser
def validate(self, parser, options, args):
Cmd.validate(self, parser, options, args, load_cluster=True)
def run(self):
for node in self.cluster.nodelist():
errors = node.grep_log_for_errors()
for mylist in errors:
for line in mylist:
print_(line)
|
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from collections import OrderedDict
from knack.log import get_logger
from azure.cli.core.util import empty_on_404
from azure.cli.core.profiles import ResourceType, PROFILE_TYPE
from azure.cli.core.commands import CliCommandType, DeploymentOutputLongRunningOperation
from azure.cli.core.commands.arm import handle_template_based_exception
from azure.cli.command_modules.resource._client_factory import (
cf_resource_groups, cf_providers, cf_features, cf_feature_registrations, cf_tags, cf_deployments,
cf_deployment_operations, cf_policy_definitions, cf_policy_set_definitions, cf_policy_exemptions, cf_resource_links,
cf_resource_deploymentscripts, cf_resource_managedapplications, cf_resource_managedappdefinitions, cf_management_groups, cf_management_group_subscriptions, cf_resource_templatespecs)
from azure.cli.command_modules.resource._validators import (
process_deployment_create_namespace, process_ts_create_or_update_namespace, _validate_template_spec, _validate_template_spec_out,
process_assign_identity_namespace, process_assignment_create_namespace)
from ._exception_handler import managementgroups_exception_handler
logger = get_logger(__name__)
# Resource group commands
def transform_resource_group_list(result):
return [OrderedDict([
('Name', r['name']), ('Location', r['location']), ('Status', r['properties']['provisioningState'])]) for r in result]
def transform_resource_list(result):
transformed = []
for r in result:
res = OrderedDict([('Name', r['name']), ('ResourceGroup', r['resourceGroup']), ('Location', r['location']), ('Type', r['type'])])
try:
res['Status'] = r['properties']['provisioningStatus']
except TypeError:
res['Status'] = ' '
transformed.append(res)
return transformed
def transform_deployment(result):
r = result
format_result = OrderedDict([('Name', r['name']),
('State', r['properties']['provisioningState']),
('Timestamp', r['properties']['timestamp']),
('Mode', r['properties']['mode'])])
# For deployments that are not under the resource group level, the return data does not contain 'resourceGroup'
if 'resourceGroup' in r and r['resourceGroup']:
format_result['ResourceGroup'] = r['resourceGroup']
return format_result
def transform_deployments_list(result):
sort_list = sorted(result, key=lambda deployment: deployment['properties']['timestamp'])
return [transform_deployment(r) for r in sort_list]
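# Hedged sketch (the sample payload below is made up, not an Azure API response):
# shows the row shape these transformers produce for table output such as
# `az group list --output table`.
def _demo_transform_resource_group_list():
    sample = [{'name': 'rg1', 'location': 'westus',
               'properties': {'provisioningState': 'Succeeded'}}]
    # -> [OrderedDict([('Name', 'rg1'), ('Location', 'westus'), ('Status', 'Succeeded')])]
    return transform_resource_group_list(sample)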
# pylint: disable=too-many-statements
def load_command_table(self, _):
from azure.cli.core.commands.arm import deployment_validate_table_format
resource_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.resource.custom#{}')
resource_group_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.resources.operations#ResourceGroupsOperations.{}',
client_factory=cf_resource_groups,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_provider_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.resources.operations#ProvidersOperations.{}',
client_factory=cf_providers,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_feature_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.features.operations#FeaturesOperations.{}',
client_factory=cf_features,
resource_type=ResourceType.MGMT_RESOURCE_FEATURES
)
resource_feature_registration_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.features.operations#SubscriptionFeatureRegistrationsOperations.{}',
client_factory=cf_feature_registrations,
resource_type=ResourceType.MGMT_RESOURCE_FEATURES
)
resource_tag_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.resources.operations#TagsOperations.{}',
client_factory=cf_tags,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_deployment_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.resources.operations#DeploymentsOperations.{}',
client_factory=cf_deployments,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_deployment_operation_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.resources.operations#DeploymentOperationsOperations.{}',
client_factory=cf_deployment_operations,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_policy_definitions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.policy.operations#PolicyDefinitionsOperations.{}',
client_factory=cf_policy_definitions,
resource_type=ResourceType.MGMT_RESOURCE_POLICY
)
resource_policy_set_definitions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.policy.operations#PolicySetDefinitionsOperations.{}',
client_factory=cf_policy_set_definitions,
resource_type=ResourceType.MGMT_RESOURCE_POLICY
)
resource_policy_exemptions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.policy.operations#PolicyExemptionsOperations.{}',
client_factory=cf_policy_exemptions,
resource_type=ResourceType.MGMT_RESOURCE_POLICY
)
resource_lock_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.locks.operations#ManagementLocksOperations.{}',
resource_type=ResourceType.MGMT_RESOURCE_LOCKS
)
resource_link_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.links.operations#ResourceLinksOperations.{}',
client_factory=cf_resource_links,
resource_type=ResourceType.MGMT_RESOURCE_LINKS
)
resource_deploymentscripts_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.deploymentscripts.operations#ResourceLinksOperations.{}',
client_factory=cf_resource_deploymentscripts,
resource_type=ResourceType.MGMT_RESOURCE_DEPLOYMENTSCRIPTS
)
resource_managedapp_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.managedapplications.operations#ApplicationsOperations.{}',
client_factory=cf_resource_managedapplications,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_managedapp_def_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.managedapplications.operations#ApplicationDefinitionsOperations.{}',
client_factory=cf_resource_managedappdefinitions,
resource_type=ResourceType.MGMT_RESOURCE_RESOURCES
)
resource_managementgroups_sdk = CliCommandType(
operations_tmpl='azure.mgmt.managementgroups.operations#ManagementGroupsOperations.{}',
client_factory=cf_management_groups,
exception_handler=managementgroups_exception_handler
)
resource_managementgroups_subscriptions_sdk = CliCommandType(
operations_tmpl='azure.mgmt.managementgroups.operations#ManagementGroupSubscriptionsOperations.{}',
client_factory=cf_management_group_subscriptions,
exception_handler=managementgroups_exception_handler
)
resource_managementgroups_update_type = CliCommandType(
operations_tmpl='azure.cli.command_modules.resource.custom#{}',
client_factory=cf_management_groups,
exception_handler=managementgroups_exception_handler
)
resource_templatespecs_sdk = CliCommandType(
operations_tmpl='azure.mgmt.resource.templatespecs.operations#ResourceLinksOperations.{}',
client_factory=cf_resource_templatespecs,
resource_type=ResourceType.MGMT_RESOURCE_TEMPLATESPECS
)
with self.command_group('account lock', resource_lock_sdk, resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
g.custom_command('create', 'create_lock')
g.custom_command('delete', 'delete_lock')
g.custom_command('list', 'list_locks')
g.custom_show_command('show', 'get_lock')
g.custom_command('update', 'update_lock')
with self.command_group('group', resource_group_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.command('delete', 'begin_delete', supports_no_wait=True, confirmation=True)
g.show_command('show', 'get')
g.command('exists', 'check_existence')
g.custom_command('list', 'list_resource_groups', table_transformer=transform_resource_group_list)
g.custom_command('create', 'create_resource_group')
g.custom_command('export', 'export_group_as_template')
g.generic_update_command('update', custom_func_name='update_resource_group', custom_func_type=resource_custom)
g.wait_command('wait')
with self.command_group('group lock', resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
g.custom_command('create', 'create_lock')
g.custom_command('delete', 'delete_lock')
g.custom_command('list', 'list_locks')
g.custom_show_command('show', 'get_lock')
g.custom_command('update', 'update_lock')
with self.command_group('resource', resource_custom, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('create', 'create_resource')
g.custom_command('delete', 'delete_resource')
g.custom_show_command('show', 'show_resource')
g.custom_command('list', 'list_resources', table_transformer=transform_resource_list)
g.custom_command('tag', 'tag_resource')
g.custom_command('move', 'move_resource')
g.custom_command('invoke-action', 'invoke_resource_action', transform=DeploymentOutputLongRunningOperation(self.cli_ctx))
g.generic_update_command('update', getter_name='show_resource', setter_name='update_resource',
client_factory=None)
g.wait_command('wait', getter_name='show_resource')
with self.command_group('resource lock', resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
g.custom_command('create', 'create_lock')
g.custom_command('delete', 'delete_lock')
g.custom_command('list', 'list_locks')
g.custom_show_command('show', 'get_lock')
g.custom_command('update', 'update_lock')
# Resource provider commands
with self.command_group('provider', resource_provider_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.command('list', 'list')
g.show_command('show', 'get')
g.custom_command('register', 'register_provider')
g.custom_command('unregister', 'unregister_provider')
g.custom_command('operation list', 'list_provider_operations')
g.custom_command('permission list', 'list_provider_permissions')
g.custom_show_command('operation show', 'show_provider_operations')
# Resource feature commands
with self.command_group('feature', resource_feature_sdk, client_factory=cf_features, resource_type=PROFILE_TYPE,
min_api='2019-03-02-hybrid') as g:
feature_table_transform = '{Name:name, RegistrationState:properties.state}'
g.custom_command('list', 'list_features', table_transformer='[].' + feature_table_transform)
g.show_command('show', 'get', table_transformer=feature_table_transform)
g.custom_command('register', 'register_feature')
g.custom_command('unregister', 'unregister_feature')
with self.command_group('feature registration', resource_feature_registration_sdk, client_factory=cf_feature_registrations, resource_type=PROFILE_TYPE,
min_api='2021-07-01') as g:
feature_table_transform = '{Name:name, RegistrationState:properties.state}'
g.custom_command('list', 'list_feature_registrations', table_transformer='[].' + feature_table_transform)
g.show_command('show', 'get', table_transformer=feature_table_transform)
g.custom_command('create', 'create_feature_registration')
        g.custom_command('delete', 'delete_feature_registration', confirmation=True)
# Tag commands
with self.command_group('tag', resource_tag_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'get_tag_at_scope')
g.custom_command('create', 'create_or_update_tag_at_scope')
g.custom_command('delete', 'delete_tag_at_scope', confirmation=True)
g.custom_command('update', 'update_tag_at_scope', min_api='2019-10-01')
g.command('add-value', 'create_or_update_value')
g.command('remove-value', 'delete_value')
# az group deployment
with self.command_group('group deployment', resource_deployment_sdk, deprecate_info=self.deprecate(redirect='deployment group', hide=True)) as g:
g.custom_command('create', 'deploy_arm_template', supports_no_wait=True, validator=process_deployment_create_namespace,
table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
g.command('list', 'list_by_resource_group', table_transformer=transform_deployments_list, min_api='2017-05-10')
g.command('list', 'list', table_transformer=transform_deployments_list, max_api='2016-09-01')
g.show_command('show', 'get', table_transformer=transform_deployment)
g.command('delete', 'begin_delete', supports_no_wait=True)
g.custom_command('validate', 'validate_arm_template', table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
g.custom_command('export', 'export_deployment_as_template')
g.wait_command('wait')
g.command('cancel', 'cancel')
with self.command_group('group deployment operation', resource_deployment_operation_sdk, deprecate_info=self.deprecate(redirect='deployment operation group', hide=True)) as g:
g.command('list', 'list')
g.custom_show_command('show', 'get_deployment_operations', client_factory=cf_deployment_operations)
# az deployment
with self.command_group('deployment', resource_deployment_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployments_at_subscription_scope', table_transformer=transform_deployments_list, deprecate_info=g.deprecate(redirect='deployment sub list', hide=True))
g.custom_show_command('show', 'get_deployment_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub show', hide=True))
g.custom_command('delete', 'delete_deployment_at_subscription_scope', supports_no_wait=True, deprecate_info=g.deprecate(redirect='deployment sub delete', hide=True))
g.custom_command('validate', 'validate_arm_template_at_subscription_scope', validator=process_deployment_create_namespace,
table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception,
deprecate_info=g.deprecate(redirect='deployment sub validate', hide=True))
g.custom_command('create', 'deploy_arm_template_at_subscription_scope', supports_no_wait=True, validator=process_deployment_create_namespace,
exception_handler=handle_template_based_exception, deprecate_info=g.deprecate(redirect='deployment sub create', hide=True))
g.custom_command('export', 'export_template_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub export', hide=True))
g.custom_wait_command('wait', 'get_deployment_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub wait', hide=True))
g.custom_command('cancel', 'cancel_deployment_at_subscription_scope', deprecate_info=g.deprecate(redirect='deployment sub cancel', hide=True))
with self.command_group('deployment operation', resource_deployment_operation_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployment_operations_at_subscription_scope',
deprecate_info=self.deprecate(redirect='deployment operation sub list', hide=True))
g.custom_show_command('show', 'get_deployment_operations_at_subscription_scope', client_factory=cf_deployment_operations,
deprecate_info=self.deprecate(redirect='deployment operation sub show', hide=True))
# az deployment sub
with self.command_group('deployment sub', resource_deployment_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployments_at_subscription_scope', table_transformer=transform_deployments_list)
g.custom_show_command('show', 'get_deployment_at_subscription_scope', table_transformer=transform_deployment)
g.custom_command('delete', 'delete_deployment_at_subscription_scope', supports_no_wait=True)
g.custom_command('validate', 'validate_arm_template_at_subscription_scope', validator=process_deployment_create_namespace,
table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
g.custom_command('create', 'deploy_arm_template_at_subscription_scope', supports_no_wait=True, validator=process_deployment_create_namespace,
table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
g.custom_command('what-if', 'what_if_deploy_arm_template_at_subscription_scope', validator=process_deployment_create_namespace,
exception_handler=handle_template_based_exception, min_api='2019-07-01')
g.custom_command('export', 'export_template_at_subscription_scope')
g.custom_wait_command('wait', 'get_deployment_at_subscription_scope')
g.custom_command('cancel', 'cancel_deployment_at_subscription_scope')
with self.command_group('deployment operation sub', resource_deployment_operation_sdk, min_api='2018-05-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployment_operations_at_subscription_scope')
g.custom_show_command('show', 'get_deployment_operations_at_subscription_scope', client_factory=cf_deployment_operations)
with self.command_group('deployment-scripts', resource_deploymentscripts_sdk, resource_type=ResourceType.MGMT_RESOURCE_DEPLOYMENTSCRIPTS) as g:
g.custom_command('list', 'list_deployment_scripts')
g.custom_show_command('show', 'get_deployment_script')
g.custom_command('show-log', 'get_deployment_script_logs')
g.custom_command('delete', 'delete_deployment_script', confirmation=True)
with self.command_group('ts', resource_templatespecs_sdk, resource_type=ResourceType.MGMT_RESOURCE_TEMPLATESPECS, min_api='2019-06-01-preview') as g:
g.custom_command('create', 'create_template_spec', validator=process_ts_create_or_update_namespace)
g.custom_command('update', 'update_template_spec', validator=process_ts_create_or_update_namespace, confirmation=True)
g.custom_command('export', 'export_template_spec', validator=_validate_template_spec_out)
g.custom_show_command('show', 'get_template_spec', validator=_validate_template_spec)
g.custom_command('list', 'list_template_specs')
g.custom_command('delete', 'delete_template_spec', validator=_validate_template_spec, confirmation=True)
# az deployment group
with self.command_group('deployment group', resource_deployment_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployments_at_resource_group', table_transformer=transform_deployments_list)
g.custom_show_command('show', 'get_deployment_at_resource_group', table_transformer=transform_deployment)
g.custom_command('delete', 'delete_deployment_at_resource_group', supports_no_wait=True)
g.custom_command('validate', 'validate_arm_template_at_resource_group', validator=process_deployment_create_namespace,
table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
g.custom_command('create', 'deploy_arm_template_at_resource_group', supports_no_wait=True, validator=process_deployment_create_namespace,
table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
g.custom_command('what-if', 'what_if_deploy_arm_template_at_resource_group', validator=process_deployment_create_namespace,
exception_handler=handle_template_based_exception, min_api='2019-07-01')
g.custom_command('export', 'export_template_at_resource_group')
g.custom_wait_command('wait', 'get_deployment_at_resource_group')
g.custom_command('cancel', 'cancel_deployment_at_resource_group')
with self.command_group('deployment operation group', resource_deployment_operation_sdk, resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployment_operations_at_resource_group')
g.custom_show_command('show', 'get_deployment_operations_at_resource_group', client_factory=cf_deployment_operations)
# az deployment mg
with self.command_group('deployment mg', resource_deployment_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployments_at_management_group', table_transformer=transform_deployments_list)
g.custom_show_command('show', 'get_deployment_at_management_group', table_transformer=transform_deployment)
g.custom_command('delete', 'delete_deployment_at_management_group', supports_no_wait=True)
g.custom_command('validate', 'validate_arm_template_at_management_group', validator=process_deployment_create_namespace,
table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
g.custom_command('create', 'deploy_arm_template_at_management_group', supports_no_wait=True, validator=process_deployment_create_namespace,
table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
g.custom_command('what-if', 'what_if_deploy_arm_template_at_management_group', validator=process_deployment_create_namespace,
exception_handler=handle_template_based_exception, min_api='2019-10-01')
g.custom_command('export', 'export_template_at_management_group')
g.custom_wait_command('wait', 'get_deployment_at_management_group')
g.custom_command('cancel', 'cancel_deployment_at_management_group')
with self.command_group('deployment operation mg', resource_deployment_operation_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployment_operations_at_management_group')
g.custom_show_command('show', 'get_deployment_operations_at_management_group', client_factory=cf_deployment_operations)
# az deployment tenant
with self.command_group('deployment tenant', resource_deployment_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployments_at_tenant_scope', table_transformer=transform_deployments_list)
g.custom_show_command('show', 'get_deployment_at_tenant_scope', table_transformer=transform_deployment)
g.custom_command('delete', 'delete_deployment_at_tenant_scope', supports_no_wait=True)
g.custom_command('validate', 'validate_arm_template_at_tenant_scope', validator=process_deployment_create_namespace,
table_transformer=deployment_validate_table_format, exception_handler=handle_template_based_exception)
g.custom_command('create', 'deploy_arm_template_at_tenant_scope', supports_no_wait=True, validator=process_deployment_create_namespace,
table_transformer=transform_deployment, exception_handler=handle_template_based_exception)
g.custom_command('what-if', 'what_if_deploy_arm_template_at_tenant_scope', validator=process_deployment_create_namespace,
exception_handler=handle_template_based_exception, min_api='2019-10-01')
g.custom_command('export', 'export_template_at_tenant_scope')
g.custom_wait_command('wait', 'get_deployment_at_tenant_scope')
g.custom_command('cancel', 'cancel_deployment_at_tenant_scope')
with self.command_group('deployment operation tenant', resource_deployment_operation_sdk, min_api='2019-07-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('list', 'list_deployment_operations_at_tenant_scope')
g.custom_show_command('show', 'get_deployment_operations_at_tenant_scope', client_factory=cf_deployment_operations)
with self.command_group('policy assignment', resource_type=ResourceType.MGMT_RESOURCE_POLICY) as g:
g.custom_command('create', 'create_policy_assignment', validator=process_assignment_create_namespace)
g.custom_command('delete', 'delete_policy_assignment')
g.custom_command('list', 'list_policy_assignment')
g.custom_show_command('show', 'show_policy_assignment')
g.custom_command('update', 'update_policy_assignment')
with self.command_group('policy assignment identity', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2018-05-01') as g:
g.custom_command('assign', 'set_identity', validator=process_assign_identity_namespace, min_api='2021-06-01')
g.custom_show_command('show', 'show_identity')
g.custom_command('remove', 'remove_identity')
with self.command_group('policy assignment non-compliance-message', resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2020-09-01') as g:
g.custom_command('create', 'create_policy_non_compliance_message')
g.custom_command('list', 'list_policy_non_compliance_message')
g.custom_command('delete', 'delete_policy_non_compliance_message')
with self.command_group('policy definition', resource_policy_definitions_sdk, resource_type=ResourceType.MGMT_RESOURCE_POLICY) as g:
g.custom_command('create', 'create_policy_definition')
g.custom_command('delete', 'delete_policy_definition')
g.custom_command('list', 'list_policy_definition')
g.custom_show_command('show', 'get_policy_definition')
g.custom_command('update', 'update_policy_definition')
with self.command_group('policy set-definition', resource_policy_set_definitions_sdk, resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2017-06-01-preview') as g:
g.custom_command('create', 'create_policy_setdefinition')
g.custom_command('delete', 'delete_policy_setdefinition')
g.custom_command('list', 'list_policy_setdefinition')
g.custom_show_command('show', 'get_policy_setdefinition')
g.custom_command('update', 'update_policy_setdefinition')
with self.command_group('policy exemption', resource_policy_exemptions_sdk, is_preview=True, resource_type=ResourceType.MGMT_RESOURCE_POLICY, min_api='2020-09-01') as g:
g.custom_command('create', 'create_policy_exemption')
g.custom_command('delete', 'delete_policy_exemption')
g.custom_command('list', 'list_policy_exemption')
g.custom_show_command('show', 'get_policy_exemption')
g.custom_command('update', 'update_policy_exemption')
with self.command_group('lock', resource_type=ResourceType.MGMT_RESOURCE_LOCKS) as g:
g.custom_command('create', 'create_lock')
g.custom_command('delete', 'delete_lock')
g.custom_command('list', 'list_locks')
g.custom_show_command('show', 'get_lock')
g.custom_command('update', 'update_lock')
with self.command_group('resource link', resource_link_sdk, resource_type=ResourceType.MGMT_RESOURCE_LINKS) as g:
g.custom_command('create', 'create_resource_link')
g.command('delete', 'delete')
g.show_command('show', 'get')
g.custom_command('list', 'list_resource_links')
g.custom_command('update', 'update_resource_link')
with self.command_group('managedapp', resource_managedapp_sdk, min_api='2017-05-10', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('create', 'create_application')
g.command('delete', 'begin_delete')
g.custom_show_command('show', 'show_application')
g.custom_command('list', 'list_applications')
with self.command_group('managedapp definition', resource_managedapp_def_sdk, min_api='2017-05-10', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) as g:
g.custom_command('create', 'create_or_update_applicationdefinition')
g.custom_command('update', 'create_or_update_applicationdefinition')
g.command('delete', 'begin_delete')
g.custom_show_command('show', 'show_applicationdefinition')
g.command('list', 'list_by_resource_group', exception_handler=empty_on_404)
with self.command_group('account management-group', resource_managementgroups_sdk, client_factory=cf_management_groups) as g:
g.custom_command('list', 'cli_managementgroups_group_list')
g.custom_show_command('show', 'cli_managementgroups_group_show')
g.custom_command('create', 'cli_managementgroups_group_create')
g.custom_command('delete', 'cli_managementgroups_group_delete')
g.generic_update_command(
'update',
getter_name='cli_managementgroups_group_update_get',
getter_type=resource_managementgroups_update_type,
setter_name='cli_managementgroups_group_update_set',
setter_type=resource_managementgroups_update_type,
custom_func_name='cli_managementgroups_group_update_custom_func',
custom_func_type=resource_managementgroups_update_type,
exception_handler=managementgroups_exception_handler)
with self.command_group('account management-group subscription', resource_managementgroups_subscriptions_sdk, client_factory=cf_management_group_subscriptions) as g:
g.custom_command('add', 'cli_managementgroups_subscription_add')
g.custom_command('remove', 'cli_managementgroups_subscription_remove')
with self.command_group('bicep') as g:
g.custom_command('install', 'install_bicep_cli')
g.custom_command('uninstall', 'uninstall_bicep_cli')
g.custom_command('upgrade', 'upgrade_bicep_cli')
g.custom_command('build', 'build_bicep_file')
g.custom_command('decompile', 'decompile_bicep_file')
g.custom_command('publish', 'publish_bicep_file')
g.custom_command('version', 'show_bicep_cli_version')
g.custom_command('list-versions', 'list_bicep_cli_versions')
|
|
"""Helper functions for k-medoids algorithms."""
import numpy as np
from numba import jit
def _get_clusters(metric=None, method='memory'):
# if a method requires it, check if a metric is given
if method in ('hybrid', 'cpu') and not metric:
print("Error: with method `{:}` a metric is necessary.")
return
if method == 'memory':
return get_clusters_memory
if method == 'hybrid':
return lambda data, medoids: get_clusters_hybrid(data, medoids, metric)
if method == 'cpu':
return _get_clusters_cpu(metric)
print("Error: method `{:}` unknown.".format(method))
return
def _get_medoid(metric=None, method='memory'):
# if a method requires it, check if a metric is given
if method in ('hybrid', 'cpu') and not metric:
print("Error: with method `{:}` a metric is necessary.")
return
if method == 'memory':
return get_medoid_memory
if method == 'hybrid':
return _get_medoid_hybrid(metric)
if method == 'cpu':
return _get_medoid_cpu(metric)
print("Error: method `{:}` unknown.".format(method))
return
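# Hedged usage sketch of the two dispatchers above (`diss`, `medoids`, `data`,
# `cluster_indices` and `my_metric` are placeholder names):
#   clusters_fn = _get_clusters(method='memory')              # expects a diss matrix
#   clusterid, error = clusters_fn(diss, medoids)
#   medoid_fn = _get_medoid(metric=my_metric, method='cpu')   # expects raw data
#   new_medoid = medoid_fn(data, cluster_indices)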
@jit
def get_clusters_memory(diss, medoids):
r"""Compute the clusters induced by the medoids on the dissimilarity matrix.
Parameters
----------
diss : (n, n) ndarray
Squared symmetric dissimilarity matrix.
medoids : (n,) ndarray
Set of medoids, given as index of data objects representing them.
Returns
-------
clusterid : ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Notes
-----
Very fast implementation. Requires enough memory to store a n\*n matrix
(that is the dissimilarity matrix, n is the number of data objects).
"""
    # take the submatrix whose columns correspond to the medoids, then take
    # the argmin row-wise
clustermem = diss[:, medoids].argmin(axis=1)
# we want a vector with medoid indices with respect to the data and not
# positional indices, i.e. we do not want [0, 1, 2] but
# [med_1, med_2, med_3]
clusterid = np.empty(clustermem.shape[0], dtype=np.uint32)
for i, medoid in enumerate(medoids):
clusterid[clustermem == i] = medoid
# compute also the error
error = diss[:, medoids].min(axis=1).sum()
return clusterid, error
@jit
def get_medoid_memory(diss, cluster):
r"""Compute the medoid of a cluster.
Parameters
----------
diss : (n, n) ndarray
Squared symmetric dissimilarity matrix.
cluster : (n,) ndarray
Set of the indices of all objects belonging to the cluster.
Returns
-------
medoid : int
Index of the object chosen as medoid of the cluster, i.e. it is the
object that minimizes the sum of distances with respect to all the
other cluster members.
Notes
-----
Very fast implementation. Requires enough memory to store a n\*n matrix
(that is the dissimilarity matrix, n is the number of data objects).
"""
medoid = cluster[np.sum(
diss[np.ix_(cluster, cluster)], axis=1
).argmin()]
return medoid
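# Hedged sketch (toy matrix, not part of the original module): one assignment
# step plus one medoid update using the memory-based helpers above.
def _demo_memory_helpers():
    toy_diss = np.array([[0., 1., 4., 5.],
                         [1., 0., 3., 4.],
                         [4., 3., 0., 1.],
                         [5., 4., 1., 0.]])
    medoids = np.array([0, 2])
    clusterid, error = get_clusters_memory(toy_diss, medoids)
    # recompute the medoid of the cluster currently represented by object 0
    new_medoid = get_medoid_memory(toy_diss, np.where(clusterid == 0)[0])
    return clusterid, error, new_medoid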
@jit
def get_clusters_hybrid(data, medoids, metric):
r"""Compute the clusters induced by the medoids on data.
Parameters
----------
data : (n,) ndarray
Data set.
medoids : (n,) ndarray
Set of medoids, given as index of data objects representing them.
metric : function
Function to compute pairwise distances.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object was
assigned, where the cluster number is defined as the object number of
the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Notes
-----
Quite fast implementation. Requires enough memory to store a n\*k matrix
(n is the number of data objects and k is the number of clusters).
"""
# make a big matrix that in the i-th row has the distances between the i-th
# object and the medoids
dists = np.zeros((data.shape[0], medoids.shape[0]))
for i, obj in enumerate(data):
for j, med in enumerate(medoids):
if i != med:
dists[i, j] = metric(obj, data[med])
# take the index corresponding to the medoid with minimum distance from the
# object
clustermem = dists.argmin(axis=1)
# we want a vector with medoid indices with respect to the data and not
# positional indices, i.e. we do not want [0, 1, 2] but
# [med_1, med_2, med_3]
clusterid = np.empty(clustermem.shape[0], dtype=np.uint32)
for i, medoid in enumerate(medoids):
clusterid[clustermem == i] = medoid
# take the minimum row-wise and sum the resulting vector to get the error
error = dists.min(axis=1).sum()
return clusterid, error
def _get_medoid_hybrid(metric):
@jit(nopython=True)
def get_medoid_hybrid(data, cluster):
r"""Compute the medoid of a cluster.
Parameters
----------
data : (n,) ndarray
Data set.
cluster : (n,) ndarray
Set of the indices of all objects belonging to the cluster.
metric : function
Function to compute pairwise distances.
Returns
-------
medoid : int
Index of the object chosen as medoid of the cluster, i.e. it is the
object that minimizes the sum of distances with respect to all the
other cluster members.
Notes
-----
Quite fast implementation. Requires enough memory to store a m\*m
matrix (m is the size of the given cluster).
"""
# make a dissimilarity matrix of the cluster passed in
m = cluster.shape[0]
diss = np.zeros((m, m))
for i in range(m):
for j in range(i+1):
dist = metric(data[cluster[i]], data[cluster[j]])
diss[i, j] = dist
diss[j, i] = dist
# then take the sum by row and choose the cluster member that minimizes
# it
medoid = cluster[diss.sum(axis=1).argmin()]
return medoid
return get_medoid_hybrid
def _get_clusters_cpu(metric):
@jit(nopython=True)
def get_clusters_cpu(data, medoids):
"""Compute the clusters induced by the medoids on data.
Parameters
----------
data : (n,) ndarray
Data set.
medoids : (n,) ndarray
Set of medoids, given as index of data objects representing them.
metric : function
Function to compute pairwise distances.
Returns
-------
clusterid : (n,) ndarray
An array containing the number of the cluster to which each object
was assigned, where the cluster number is defined as the object
number of the objects representing the cluster centroid.
error : float
The within-cluster sum of distances of the clustering solution.
Notes
-----
Slowest implementation. Does not require to store matrices in memory.
Version to let `numba` run in `nopython` mode (faster).
"""
n = data.shape[0]
k = medoids.shape[0]
clusterid = np.empty(n, dtype=np.uint32)
error = 0
for i in range(n):
            # select the cluster whose medoid is closest to the current object
min_dist = np.inf
min_j = -1
for j in range(k):
if i == medoids[j]:
# if the object is a medoid, its cluster will not change
# hence end the loop
min_dist = 0
min_j = j
break
else:
dist = metric(data[i], data[medoids[j]])
if dist < min_dist:
min_dist = dist
min_j = j
clusterid[i] = medoids[min_j]
error += min_dist
return clusterid, error
return get_clusters_cpu
def _get_medoid_cpu(metric):
@jit(nopython=True)
def get_medoid_cpu(data, cluster):
"""Compute the medoid of a cluster.
Parameters
----------
data : (n,) ndarray
Data set.
cluster : (n,) ndarray
Set of the indices of all objects belonging to the cluster.
metric : function
Function to compute pairwise distances.
Returns
-------
medoid : int
Index of the object chosen as medoid of the cluster, i.e. it is the
object that minimizes the sum of distances with respect to all the
other cluster members.
Notes
-----
Slowest implementation. Does not require to store matrices in memory.
Version to let `numba` run in `nopython` mode (faster).
"""
min_dist = np.inf
medoid = -1
for prop in cluster:
# for each proposed medoid, compute the sum of distances between it
# and each other cluster member
dist = 0
for j in cluster:
if prop != j:
dist += metric(data[prop], data[j])
# retain it only if it has a lower sum of distances
if dist < min_dist:
min_dist = dist
medoid = prop
return medoid
return get_medoid_cpu
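# Hedged usage sketch for the metric-based factories above (`euclidean` is an
# assumed numba-compatible metric; `data` and `medoids` are placeholder arrays):
#   @jit(nopython=True)
#   def euclidean(a, b):
#       return np.sqrt(np.sum((a - b) ** 2))
#   clusters_fn = _get_clusters_cpu(euclidean)
#   medoid_fn = _get_medoid_cpu(euclidean)
#   clusterid, error = clusters_fn(data, medoids)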
|
|
try: # Python 3
import http.client as httplib
from urllib.parse import parse_qsl
except ImportError: # Python 2
import httplib
from urlparse import parse_qsl
import textwrap
import json
import os
from .exceptions import QuickbooksException, SevereException, AuthorizationException
try:
from rauth import OAuth1Session, OAuth1Service
except ImportError:
print("Please import Rauth:\n\n")
print("http://rauth.readthedocs.org/en/latest/\n")
raise
class QuickBooks(object):
"""A wrapper class around Python's Rauth module for Quickbooks the API"""
access_token = ''
access_token_secret = ''
consumer_key = ''
consumer_secret = ''
company_id = 0
callback_url = ''
session = None
sandbox = False
minorversion = None
qbService = None
sandbox_api_url_v3 = "https://sandbox-quickbooks.api.intuit.com/v3"
api_url_v3 = "https://quickbooks.api.intuit.com/v3"
request_token_url = "https://oauth.intuit.com/oauth/v1/get_request_token"
access_token_url = "https://oauth.intuit.com/oauth/v1/get_access_token"
authorize_url = "https://appcenter.intuit.com/Connect/Begin"
current_user_url = "https://appcenter.intuit.com/api/v1/user/current"
disconnect_url = "https://appcenter.intuit.com/api/v1/connection/disconnect"
reconnect_url = "https://appcenter.intuit.com/api/v1/connection/reconnect"
request_token = ''
request_token_secret = ''
_BUSINESS_OBJECTS = [
"Account", "Attachable", "Bill", "BillPayment",
"Class", "CreditMemo", "Customer",
"Department", "Deposit", "Employee", "Estimate", "Invoice",
"Item", "JournalEntry", "Payment", "PaymentMethod",
"Purchase", "PurchaseOrder", "RefundReceipt",
"SalesReceipt", "TaxCode", "TaxService/Taxcode", "TaxRate", "Term",
"TimeActivity", "Transfer", "Vendor", "VendorCredit"
]
__instance = None
__use_global = False
def __new__(cls, **kwargs):
"""
If global is disabled, don't set global client instance.
"""
if QuickBooks.__use_global:
if QuickBooks.__instance is None:
QuickBooks.__instance = object.__new__(cls)
instance = QuickBooks.__instance
else:
instance = object.__new__(cls)
if 'consumer_key' in kwargs:
instance.consumer_key = kwargs['consumer_key']
if 'consumer_secret' in kwargs:
instance.consumer_secret = kwargs['consumer_secret']
if 'access_token' in kwargs:
instance.access_token = kwargs['access_token']
if 'access_token_secret' in kwargs:
instance.access_token_secret = kwargs['access_token_secret']
if 'company_id' in kwargs:
instance.company_id = kwargs['company_id']
if 'callback_url' in kwargs:
instance.callback_url = kwargs['callback_url']
if 'sandbox' in kwargs:
instance.sandbox = kwargs['sandbox']
if 'minorversion' in kwargs:
instance.minorversion = kwargs['minorversion']
return instance
@classmethod
def get_instance(cls):
return cls.__instance
@classmethod
def disable_global(cls):
"""
Disable use of singleton pattern.
"""
QuickBooks.__use_global = False
QuickBooks.__instance = None
@classmethod
def enable_global(cls):
"""
Allow use of singleton pattern.
"""
QuickBooks.__use_global = True
def _drop(self):
QuickBooks.__instance = None
@property
def api_url(self):
if self.sandbox:
return self.sandbox_api_url_v3
else:
return self.api_url_v3
def create_session(self):
if self.consumer_secret and self.consumer_key and self.access_token_secret and self.access_token:
session = OAuth1Session(
self.consumer_key,
self.consumer_secret,
self.access_token,
self.access_token_secret,
)
self.session = session
else:
raise QuickbooksException("Quickbooks authenication fields not set. Cannot create session.")
return self.session
def get_authorize_url(self):
"""
Returns the Authorize URL as returned by QB, and specified by OAuth 1.0a.
:return URI:
"""
self.authorize_url = self.authorize_url[:self.authorize_url.find('?')] \
if '?' in self.authorize_url else self.authorize_url
if self.qbService is None:
self.set_up_service()
response = self.qbService.get_raw_request_token(
params={'oauth_callback': self.callback_url})
oauth_resp = dict(parse_qsl(response.text))
self.request_token = oauth_resp['oauth_token']
self.request_token_secret = oauth_resp['oauth_token_secret']
return self.qbService.get_authorize_url(self.request_token)
def get_current_user(self):
'''Get data from the current user endpoint'''
url = self.current_user_url
result = self.make_request("GET", url)
return result
def get_report(self, report_type, qs=None):
'''Get data from the report endpoint'''
        if qs is None:
qs = {}
url = self.api_url + "/company/{0}/reports/{1}".format(self.company_id, report_type)
result = self.make_request("GET", url, params=qs)
return result
def set_up_service(self):
self.qbService = OAuth1Service(
name=None,
consumer_key=self.consumer_key,
consumer_secret=self.consumer_secret,
request_token_url=self.request_token_url,
access_token_url=self.access_token_url,
authorize_url=self.authorize_url,
base_url=None
)
def get_access_tokens(self, oauth_verifier):
"""
Wrapper around get_auth_session, returns session, and sets access_token and
access_token_secret on the QB Object.
:param oauth_verifier: the oauth_verifier as specified by OAuth 1.0a
"""
session = self.qbService.get_auth_session(
self.request_token,
self.request_token_secret,
data={'oauth_verifier': oauth_verifier})
self.access_token = session.access_token
self.access_token_secret = session.access_token_secret
return session
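    # Hedged OAuth 1.0a flow sketch (keys, verifier and realm id below are
    # placeholders, not working values):
    #   client = QuickBooks(consumer_key='ck', consumer_secret='cs',
    #                       callback_url='http://localhost/callback', sandbox=True)
    #   redirect_user_to = client.get_authorize_url()
    #   # ...Intuit redirects back with oauth_verifier and realmId...
    #   client.get_access_tokens(oauth_verifier)
    #   client.company_id = realm_id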
def disconnect_account(self):
"""
Disconnect current account from the application
:return:
"""
url = self.disconnect_url
result = self.make_request("GET", url)
return result
def reconnect_account(self):
"""
Reconnect current account by refreshing OAuth access tokens
:return:
"""
url = self.reconnect_url
result = self.make_request("GET", url)
return result
def make_request(self, request_type, url, request_body=None, content_type='application/json',
params=None, file_path=None):
if not params:
params = {}
if self.minorversion:
params['minorversion'] = self.minorversion
if not request_body:
request_body = {}
if self.session is None:
self.create_session()
headers = {
'Content-Type': content_type,
'Accept': 'application/json'
}
if file_path:
attachment = open(file_path, 'rb')
url = url.replace('attachable', 'upload')
boundary = '-------------PythonMultipartPost'
headers.update({
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Accept-Encoding': 'gzip;q=1.0,deflate;q=0.6,identity;q=0.3',
'User-Agent': 'OAuth gem v0.4.7',
'Accept': 'application/json',
'Connection': 'close'
})
binary_data = attachment.read()
request_body = textwrap.dedent(
"""
--%s
Content-Disposition: form-data; name="file_metadata_01"
Content-Type: application/json
%s
--%s
Content-Disposition: form-data; name="file_content_01"
Content-Type: application/pdf
%s
--%s--
"""
) % (boundary, request_body, boundary, binary_data, boundary)
req = self.session.request(
request_type, url, True, self.company_id, headers=headers, params=params, data=request_body)
if req.status_code == httplib.UNAUTHORIZED:
raise AuthorizationException("Application authentication failed", detail=req.text)
try:
result = req.json()
except:
raise QuickbooksException("Error reading json response: {0}".format(req.text), 10000)
if "Fault" in result:
self.handle_exceptions(result["Fault"])
elif not req.status_code == httplib.OK:
raise QuickbooksException("Error returned with status code '{0}': {1}".format(
req.status_code, req.text), 10000)
else:
return result
def get_single_object(self, qbbo, pk):
url = self.api_url + "/company/{0}/{1}/{2}/".format(self.company_id, qbbo.lower(), pk)
result = self.make_request("GET", url, {})
return result
def handle_exceptions(self, results):
# Needs to handle multiple errors
for error in results["Error"]:
message = error["Message"]
detail = ""
if "Detail" in error:
detail = error["Detail"]
code = ""
if "code" in error:
code = int(error["code"])
if code >= 10000:
raise SevereException(message, code, detail)
else:
raise QuickbooksException(message, code, detail)
def create_object(self, qbbo, request_body, _file_path=None):
self.isvalid_object_name(qbbo)
url = self.api_url + "/company/{0}/{1}".format(self.company_id, qbbo.lower())
results = self.make_request("POST", url, request_body, file_path=_file_path)
return results
def query(self, select):
url = self.api_url + "/company/{0}/query".format(self.company_id)
result = self.make_request("POST", url, select, content_type='application/text')
return result
def isvalid_object_name(self, object_name):
if object_name not in self._BUSINESS_OBJECTS:
raise Exception("{0} is not a valid QBO Business Object.".format(object_name))
return True
def update_object(self, qbbo, request_body, _file_path=None):
url = self.api_url + "/company/{0}/{1}".format(self.company_id, qbbo.lower())
result = self.make_request("POST", url, request_body, file_path=_file_path)
return result
def batch_operation(self, request_body):
url = self.api_url + "/company/{0}/batch".format(self.company_id)
results = self.make_request("POST", url, request_body)
return results
def download_pdf(self, qbbo, item_id):
url = self.api_url + "/company/{0}/{1}/{2}/pdf".format(self.company_id, qbbo.lower(), item_id)
if self.session is None:
self.create_session()
headers = {
'Content-Type': 'application/pdf',
'Accept': 'application/pdf, application/json',
}
response = self.session.request("GET", url, True, self.company_id, headers=headers)
if response.status_code != httplib.OK:
try:
                result = response.json()
except:
raise QuickbooksException("Error reading json response: {0}".format(response.text), 10000)
self.handle_exceptions(json["Fault"])
else:
return response.content
|
|
__author__ = 'tylin'
__version__ = '1.0.1'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads a COCO annotation file and prepares data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
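# Hedged usage sketch (the annotation path below is an example file name):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds))
#   coco.showAnns(anns)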
import json
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
import urllib
import copy
import itertools
import mask
import os
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset = {}
self.anns = []
self.imgToAnns = {}
self.catToImgs = {}
self.imgs = {}
self.cats = {}
if not annotation_file == None:
print 'loading annotations into memory...'
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
print 'Done (t=%0.2fs)'%(time.time()- tic)
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print 'creating index...'
anns = {}
imgToAnns = {}
catToImgs = {}
cats = {}
imgs = {}
if 'annotations' in self.dataset:
imgToAnns = {ann['image_id']: [] for ann in self.dataset['annotations']}
anns = {ann['id']: [] for ann in self.dataset['annotations']}
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
if 'images' in self.dataset:
imgs = {im['id']: {} for im in self.dataset['images']}
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
cats = {cat['id']: [] for cat in self.dataset['categories']}
for cat in self.dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in self.dataset['categories']}
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
        for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
                # this could be simplified with a defaultdict
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
if datasetType == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = mask.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = mask.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print ann['caption']
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# res.dataset['info'] = copy.deepcopy(self.dataset['info'])
# res.dataset['licenses'] = copy.deepcopy(self.dataset['licenses'])
print 'Loading and preparing results... '
tic = time.time()
anns = json.load(open(resFile))
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = mask.area([ann['segmentation']])[0]
if not 'bbox' in ann:
ann['bbox'] = mask.toBbox([ann['segmentation']])[0]
ann['id'] = id+1
ann['iscrowd'] = 0
print 'DONE (t=%0.2fs)'%(time.time()- tic)
res.dataset['annotations'] = anns
res.createIndex()
return res
def download( self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print 'Please specify target directory'
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urllib.urlretrieve(img['coco_url'], fname)
            print 'downloaded %d/%d images (t=%.1fs)'%(i+1, N, time.time()- tic)
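# A minimal usage sketch of the COCO helper above; the annotation path is an
# assumption, so point it at a real COCO annotation JSON before running.
if __name__ == '__main__':
    coco_demo = COCO('annotations/instances_val2014.json')  # hypothetical path
    person_cat_ids = coco_demo.getCatIds(catNms=['person'])
    person_img_ids = coco_demo.getImgIds(catIds=person_cat_ids)
    ann_ids = coco_demo.getAnnIds(imgIds=person_img_ids[:1], catIds=person_cat_ids, iscrowd=None)
    anns = coco_demo.loadAnns(ann_ids)
    print 'loaded %d person annotations for one image' % len(anns)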
|
|
# Python C++ Compiler Invocation Library
# Copyright 2014 Joshua Buckman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import re
from .compiler import compiler
class visual_cpp(compiler):
split_includes_text_index = 0
split_includes_deps_index = 1
def host(self):
return 'Windows'
def target_family(self):
return 'windows'
def object_details(self, source_extension):
if source_extension == '.cpp' or source_extension == '.c':
return ('.obj', True)
elif source_extension == '.rc':
return ('.res', False)
else:
self.handle_error("error: Invalid source extension %1" % source_extension)
def check_for_winsdk_in_key(self, winreg, key):
# All of the Windows SDKs installed on the machine get their own sub-key here.
done_key_enum = False
version_keys = []
key_info = winreg.QueryInfoKey(key)
for index in range(key_info[0]):
version_key = winreg.EnumKey(key, index)
version_keys.append(version_key)
# Select the version: The most recent v7.x SDK.
selected_version = 0
# TODO
version_key = winreg.OpenKey(key, version_keys[selected_version])
return winreg.QueryValueEx(version_key, 'InstallationFolder')[0]
def find_winsdk(self):
# Try to locate the Windows SDK. This is a heuristic, at best.
winreg = __import__('_winreg')
self.winsdk_dir = None
try:
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows Kits\Installed Roots')
try:
self.winsdk_dir = winreg.QueryValueEx(key, 'KitsRoot81')[0]
self.winsdk_new_layout = True
except:
pass
if not self.winsdk_dir:
self.winsdk_dir = winreg.QueryValueEx(key, 'KitsRoot')[0]
self.winsdk_new_layout = True
except:
pass
if not self.winsdk_dir:
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows Kits\Installed Roots')
try:
self.winsdk_dir = winreg.QueryValueEx(key, 'KitsRoot81')[0]
self.winsdk_new_layout = True
except:
pass
if not self.winsdk_dir:
self.winsdk_dir = winreg.QueryValueEx(key, 'KitsRoot')[0]
self.winsdk_new_layout = True
except:
pass
if not self.winsdk_dir:
try:
key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Microsoft SDKs\Windows')
self.winsdk_dir = self.check_for_winsdk_in_key(winreg, key)
self.winsdk_new_layout = False
except:
pass
if not self.winsdk_dir:
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Microsoft SDKs\Windows')
self.winsdk_dir = self.check_for_winsdk_in_key(winreg, key)
self.winsdk_new_layout = False
except:
pass
def detect(self):
vs_common_tools_var = self.get_vs_common_tools_var()
if not vs_common_tools_var in os.environ:
self.print_console("error: Visual C++ can not be located ({0} is not set.)".format(vs_common_tools_var))
return False
vs_common_tools = os.environ[vs_common_tools_var]
if not os.path.isdir(vs_common_tools):
self.print_console("error: Visual C++ can not be located ({0} is not valid.)".format(vs_common_tools_var))
return False
self.tool_dir = os.path.abspath(os.path.join(vs_common_tools, os.pardir, os.pardir, 'VC'))
if not os.path.isdir(self.tool_dir):
self.print_console("error: Visual C++ can not be located (VC directory not found.)")
return False
self.builtin_include_list = []
self.builtin_libpath_list = []
# Add Windows SDK stuff.
self.find_winsdk()
if not self.winsdk_dir or not os.path.isdir(self.winsdk_dir):
self.print_console("error: Windows SDK can not be located.")
return False
# Find the MFC/ATL directory, if it's there.
mfcatl_dir = os.path.join(self.tool_dir, 'atlmfc')
        if os.path.isdir(mfcatl_dir):
            self.mfcatl_dir = mfcatl_dir
        else:
            self.mfcatl_dir = None
# Make sure to remove the magic environment variable to allow compiler
# output to be redirected even from within the IDE.
if 'VS_UNICODE_OUTPUT' in os.environ:
del(os.environ['VS_UNICODE_OUTPUT'])
return True
def default_x86_tools(self):
self.cl = os.path.join(self.tool_dir, 'bin', 'cl.exe')
self.link = os.path.join(self.tool_dir, 'bin', 'link.exe')
self.lib = os.path.join(self.tool_dir, 'bin', 'lib.exe')
if self.mfcatl_dir:
self.builtin_include_list.append(os.path.join(self.mfcatl_dir, 'include'))
self.builtin_libpath_list.append(os.path.join(self.mfcatl_dir, 'lib'))
if self.winsdk_new_layout:
# Windows 8 and newer SDKs
self.rc = os.path.join(self.winsdk_dir, 'bin', 'x86', 'rc.exe')
self.builtin_include_list.append(os.path.join(self.winsdk_dir, 'Include', 'shared'))
self.builtin_include_list.append(os.path.join(self.winsdk_dir, 'Include', 'um'))
self.builtin_libpath_list.append(os.path.join(self.winsdk_dir, 'Lib', 'winv6.3', 'um', 'x86'))
else:
# Windows 7 and older SDKs
self.rc = os.path.join(self.winsdk_dir, 'bin', 'rc.exe')
self.builtin_include_list.append(os.path.join(self.winsdk_dir, 'include'))
self.builtin_libpath_list.append(os.path.join(self.winsdk_dir, 'lib'))
# Add the C standard library
self.builtin_include_list.append(os.path.join(self.tool_dir, 'include'))
self.builtin_libpath_list.append(os.path.join(self.tool_dir, 'lib'))
# Visual C++ needs to access dlls scattered through the installation, so add to the PATH.
path = os.environ['PATH']
if path[-1] != ';':
path += ';'
path += os.path.abspath(os.path.join(self.tool_dir, os.pardir, 'Common7', 'IDE'))
path += ';'
path += os.path.abspath(os.path.join(self.tool_dir, 'bin'))
os.environ['PATH'] = path
return True
def default_x64_tools(self):
host_proc = os.environ['PROCESSOR_ARCHITECTURE']
if host_proc != 'AMD64' and host_proc != 'x86':
self.print_both("error: Only x86 and x64 host processor architectures are supported for Visual C++.")
return False
if host_proc == 'AMD64':
self.cl = os.path.join(self.tool_dir, 'bin', 'amd64', 'cl.exe')
self.link = os.path.join(self.tool_dir, 'bin', 'amd64', 'link.exe')
self.lib = os.path.join(self.tool_dir, 'bin', 'amd64', 'lib.exe')
elif host_proc == 'x86':
self.cl = os.path.join(self.tool_dir, 'bin', 'x86_amd64', 'cl.exe')
self.link = os.path.join(self.tool_dir, 'bin', 'x86_amd64', 'link.exe')
self.lib = os.path.join(self.tool_dir, 'bin', 'x86_amd64', 'lib.exe')
if self.mfcatl_dir:
self.builtin_include_list.append(os.path.join(self.mfcatl_dir, 'include'))
self.builtin_libpath_list.append(os.path.join(self.mfcatl_dir, 'lib', 'amd64'))
if self.winsdk_new_layout:
# Windows 8 and newer SDKs
self.rc = os.path.join(self.winsdk_dir, 'bin', 'x86', 'rc.exe')
self.builtin_include_list.append(os.path.join(self.winsdk_dir, 'Include', 'shared'))
self.builtin_include_list.append(os.path.join(self.winsdk_dir, 'Include', 'um'))
self.builtin_libpath_list.append(os.path.join(self.winsdk_dir, 'Lib', 'winv6.3', 'um', 'x64'))
else:
# Windows 7 and older SDKs
self.rc = os.path.join(self.winsdk_dir, 'bin', 'rc.exe')
self.builtin_include_list.append(os.path.join(self.winsdk_dir, 'include'))
self.builtin_libpath_list.append(os.path.join(self.winsdk_dir, 'lib', 'x64'))
# Add the C standard library
self.builtin_include_list.append(os.path.join(self.tool_dir, 'include'))
self.builtin_libpath_list.append(os.path.join(self.tool_dir, 'lib', 'amd64'))
# Visual C++ needs to access dlls scattered through the installation, so add to the PATH.
path = os.environ['PATH']
if path[-1] != ';':
path += ';'
path += os.path.abspath(os.path.join(self.tool_dir, os.pardir, 'Common7', 'IDE'))
path += ';'
if host_proc == 'AMD64':
path += os.path.abspath(os.path.join(self.tool_dir, 'bin', 'amd64'))
elif host_proc == 'x86':
path += os.path.abspath(os.path.join(self.tool_dir, 'bin'))
os.environ['PATH'] = path
return True
def compile(self, name, config, output_dir, rebuild_list, include_list, define_list):
# Build the basic compiler invocation command line arguments
compile_flags = ['"' + self.cl + '"',
                         '/nologo', # do not output compiler version string
'/c', # compile only. No link on cl.exe invoke
'/W4', # set the warning level to the maximum
'/WX', # treat warnings as errors
'/Zi', # generate full symbol information (pdb)
'/EHsc', # enable C++ exception handling (and assume extern "C" is nothrow)
'/fp:fast', # sacrifice 100% float compliance for speed
'/showIncludes', # print include files to stderr
'/TP', # compile everything as C++
'/X'] # ignore standard include paths
compile_flags.extend(self.target_compile_flags())
if config == 'debug':
compile_flags.extend(['/Od', # disable optimizations
'/MTd', # multithreaded c++ debug library
'/RTCscu']) # enable all runtime checking
else:
compile_flags.extend(['/MT', # multithreaded c++ library
'/GL', # whole program optimizations
                                  '/O1', # optimize for minimum size
'/GS-']) # disable stack overflow checking
rc_flags = ['"' + self.rc + '"',
'/nologo', # do not output rc version string
                    '/X'] # Ignore standard include paths
for define in define_list:
compile_flags.append('/D' + define)
rc_flags.append('/D' + define)
for include_dir in include_list:
compile_flags.append('/I"' + include_dir + '"')
rc_flags.append('/I"' + include_dir + '"')
for include_dir in self.builtin_include_list:
compile_flags.append('/I"' + include_dir + '"')
rc_flags.append('/I"' + include_dir + '"')
compile_flags.append('/Fd"' + os.path.join(output_dir, name + '.pdb"'))
did_pch = False
did_rc = False
for r in rebuild_list:
source_split = os.path.split(r.source)
source_name_split = os.path.splitext(source_split[1])
source_base_name = source_name_split[0]
source_extension = source_name_split[1]
if source_base_name.lower() == 'precomp':
if did_pch:
self.handle_error("error: found multiple precompiled header source files")
# Super-naive C++ parsing; assume the precompiled header source file includes
# ONE file only.
precomp_match = None
with open(r.source, 'r') as precomp_file:
precomp_text = precomp_file.read()
precomp_match = re.search(r'#include\s*[<"](.*)[>"]', precomp_text)
if not precomp_match:
self.handle_error("error: Can not parse precompiled header source file")
precompiled_header = precomp_match.group(1)
precompiled_binary = os.path.join(output_dir, name + '.intermediates', source_base_name + '.pch')
invocation_flags = copy.copy(compile_flags)
invocation_flags.extend(['/Yc"' + precompiled_header + '"',
'/Fp"' + precompiled_binary + '"',
'/Fo"' + r.obj + '"',
'"' + r.source + '"'])
compile_flags.extend(['/Yu"' + precompiled_header + '"',
'/Fp"' + precompiled_binary + '"'])
# Run it
self.print_both("building precompiled header")
i = self.invoke(invocation_flags)
self.handle_compiler_invoke_result(i, r.dep)
# Do not compile the precompiled header source file again
rebuild_list.remove(r)
did_pch = True
elif source_extension.lower() == '.rc':
if did_rc:
# This is a Microsoft linker limitation
self.handle_error("error: found multiple resource source files")
invocation_flags = copy.copy(rc_flags)
invocation_flags.extend(['/Fo"' + r.obj + '"',
'"' + r.source + '"'])
# Run it
self.print_both("resource compile %s" % source_split[1])
i = self.invoke(invocation_flags)
if i.return_val != 0:
self.handle_error(i.stdout)
rebuild_list.remove(r)
did_rc = True
for r in rebuild_list:
# Finish the flags for this particular compiler invocation
invocation_flags = copy.copy(compile_flags)
invocation_flags.extend(['/Fo"' + r.obj + '"',
'"' + r.source + '"'])
# Run it
self.print_both("compiling %s" % os.path.basename(r.source))
i = self.invoke(invocation_flags)
self.handle_compiler_invoke_result(i, r.dep)
def handle_compiler_invoke_result(self, i, deps_file_name):
# Visual C++ interleaves the header list we use for deps files into the normal output
stdout_split = self.split_cl_output(i.stdout)
if i.return_val != 0:
self.handle_error(stdout_split[visual_cpp.split_includes_text_index])
# Write the dependent information into the .dep file
with open(deps_file_name, 'w') as deps_file:
deps_file.write(stdout_split[visual_cpp.split_includes_deps_index])
def split_cl_output(self, compiler_output):
        # Parse out the dependent header file information. Remove duplicates.
headers = []
text = []
unique_headers = set()
compiler_output_lines = compiler_output.splitlines()
for line in compiler_output_lines:
            partition = line.partition('Note: including file:')
            if partition[1]:
                header = partition[2].strip().lower()
                if header not in unique_headers:
                    unique_headers.add(header)
                    headers.append(header)
            else:
                text.append(partition[0])
return ('\n'.join(text), '\n'.join(headers))
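    # Hedged illustration of split_cl_output: when cl.exe runs with /showIncludes
    # it interleaves lines such as
    #   Note: including file: C:\Program Files\...\windows.h
    # with its normal output. The method above separates the two streams and
    # returns (normal text, newline-joined unique headers); the header half is
    # what handle_compiler_invoke_result writes into the .dep file.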
def link_static_lib(self, name, output_dir, config, built_code):
lib_name = self.get_lib_name(name)
lib_path = os.path.join(output_dir, lib_name)
if not built_code and os.path.isfile(lib_path):
self.print_both("%s is up to date" % lib_name)
return
lib_flags = ['"' + self.lib + '"']
object_code_dir = os.path.join(output_dir, name + '.intermediates', "obj")
for root, dirs, files in os.walk(object_code_dir, topdown=True):
for filename in files:
lib_flags.append('"' + os.path.join(root, filename) + '"')
lib_flags.append('/OUT:"' + lib_path + '"')
self.print_both("linking %s" % lib_name)
i = self.invoke(lib_flags)
if i.return_val != 0:
self.handle_error(i.stdout)
def link_module(self, name, output_dir, config, built_code, link_module_type, libpath_list, lib_list):
link_name = self.get_link_name(name, link_module_type)
link_path = os.path.join(output_dir, link_name)
link_libpath_list = copy.copy(libpath_list)
link_libpath_list.extend(self.builtin_libpath_list)
if not built_code and not self.check_for_link_update(link_path, link_libpath_list, lib_list):
self.print_both("%s is up to date" % link_name)
return
link_flags = ['"' + self.link + '"',
'/NOLOGO', # do not output linker version string
'/WX', # treat link warnings as errors
'/INCREMENTAL:NO', # control incremental linking
'/MAP', # generate a .map output file
'/DEBUG', # generate debug information
'/NODEFAULTLIB', # ignore default libs
'/SWAPRUN:NET', # ensure the image is copied to memory when loaded over CD or net
'/SWAPRUN:CD',
'/DYNAMICBASE', # use address space layout randomization
'/NXCOMPAT', # compatible with Data Execution Prevention
'/MANIFEST'] # generate manifest with default UAC and SxS settings
link_flags.extend(self.target_link_flags(link_module_type))
if config != 'debug':
link_flags.extend(['/OPT:REF', # remove unreferenced comdats
'/OPT:ICF', # identical comdat folding
'/LTCG']) # link-time code generation
if config == 'ship':
link_flags.append('/RELEASE') # set the checksum in the image header
for libpath_dir in link_libpath_list:
link_flags.append('/LIBPATH:"' + libpath_dir + '"')
link_flags.append('/OUT:"' + link_path + '"')
object_code_dir = os.path.join(output_dir, name + '.intermediates', "obj")
for root, dirs, files in os.walk(object_code_dir, topdown=True):
for filename in files:
link_flags.append('"' + os.path.join(root, filename) + '"')
for lib in lib_list:
link_flags.append(self.get_lib_name(lib))
if config == 'debug':
link_flags.extend(['libcpmtd.lib', 'libcmtd.lib'])
else:
link_flags.extend(['libcpmt.lib', 'libcmt.lib'])
self.print_both("linking %s" % link_name)
i = self.invoke(link_flags)
if i.return_val != 0:
self.handle_error(i.stdout)
def get_lib_name(self, name):
return name + '.lib'
def get_link_name(self, name, link_module_type):
if link_module_type == compiler.link_module_type_shared:
return name + '.dll'
elif link_module_type == compiler.link_module_type_application:
return name + '.exe'
else:
self.handle_error("error: invalid module link type")
def target_link_flags(self, link_module_type):
link_flags = self.machine_link_flags()
if link_module_type == compiler.link_module_type_shared:
link_flags.append('/DLL')
elif link_module_type == compiler.link_module_type_application:
link_flags.append('/SUBSYSTEM:CONSOLE')
else:
self.handle_error("error: invalid module link type")
return link_flags
class visual_cpp_2008(visual_cpp):
def get_vs_common_tools_var(self):
return 'VS90COMNTOOLS'
class visual_cpp_2008_x86(visual_cpp_2008):
def target_proc(self):
return 'x86'
def detect(self):
if not visual_cpp_2008.detect(self):
return False
return self.default_x86_tools()
def target_compile_flags(self):
return ['/arch:SSE2']
def machine_link_flags(self):
return ['/MACHINE:X86']
class visual_cpp_2008_x64(visual_cpp_2008):
def target_proc(self):
return 'x64'
def detect(self):
if not visual_cpp_2008.detect(self):
return False
return self.default_x64_tools()
def target_compile_flags(self):
return []
def machine_link_flags(self):
return ['/MACHINE:X64']
class visual_cpp_2010(visual_cpp):
def get_vs_common_tools_var(self):
return 'VS100COMNTOOLS'
class visual_cpp_2010_x86(visual_cpp_2010):
def target_proc(self):
return 'x86'
def detect(self):
if not visual_cpp_2010.detect(self):
return False
return self.default_x86_tools()
def target_compile_flags(self):
return ['/arch:SSE2']
def machine_link_flags(self):
return ['/MACHINE:X86']
class visual_cpp_2010_x64(visual_cpp_2010):
def target_proc(self):
return 'x64'
def detect(self):
if not visual_cpp_2010.detect(self):
return False
return self.default_x64_tools()
def target_compile_flags(self):
return []
def machine_link_flags(self):
return ['/MACHINE:X64']
class visual_cpp_2013(visual_cpp):
def get_vs_common_tools_var(self):
return 'VS120COMNTOOLS'
class visual_cpp_2013_x86(visual_cpp_2013):
def target_proc(self):
return 'x86'
def detect(self):
if not visual_cpp_2013.detect(self):
return False
return self.default_x86_tools()
def target_compile_flags(self):
return ['/arch:SSE2']
def machine_link_flags(self):
return ['/MACHINE:X86']
class visual_cpp_2013_x64(visual_cpp_2013):
def target_proc(self):
return 'x64'
def detect(self):
if not visual_cpp_2013.detect(self):
return False
return self.default_x64_tools()
def target_compile_flags(self):
return []
def machine_link_flags(self):
return ['/MACHINE:X64']
|
|
"""Class to perform under-sampling based on one-sided selection method."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from __future__ import division
from collections import Counter
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import check_random_state, safe_indexing
from ..base import BaseCleaningSampler
from .tomek_links import TomekLinks
class OneSidedSelection(BaseCleaningSampler):
"""Class to perform under-sampling based on one-sided selection method.
Read more in the :ref:`User Guide <condensed_nearest_neighbors>`.
Parameters
----------
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
- If ``str``, has to be one of: (i) ``'minority'``: resample the
minority class; (ii) ``'majority'``: resample the majority class,
          (iii) ``'not minority'``: resample all classes apart from the
          minority class, (iv) ``'all'``: resample all classes, and (v)
          ``'auto'``: corresponds to ``'all'`` for over-sampling methods and
          ``'not minority'`` for under-sampling methods. The classes targeted
          will be over-sampled or under-sampled to achieve an equal number of
          samples as the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
.. warning::
This algorithm is a cleaning under-sampling method. When providing a
``dict``, only the targeted classes will be used; the number of
samples will be discarded.
return_indices : bool, optional (default=False)
Whether or not to return the indices of the samples randomly
selected from the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, ``random_state`` is the seed used by the random number
generator; If ``RandomState`` instance, random_state is the random
number generator; If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
n_neighbors : int or object, optional (default=\
KNeighborsClassifier(n_neighbors=1))
If ``int``, size of the neighbourhood to consider to compute the
nearest neighbors. If object, an estimator that inherits from
:class:`sklearn.neighbors.base.KNeighborsMixin` that will be used to
find the nearest-neighbors.
n_seeds_S : int, optional (default=1)
Number of samples to extract in order to build the set S.
n_jobs : int, optional (default=1)
The number of threads to open if possible.
Notes
-----
The method is based on [1]_.
    Supports multi-class resampling. A one-vs.-one scheme is used when sampling
a class as proposed in [1]_. For each class to be sampled, all samples of
this class and the minority class are used during the sampling procedure.
See
:ref:`sphx_glr_auto_examples_under-sampling_plot_one_sided_selection.py`
References
----------
.. [1] M. Kubat, S. Matwin, "Addressing the curse of imbalanced training
sets: one-sided selection," In ICML, vol. 97, pp. 179-186, 1997.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.under_sampling import \
OneSidedSelection # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> oss = OneSidedSelection(random_state=42)
>>> X_res, y_res = oss.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({1: 495, 0: 100})
"""
def __init__(self,
ratio='auto',
return_indices=False,
random_state=None,
n_neighbors=None,
n_seeds_S=1,
n_jobs=1):
super(OneSidedSelection, self).__init__(ratio=ratio)
self.random_state = random_state
self.return_indices = return_indices
self.n_neighbors = n_neighbors
self.n_seeds_S = n_seeds_S
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Private function to create the NN estimator"""
if self.n_neighbors is None:
self.estimator_ = KNeighborsClassifier(
n_neighbors=1, n_jobs=self.n_jobs)
elif isinstance(self.n_neighbors, int):
self.estimator_ = KNeighborsClassifier(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs)
elif isinstance(self.n_neighbors, KNeighborsClassifier):
self.estimator_ = self.n_neighbors
else:
            raise ValueError('`n_neighbors` has to be an int or an object'
                             ' inherited from KNeighborsClassifier.'
' Got {} instead.'.format(type(self.n_neighbors)))
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
idx_under : ndarray, shape (n_samples, )
            If `return_indices` is `True`, an array will be returned
            containing the indices of the samples which have been selected.
"""
self._validate_estimator()
random_state = check_random_state(self.random_state)
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
idx_under = np.empty((0, ), dtype=int)
for target_class in np.unique(y):
if target_class in self.ratio_.keys():
# select a sample from the current class
idx_maj = np.flatnonzero(y == target_class)
idx_maj_sample = idx_maj[random_state.randint(
low=0, high=target_stats[target_class],
size=self.n_seeds_S)]
minority_class_indices = np.flatnonzero(y == class_minority)
C_indices = np.append(minority_class_indices, idx_maj_sample)
# create the set composed of all minority samples and one
# sample from the current class.
C_x = safe_indexing(X, C_indices)
C_y = safe_indexing(y, C_indices)
# create the set S with removing the seed from S
# since that it will be added anyway
idx_maj_extracted = np.delete(idx_maj, idx_maj_sample, axis=0)
S_x = safe_indexing(X, idx_maj_extracted)
S_y = safe_indexing(y, idx_maj_extracted)
self.estimator_.fit(C_x, C_y)
pred_S_y = self.estimator_.predict(S_x)
S_misclassified_indices = np.flatnonzero(pred_S_y != S_y)
idx_tmp = idx_maj_extracted[S_misclassified_indices]
idx_under = np.concatenate(
(idx_under, idx_maj_sample, idx_tmp), axis=0)
else:
idx_under = np.concatenate(
(idx_under, np.flatnonzero(y == target_class)), axis=0)
X_resampled = safe_indexing(X, idx_under)
y_resampled = safe_indexing(y, idx_under)
# apply Tomek cleaning
tl = TomekLinks(ratio=self.ratio_, return_indices=True)
X_cleaned, y_cleaned, idx_cleaned = tl.fit_sample(X_resampled,
y_resampled)
idx_under = safe_indexing(idx_under, idx_cleaned)
if self.return_indices:
return (X_cleaned, y_cleaned, idx_under)
else:
return X_cleaned, y_cleaned
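# A hedged usage sketch of ``return_indices`` (array shapes are illustrative;
# the ``fit_sample`` API is the one documented in the class docstring above):
# >>> oss = OneSidedSelection(return_indices=True, random_state=42)
# >>> X_res, y_res, idx = oss.fit_sample(X, y)
# >>> # ``idx`` indexes into the original arrays, i.e. X_res equals X[idx]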
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
parse_iso8601,
float_or_none,
int_or_none,
compat_str,
determine_ext,
)
class HitboxIE(InfoExtractor):
IE_NAME = 'hitbox'
_VALID_URL = r'https?://(?:www\.)?hitbox\.tv/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.hitbox.tv/video/203213',
'info_dict': {
'id': '203213',
'title': 'hitbox @ gamescom, Sub Button Hype extended, Giveaway - hitbox News Update with Oxy',
'alt_title': 'hitboxlive - Aug 9th #6',
'description': '',
'ext': 'mp4',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 215.1666,
'resolution': 'HD 720p',
'uploader': 'hitboxlive',
'view_count': int,
'timestamp': 1407576133,
'upload_date': '20140809',
'categories': ['Live Show'],
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _extract_metadata(self, url, video_id):
thumb_base = 'https://edge.sf.hitbox.tv'
metadata = self._download_json(
'%s/%s' % (url, video_id), video_id,
'Downloading metadata JSON')
date = 'media_live_since'
media_type = 'livestream'
if metadata.get('media_type') == 'video':
media_type = 'video'
date = 'media_date_added'
video_meta = metadata.get(media_type, [])[0]
title = video_meta.get('media_status')
alt_title = video_meta.get('media_title')
description = clean_html(
video_meta.get('media_description') or
video_meta.get('media_description_md'))
duration = float_or_none(video_meta.get('media_duration'))
uploader = video_meta.get('media_user_name')
views = int_or_none(video_meta.get('media_views'))
timestamp = parse_iso8601(video_meta.get(date), ' ')
categories = [video_meta.get('category_name')]
thumbs = [
{'url': thumb_base + video_meta.get('media_thumbnail'),
'width': 320,
'height': 180},
{'url': thumb_base + video_meta.get('media_thumbnail_large'),
'width': 768,
'height': 432},
]
return {
'id': video_id,
'title': title,
'alt_title': alt_title,
'description': description,
'ext': 'mp4',
'thumbnails': thumbs,
'duration': duration,
'uploader': uploader,
'view_count': views,
'timestamp': timestamp,
'categories': categories,
}
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.hitbox.tv/api/player/config/video/%s' % video_id,
video_id, 'Downloading video JSON')
formats = []
for video in player_config['clip']['bitrates']:
label = video.get('label')
if label == 'Auto':
continue
video_url = video.get('url')
if not video_url:
continue
bitrate = int_or_none(video.get('bitrate'))
if determine_ext(video_url) == 'm3u8':
if not video_url.startswith('http'):
continue
formats.append({
'url': video_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'protocol': 'm3u8_native',
})
else:
formats.append({
'url': video_url,
'tbr': bitrate,
'format_note': label,
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.hitbox.tv/api/media/video',
video_id)
metadata['formats'] = formats
return metadata
class HitboxLiveIE(HitboxIE):
IE_NAME = 'hitbox:live'
_VALID_URL = r'https?://(?:www\.)?hitbox\.tv/(?!video)(?P<id>.+)'
_TEST = {
'url': 'http://www.hitbox.tv/dimak',
'info_dict': {
'id': 'dimak',
'ext': 'mp4',
'description': 'md5:c9f80fa4410bc588d7faa40003fc7d0e',
'timestamp': int,
'upload_date': compat_str,
'title': compat_str,
'uploader': 'Dimak',
},
'params': {
# live
'skip_download': True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
player_config = self._download_json(
'https://www.hitbox.tv/api/player/config/live/%s' % video_id,
video_id)
formats = []
cdns = player_config.get('cdns')
servers = []
for cdn in cdns:
# Subscribe URLs are not playable
if cdn.get('rtmpSubscribe') is True:
continue
base_url = cdn.get('netConnectionUrl')
host = re.search('.+\.([^\.]+\.[^\./]+)/.+', base_url).group(1)
if base_url not in servers:
servers.append(base_url)
for stream in cdn.get('bitrates'):
label = stream.get('label')
if label == 'Auto':
continue
stream_url = stream.get('url')
if not stream_url:
continue
bitrate = int_or_none(stream.get('bitrate'))
if stream.get('provider') == 'hls' or determine_ext(stream_url) == 'm3u8':
if not stream_url.startswith('http'):
continue
formats.append({
'url': stream_url,
'ext': 'mp4',
'tbr': bitrate,
'format_note': label,
'rtmp_live': True,
})
else:
formats.append({
'url': '%s/%s' % (base_url, stream_url),
'ext': 'mp4',
'tbr': bitrate,
'rtmp_live': True,
'format_note': host,
'page_url': url,
'player_url': 'http://www.hitbox.tv/static/player/flowplayer/flowplayer.commercial-3.2.16.swf',
})
self._sort_formats(formats)
metadata = self._extract_metadata(
'https://www.hitbox.tv/api/media/live',
video_id)
metadata['formats'] = formats
metadata['is_live'] = True
metadata['title'] = self._live_title(metadata.get('title'))
return metadata
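# Hedged usage sketch: within youtube-dl these extractors are normally driven by
# the YoutubeDL frontend rather than instantiated directly (the URL below is the
# one from the _TEST block above):
#
#   import youtube_dl
#   ydl = youtube_dl.YoutubeDL({'skip_download': True})
#   info = ydl.extract_info('http://www.hitbox.tv/video/203213', download=False)
#   print(info['id'], info['title'])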
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function, unicode_literals)
import argparse
import datetime
import time
import os
import re
from dateutil.parser import parse
from collections import defaultdict
from sqlalchemy import create_engine
import pandas as pd
from movie_collection_app.trakt_instance import TraktInstance
from movie_collection_app.movie_collection import MovieCollection
from movie_collection_app.parse_imdb import (parse_imdb_episode_list, parse_imdb_tv_listings,
parse_imdb_main)
from movie_collection_app.util import POSTGRESTRING
list_of_commands = ('list', 'search', 'wl', 'tv')
help_text = 'commands=%s,[number]' % ','.join(list_of_commands)
pg_db = '%s:5432/movie_queue' % POSTGRESTRING
engine = create_engine(pg_db)
def find_upcoming_episodes(df=None, do_update=False):
cache_file = '/tmp/parse_imdb_tv_listings.csv.gz'
if os.path.exists(cache_file) and os.stat(cache_file).st_mtime > time.time() - 86400:
df = pd.read_csv(cache_file, compression='gzip')
if df is None:
df = parse_imdb_tv_listings()
df.to_csv(cache_file, compression='gzip', encoding='utf-8')
with engine.connect() as db:
query = """
SELECT
t1.show,
t1.season,
t1.episode,
t2.title,
t2.link as imdb_url,
t1.epurl as ep_url
FROM imdb_episodes t1
JOIN imdb_ratings t2 ON t1.show = t2.show
"""
rating_df = pd.read_sql(query, db)
imdb_urls = set(rating_df.imdb_url.unique())
ep_urls = set(rating_df.ep_url.unique())
def clean_string(x):
try:
x = x.encode(errors='ignore').lower().split('(')[0].strip().replace(' ', '_')
except:
x = x.decode(errors='ignore').lower().split('(')[0].strip().replace(' ', '_')
return x.replace("'", '').replace('&', 'and').replace(':', '')
titles = set(map(clean_string, rating_df.title.unique()))
df.title = df.title.apply(clean_string)
cond0 = df.imdb_url.isin(imdb_urls)
cond0 &= -df.ep_url.isin(ep_urls)
cond1 = -df.imdb_url.isin(imdb_urls)
cond1 &= df.title.isin(titles)
df = df[cond0 | cond1].reset_index(drop=True)
mq_ = MovieCollection()
ti_ = TraktInstance()
trakt_watchlist_shows = ti_.get_watchlist_shows()
trakt_watched_shows = ti_.get_watched_shows()
max_season = {}
current_shows = set()
imdb_show_map = {v['link']: k for k, v in mq_.imdb_ratings.items()}
for row in mq_.current_queue:
show = row['show']
fname = row['path']
season, episode = mq_.get_season_episode_from_name(fname, show)
if season == -1 or episode == -1:
continue
imdb_url = mq_.imdb_ratings[show]['link']
max_s = max_season.get(imdb_url, -1)
current_shows.add(imdb_url)
max_season[imdb_url] = max(max_s, season)
for imdb_url, showinfo in trakt_watchlist_shows.items():
if imdb_url in current_shows:
continue
current_shows.add(imdb_url)
show = imdb_show_map.get(imdb_url, showinfo.title.lower().replace(' ', '_'))
if imdb_url not in imdb_show_map:
imdb_show_map[imdb_url] = show
max_season[imdb_url] = -1
for imdb_url in current_shows:
if imdb_url in trakt_watched_shows:
for s, e in sorted(trakt_watched_shows[imdb_url]):
max_s = max_season.get(imdb_url, -1)
max_season[imdb_url] = max(s, max_s)
imdb_urls = set(df.imdb_url.dropna().unique())
titles = set(df.title.unique())
for imdb_url in sorted(current_shows):
show = imdb_show_map[imdb_url]
max_s = max_season[imdb_url]
if imdb_url not in imdb_urls and show not in titles and not any(x in show for x in titles):
continue
print(show, imdb_url, max_s)
season_episode_ratings = defaultdict(dict)
for (s, e), v in mq_.imdb_episode_ratings[show].items():
season_episode_ratings[s][e] = float(v['rating'])
if not imdb_url:
continue
for item in parse_imdb_episode_list(imdb_url, season=-1, proxy=True):
season = item[0]
nepisodes = item[3]
if season < max_s:
continue
if nepisodes == len([k for k, v in season_episode_ratings[season].items() if v > 0]):
continue
parse_imdb_main(show, do_tv=True, do_update=do_update, season=season, proxy=True)
return df
def find_new_episodes(search=(), do_update=False, trakt=False, source=None, shows=False):
output = {}
mq_ = MovieCollection()
ti_ = TraktInstance()
trakt_watchlist_shows = ti_.get_watchlist_shows()
trakt_watched_shows = ti_.get_watched_shows()
trakt_cal_shows = ti_.get_calendar()
if trakt_cal_shows is None:
trakt_cal_shows = {}
else:
trakt_cal_shows = {x.show.get_key('imdb'): x.show for x in trakt_cal_shows}
current_shows = set()
max_season = {}
max_episode = defaultdict(dict)
current_seasons = defaultdict(set)
current_episodes = defaultdict(set)
maxdate = datetime.date.today()
imdb_show_map = {v['link']: k for k, v in mq_.imdb_ratings.items()}
try:
if len(search) > 0:
maxdate = parse(search[0]).date()
search = ()
except (TypeError, ValueError):
pass
for row in mq_.current_queue:
show = row['show']
if search and any(x not in show for x in search):
continue
fname = row['path']
season, episode = mq_.get_season_episode_from_name(fname, show)
if season == -1 or episode == -1:
continue
imdb_url = mq_.imdb_ratings[show]['link']
max_s = max_season.get(imdb_url, -1)
max_e = max_episode.get(imdb_url, {}).get(season, -1)
current_shows.add(imdb_url)
max_season[imdb_url] = max(max_s, season)
max_episode[imdb_url][season] = max(max_e, episode)
current_seasons[imdb_url].add(season)
current_episodes[imdb_url].add((season, episode))
for imdb_url, showinfo in trakt_watchlist_shows.items():
if imdb_url in current_shows:
continue
if imdb_url not in imdb_show_map:
show = re.sub('[^A-Za-z0-9 ]', '', showinfo.title).lower().replace(' ', '_')
mq_.imdb_ratings[show] = ti_.get_imdb_rating(show, imdb_url)
else:
show = imdb_show_map[imdb_url]
if search and any(x not in show for x in search):
continue
current_shows.add(imdb_url)
if imdb_url not in imdb_show_map:
imdb_show_map[imdb_url] = show
max_season[imdb_url] = -1
max_episode[imdb_url][-1] = -1
for imdb_url in current_shows:
if imdb_url in trakt_watched_shows:
for s, e in sorted(trakt_watched_shows[imdb_url]):
max_s = max_season.get(imdb_url, -1)
max_e = max_episode.get(imdb_url, {}).get(s, -1)
max_season[imdb_url] = max(s, max_s)
max_episode[imdb_url][s] = max(e, max_e)
current_seasons[imdb_url].add(s)
current_episodes[imdb_url].add((s, e))
for imdb_url in sorted(current_shows):
if imdb_url == '':
continue
show = imdb_show_map[imdb_url]
max_s = max_season[imdb_url]
max_e = max_episode[imdb_url][max_s]
title = mq_.imdb_ratings[show]['title']
rating = mq_.imdb_ratings[show]['rating']
if trakt and imdb_url not in trakt_cal_shows:
continue
if (source != 'all' and source in ('hulu', 'netflix', 'amazon') and
mq_.imdb_ratings[show]['source'] != source):
continue
if not source and mq_.imdb_ratings[show]['source'] in ('hulu', 'netflix', 'amazon'):
continue
max_airdate = datetime.date(1950, 1, 1)
if mq_.imdb_episode_ratings[show]:
max_s, max_e = max(mq_.imdb_episode_ratings[show])
max_airdate = mq_.imdb_episode_ratings[show][(max_s, max_e)]['airdate']
if shows:
output[show] = '%s %s %s %s %s %s' % (show, title, max_s, max_e, str(max_airdate),
rating)
continue
if do_update:
if max_airdate > datetime.date.today() - datetime.timedelta(days=30):
print(show, max_s, max_e)
for item in parse_imdb_episode_list(imdb_url, season=-1):
season = item[0]
if season < max_s:
continue
mq_.get_imdb_episode_ratings(show, season)
for season, episode in sorted(mq_.imdb_episode_ratings[show]):
row = mq_.imdb_episode_ratings[show][(season, episode)]
if season < max_s:
continue
if episode <= max_episode[imdb_url].get(season, -1):
continue
if not search and row['airdate'] < (maxdate - datetime.timedelta(days=10)):
continue
if row['airdate'] > maxdate:
continue
if (season, episode) in current_episodes[imdb_url]:
continue
eptitle = row['eptitle']
eprating = row['rating']
airdate = row['airdate']
output[(airdate,
show)] = '%s %s %s %d %d %0.2f/%0.2f %s' % (show, title, eptitle, season,
episode, eprating, rating, airdate)
for key in sorted(output):
val = output[key]
print(val)
def find_new_episodes_parse():
parser = argparse.ArgumentParser(description='find_new_episodes script')
parser.add_argument('command', nargs='*', help=help_text)
args = parser.parse_args()
_command = 'list'
do_update = False
do_hulu = False
do_source = False
do_shows = False
do_trakt = False
_args = []
if hasattr(args, 'command'):
for arg in args.command:
if arg in list_of_commands:
_command = arg
elif arg == 'update':
do_update = True
elif arg in ('hulu', 'netflix', 'amazon'):
do_source = arg
elif arg == 'all':
do_source = arg
elif arg == 'shows':
do_shows = True
elif arg == 'trakt':
do_trakt = True
else:
_args.append(arg)
if _command == 'tv':
find_upcoming_episodes(do_update=do_update)
else:
find_new_episodes(_args, do_update, source=do_source, shows=do_shows, trakt=do_trakt)
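# Hedged command-line sketch (commands come from list_of_commands above; the
# console-script wiring to find_new_episodes_parse is assumed to live in the
# package's setup configuration):
#
#   find_new_episodes list            # list new episodes for shows in the queue
#   find_new_episodes tv update       # refresh upcoming-episode listings from IMDb
#   find_new_episodes shows netflix   # summarize netflix-sourced shows only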
|
|
""":mod:`news.mapping` --- Reporter mapping
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides mapping classes that map schedules to reporter classes.
"""
import copy
from .models.abstract import AbstractSchedule
from .reporters.abstract import Reporter
from .reporters.url import URLReporter
from .reporters.feed import (
AtomReporter,
RSSReporter
)
def merge_kwargs_factories(kwargs_factories):
"""Merge news type keyed kwargs factory functions into a single factory.
:param kwargs_factories: A dictionary of news type keyed kwargs factory
functions.
:type kwargs_factories: :class:`dict`
:returns: A merged kwargs factory.
    :rtype: A factory function that takes a schedule and returns a kwargs dict.
"""
def merged_factory(schedule):
try:
return kwargs_factories[schedule.type](schedule)
except KeyError:
return {}
return merged_factory
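# Hedged example of merge_kwargs_factories (the 'url' news type matches the
# defaults in DefaultMapping below; the kwargs themselves are illustrative):
#
#   factory = merge_kwargs_factories({'url': lambda schedule: {'timeout': 10}})
#   factory(url_schedule)   # -> {'timeout': 10}
#   factory(atom_schedule)  # -> {} (unknown types fall back to empty kwargs)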
class Mapping(object):
"""Mapping from news type/schedule to reporter classes.
Implements :meth:`__setitem__` and :meth:`__getitem__` magic methods to
support both mapping from :class:`str` and from
:class:`~news.models.AbstractSchedule` subclasses to
:class:`~news.reporters.Reporter`.
:param mapping: A mapping to inherit from.
:type mapping: :class:`~news.scheduler.Mapping` or `dict`
    :param kwargs_factory: A kwargs factory function that takes a schedule
and returns appropriate reporter kwargs. Factory functions will be
merged if a dictionary which maps from news types to kwargs factory is
given. Defaults to a function that simply returns empty dictionary.
:type kwargs_factory: function
*Example*::
from news.mapping import DefaultMapping
from news.reporters.url import URLReporter
        # both formats are legal. the latter will be merged into a single
        # factory based on its news types.
kwargs_factory = (lambda schedule: {'some_kwarg': 1})
kwargs_factory = {
'url': (lambda schedule: {'some_kwarg': 1})
}
        # create a mapping
mapping = DefaultMapping(kwargs_factory=kwargs_factory)
...
# get reporter by news type string (returns empty kwargs)
reporter_class, kwargs = mapping['url']
assert(reporter_class == URLReporter)
assert(not kwargs)
# get reporter by schedule instance (returns kwargs from factory)
reporter_class, kwargs = mapping[schedule]
assert(reporter_class == URLReporter)
assert(kwargs['some_kwarg'] == 1)
# our main purpose to use mapping
reporter = reporter_class(meta=meta, backend=backend, **kwargs)
"""
def __init__(self, mapping=None, kwargs_factory=None):
if mapping is None:
self.__map = {}
elif isinstance(mapping, dict):
self.__map = mapping
elif isinstance(mapping, Mapping):
self.__map = mapping.as_dict()
self.__kwargs_factory = mapping.kwargs_factory
else:
raise TypeError('Only dictionary or `Mapping` instance is allowed')
if kwargs_factory is None:
self.__kwargs_factory = (lambda schedule: {})
elif callable(kwargs_factory):
self.__kwargs_factory = kwargs_factory
elif isinstance(kwargs_factory, dict):
self.__kwargs_factory = merge_kwargs_factories(kwargs_factory)
else:
            raise TypeError('Only a factory or a dictionary of factories is ' +
                            'allowed')
def __setitem__(self, key, value):
        # only reporter classes can be mapped to
        if not (isinstance(value, type) and issubclass(value, Reporter)):
            raise ValueError('Only reporter subclasses can be mapped to')
if isinstance(key, str):
self.__map[key] = value
elif isinstance(key, AbstractSchedule):
self.__map[key.type] = value
# only string or schedule can be mapped from
else:
raise KeyError('Mapping key is only allowed for Schedule ' +
'subclass or string')
def __getitem__(self, key):
# only string or schedule can be mapped from.
if isinstance(key, str):
return self.__map[key], {}
elif isinstance(key, AbstractSchedule):
return self.__map[key.type], self._make_kwargs(key)
else:
            raise KeyError('Only a Schedule subclass or a string is allowed ' +
                           'as a mapping key')
def __contains__(self, key):
try:
self[key]
return True
except KeyError:
return False
def _make_kwargs(self, schedule):
if self.__kwargs_factory:
return self.__kwargs_factory(schedule)
else:
return {}
def map(self, key, value):
"""Add mapping from a news type or a schedule to a reporter class.
        :param key: Schedule or a string to be mapped from.
:type key: A :class:`~news.models.AbstractSchedule` implementation or
:class:`str`
:param value: Reporter to be mapped to.
:type value: :class:`~news.reporters.Reporter`
:returns: Modified mapping itself
:rtype: :class:`Mapping`
"""
self[key] = value
return self
def unmap(self, key):
"""Remove mapping from a news type or a schedule to a reporter class.
        :param key: Schedule or a string mapped from.
:type key: A :class:`~news.models.AbstractSchedule` implementation or
:class:`str`
:returns: Modified mapping itself
:rtype: :class:`Mapping`
"""
del self[key]
return self
def merge(self, mapping, kwargs_factory=None):
"""Merge another mapping.
:param mapping: A mapping to merge.
:type mapping: :class:`dict` or :class:`Mapping`
:param kwargs_factory: A kwargs factory function to set.
:type kwargs_factory: function
:returns: The merged mapping itself
:rtype: :class:`Mapping`
"""
if isinstance(mapping, dict):
self.__map.update(mapping)
elif isinstance(mapping, Mapping):
self.__map.update(mapping.as_dict())
self.__kwargs_factory = mapping.kwargs_factory
else:
raise TypeError('Only dictionary or `Mapping` instance is allowed')
if kwargs_factory:
self.__kwargs_factory = kwargs_factory
return self
@classmethod
def from_dict(cls, mapping, kwargs_factory=None):
"""Create a mapping from a dictionary.
:param mapping: Mapping dictionary to use.
:type mapping: :class:`dict`
:param kwargs_factory: A kwargs factory function.
        :type kwargs_factory: A function that takes a schedule and returns an
            appropriate reporter kwargs dict.
"""
assert(isinstance(mapping, dict)), 'Only `dict` type is allowed'
return cls(mapping=mapping, kwargs_factory=kwargs_factory)
def as_dict(self):
"""Returns internal mapping dictionary as a copied dictionary.
:returns: Mapping dictionary.
:rtype: :class:`dict`
"""
return copy.deepcopy(self.__map)
@property
def kwargs_factory(self):
"""kwargs factory function of the mapping.
:returns: A kwargs factory function.
        :rtype: A function that takes a schedule and returns reporter kwargs
dict.
"""
return self.__kwargs_factory
class DefaultMapping(Mapping):
"""Default mapping implementation for convenience.
Maps *url*, *atom* and *rss* news types to
:class:`~news.reporters.url.URLReporter`,
:class:`~news.reporters.feed.AtomReporter` and
:class:`~news.reporters.feed.RSSReporter`.
:param mapping: A mapping to merge into default mapping.
:type mapping: :class:`~news.scheduler.Mapping` or `dict`
"""
__default = {
'url': URLReporter,
'atom': AtomReporter,
'rss': RSSReporter,
}
def __init__(self, mapping=None, kwargs_factory=None):
mapping = Mapping(mapping=self.__default).merge(mapping or {})
super().__init__(mapping=mapping, kwargs_factory=kwargs_factory)
|
|
# Copyright (c) 2013 Galah Group LLC
# Copyright (c) 2013 Other contributers as noted in the CONTRIBUTERS file
#
# This file is part of galah-interact-python.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains very useful functions you can use while unittesting
student's code.
.. note::
In order to use the :mod:`unittest` module, you need to make sure that you
have SWIG installed, and that you have *Python development headers*
installed, both of which are probably available through your distribution's
package manager (``apt-get`` or ``yum`` for example).
"""
import interact._utils as _utils
import os
import imp
import inspect
import atexit
import shutil
import tempfile
import subprocess
import os.path
import distutils.core
import capture
#: The absolute path to the swig executable. When this module is imported, the
#: environmental variable ``PATH`` is searched for a file named ``swig``, this
#: variable will be set to the first one that is found. This variable will equal
#: ``None`` if no such file could be found.
swig_path = _utils.which("swig")
class CouldNotCompile(RuntimeError):
"""
Exception raised when a student's code could not be compiled into a single
library file.
:ivar message: A short message describing the exception.
:ivar stderr: The output that was received through standard error. This is
output by ``distutils.core.setup``.
"""
def __init__(self, message, stderr):
self.message = message
self.stderr = stderr
RuntimeError.__init__(self)
def __str__(self):
output = [
self.message,
"---BEGIN STDERR---",
self.stderr,
"---END STDERR---"
]
return "\n".join(output)
def _build_extension(module, mod_ext, working_directory):
os.chdir(working_directory)
distutils.core.setup(
name = module,
ext_modules = [mod_ext],
py_modules = [module],
script_name = "setup.py",
script_args = ["build_ext", "--inplace"]
)
def _generate_shared_libraries(modules, wrapper_directory):
"""
Compiles modules and wrappers to shared libraries using distutils.
:raises: :class:`CouldNotCompile` if the extension could not be compiled.
"""
wrapper_directory = _utils.resolve_path(wrapper_directory)
for module in modules:
so_name = "_%s" % (module, )
wrapper_file = os.path.join(wrapper_directory, module + "_wrap.cxx")
mod_ext = distutils.core.Extension(
str(so_name), sources = [str(wrapper_file)]
)
try:
captured = capture.capture_function(
_build_extension, str(module), mod_ext, str(wrapper_directory)
)
captured.wait()
except SystemExit:
# Setup will call exit which can make the running script exit rather
# suddenly. At least give the user an error with a traceback.
raise CouldNotCompile(
"Could not compile extension module.",
stderr = captured.stderr.read()
)
def _generate_swig_wrappers(interface_files, output_directory):
"""
Generates SWIG Wrapper files (.cxx) and python modules that can be
compiled into a shared library by distutils.
:raises: ``EnvironmentError`` if swig is not installed.
"""
if swig_path is None:
raise EnvironmentError("No swig executable found.")
output_directory = _utils.resolve_path(output_directory)
for current_file in interface_files:
module_name = _utils.file_name(current_file)
output_file = os.path.join(
output_directory, "%s_wrap.cxx" % (module_name, )
)
# Let swig generate the wrapper files.
subprocess.check_call(
[swig_path, "-c++", "-python", "-o", output_file, current_file],
cwd = output_directory,
stdout = _utils.DEVNULL,
stderr = subprocess.STDOUT
)
# These are necessary to allow STL types in python
STD_INTERFACES = [
"std_deque.i", "std_list.i", "std_map.i", "std_pair.i", "std_set.i",
"std_string.i", "std_vector.i", "std_sstream.i"
]
# C++ Directives that expose extra functionality in the underlying C++ code.
EXPOSURE_DIRECTIVES = [
"#define private public", # Expose private member variables to module
"#define protected public",
"#define class struct" # Expose unmarked private member variables
]
def _generate_swig_interface(file_path, output_directory):
"""
Generates a SWIG Interface file (.i) that can be compiled with SWIG to
a shared library file that can be imported into python for testing.
"""
file_path = _utils.resolve_path(file_path)
output_directory = _utils.resolve_path(output_directory)
# Figure out what this module will be named by getting just the filename
# (minus extension) of the code file.
module_name = _utils.file_name(file_path)
# -MM flag returns all dependencies needed to compile file.
gpp_process = subprocess.Popen(
["g++", "-MM", file_path],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT
)
gpp_output = gpp_process.communicate()[0]
# Get dependencies, minus the .o file and the white space
gpp_output = gpp_output.split(":")[1].strip()
dependencies = [i.strip() for i in gpp_output.split(" ") if i.strip() != "\\"]
necessary_includes = []
for include in dependencies:
necessary_includes.append("#include \"%s\"" % (include))
# TODO: Add comment describing what's going on here.
if ".h" in include:
include = include.replace(".hpp", ".h")
include = include.replace(".h", ".cpp")
if file_path not in include and os.path.isfile(include):
necessary_includes.append("#include \"%s\"" % (include))
with open(os.path.join(output_directory, module_name + ".i"), "w") as f:
f.write("%%module %s\n\n" % (module_name, ))
# Ensure we include all of the special swig interface files that allow
# us to interop with the C++ Standard Library.
for interface in STD_INTERFACES:
f.write("%%include \"%s\"\n" % (interface, ))
# Write directives inside and out of wrapper for consistency in wrapped
# file.
f.write("\n".join(EXPOSURE_DIRECTIVES) + "\n")
f.write("using namespace std;\n\n")
f.write("%{\n")
f.write("\n".join(EXPOSURE_DIRECTIVES) + "\n")
for include in necessary_includes:
f.write("%s\n" % include)
f.write("%}\n\n")
        # SWIG cannot import global includes like <iostream>, but it does
        # need all of the local includes.
local_includes = \
(include for include in necessary_includes if '<' not in include)
for include in local_includes:
f.write("%s\n" % include.replace("#", "%"))
return module_name
to_delete = []
def _cleanup():
for i in to_delete:
shutil.rmtree(i)
atexit.register(_cleanup)
def load_files(files):
"""
Compiles and loads functions and classes in code files and makes them
callable from within Python.
:param files: A list of file paths. All of the files will be compiled and
loaded together. These must be absolute paths, see
:meth:`Harness.student_files <interact.core.Harness.student_files>`.
:returns: A ``dict`` where every file that was passed in is a key in the
dictionary (without its file extension) and the value is another
``dict`` where each key is the name of a function or class in the
file and the value is a callable you can use to actually execute
or create an instance of that function or class.
:raises: ``EnvironmentError`` if swig is not properly installed.
:raises: :class:`CouldNotCompile` if the student's code could not be
compiled into a library file.
.. warning::
        During testing, execution of loaded code's ``main()`` function often
        failed. We haven't determined the cause yet, so for now don't use this
        function to test ``main()`` functions (the
        :mod:`interact.execute` module should work well instead).
.. code-block:: python
>>> print open("main.cpp").read()
#include <iostream>
using namespace std;
class Foo {
int a_;
public:
Foo(int a);
int get_a() const;
};
Foo::Foo(int a) : a_(a) {
// Do nothing
}
int Foo::get_a() const {
return a_;
}
int bar() {
Foo foo(3);
cout << "foo.get_a() = " << foo.get_a() << endl;
return 2;
}
int main() {
return 0;
}
>>> students_code = interact.unittest.load_files(["main.cpp"])
>>> Foo = students_code["main"]["Foo"]
>>> bar = students_code["main"]["bar"]
>>> b = Foo(3)
>>> b.get_a()
3
        >>> rvalue = bar()
foo.get_a() = 3
>>> print rvalue
2
If you want to test a function that prints things to stdout or reads from
stdin (like the ``bar()`` function in the above example) you can use the
:mod:`interact.capture` module.
"""
module_dict = {}
# Get a directory we can work within.
temp_dir = tempfile.mkdtemp()
modules = []
for f in files:
modules.append(_generate_swig_interface(f, temp_dir))
interface_files = ((module + ".i") for module in modules)
_generate_swig_wrappers(interface_files, temp_dir)
_generate_shared_libraries(modules, temp_dir)
for module in modules:
module_dict[module] = {}
# Load up the python module we created whose function will let us access
# the C++ ones.
created_module = os.path.join(temp_dir, module + ".py")
mod = imp.load_source(module, created_module)
# Get all functions and classes in this module
filter_func = lambda a: inspect.isbuiltin(a) or inspect.isclass(a)
for name, impl in inspect.getmembers(mod, filter_func):
module_dict[module][name] = impl
to_delete.append(temp_dir)
return module_dict
|
|
# Copyright (c) 2010 matt
# Copyright (c) 2010-2011 Paul Colomiets
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Julien Iguchi-Cartigny
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dequis
# Copyright (c) 2018 Nazar Mokrynskyi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
from libqtile.backend.base import Window
from libqtile.config import Match
from libqtile.layout.base import Layout
class Floating(Layout):
"""
Floating layout, which does nothing with windows but handles focus order
"""
default_float_rules = [
Match(wm_type="utility"),
Match(wm_type="notification"),
Match(wm_type="toolbar"),
Match(wm_type="splash"),
Match(wm_type="dialog"),
Match(wm_class="file_progress"),
Match(wm_class="confirm"),
Match(wm_class="dialog"),
Match(wm_class="download"),
Match(wm_class="error"),
Match(wm_class="notification"),
Match(wm_class="splash"),
Match(wm_class="toolbar"),
Match(func=lambda c: c.has_fixed_size()),
Match(func=lambda c: c.has_fixed_ratio()),
]
defaults = [
("border_focus", "#0000ff", "Border colour(s) for the focused window."),
("border_normal", "#000000", "Border colour(s) for un-focused windows."),
("border_width", 1, "Border width."),
("max_border_width", 0, "Border width for maximize."),
("fullscreen_border_width", 0, "Border width for fullscreen."),
]
def __init__(
self, float_rules: list[Match] | None = None, no_reposition_rules=None, **config
):
"""
If you have certain apps that you always want to float you can provide
``float_rules`` to do so. ``float_rules`` are a list of
Match objects::
from libqtile.config import Match
Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)
When a new window is opened its ``match`` method is called with each of
these rules. If one matches, the window will float. The following
will float GIMP and Skype::
from libqtile.config import Match
float_rules=[Match(wm_class="skype"), Match(wm_class="gimp")]
The following ``Match`` will float all windows that are transient windows for a
        parent window::
Match(func=lambda c: bool(c.is_transient_for()))
Specify these in the ``floating_layout`` in your config.
        Floating layout will try to center most floating windows by default,
        but if you don't want this to happen for certain windows that are
        centered by mistake, you can use the ``no_reposition_rules`` option to
        specify them; the layout will then rely on those windows to position
        themselves in the correct location on the screen.
"""
Layout.__init__(self, **config)
self.clients: list[Window] = []
self.focused = None
self.group = None
if float_rules is None:
float_rules = self.default_float_rules
self.float_rules = float_rules
self.no_reposition_rules = no_reposition_rules or []
self.add_defaults(Floating.defaults)
def match(self, win):
"""Used to default float some windows"""
return any(win.match(rule) for rule in self.float_rules)
def find_clients(self, group):
"""Find all clients belonging to a given group"""
return [c for c in self.clients if c.group is group]
def to_screen(self, group, new_screen):
"""Adjust offsets of clients within current screen"""
for win in self.find_clients(group):
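            # Re-assigning the same flag value below is intentional: the
            # maximized/fullscreen property setters re-apply the window's
            # geometry against its new screen (an assumption based on the
            # backend Window property implementation).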
if win.maximized:
win.maximized = True
elif win.fullscreen:
win.fullscreen = True
else:
# If the window hasn't been floated before, it will be configured in
# .configure()
if win.float_x is not None and win.float_y is not None:
# By default, place window at same offset from top corner
new_x = new_screen.x + win.float_x
new_y = new_screen.y + win.float_y
# make sure window isn't off screen left/right...
new_x = min(new_x, new_screen.x + new_screen.width - win.width)
new_x = max(new_x, new_screen.x)
# and up/down
new_y = min(new_y, new_screen.y + new_screen.height - win.height)
new_y = max(new_y, new_screen.y)
win.x = new_x
win.y = new_y
win.group = new_screen.group
def focus_first(self, group=None):
if group is None:
clients = self.clients
else:
clients = self.find_clients(group)
if clients:
return clients[0]
def focus_next(self, win):
if win not in self.clients or win.group is None:
return
clients = self.find_clients(win.group)
idx = clients.index(win)
if len(clients) > idx + 1:
return clients[idx + 1]
def focus_last(self, group=None):
if group is None:
clients = self.clients
else:
clients = self.find_clients(group)
if clients:
return clients[-1]
def focus_previous(self, win):
if win not in self.clients or win.group is None:
return
clients = self.find_clients(win.group)
idx = clients.index(win)
if idx > 0:
return clients[idx - 1]
def focus(self, client):
self.focused = client
def blur(self):
self.focused = None
def on_screen(self, client, screen_rect):
if client.x < screen_rect.x: # client's left edge
return False
if screen_rect.x + screen_rect.width < client.x + client.width: # right
return False
if client.y < screen_rect.y: # top
return False
        if screen_rect.y + screen_rect.height < client.y + client.height:  # bottom
return False
return True
def compute_client_position(self, client, screen_rect):
"""recompute client.x and client.y, returning whether or not to place
this client above other windows or not"""
above = True
if client.has_user_set_position() and not self.on_screen(client, screen_rect):
# move to screen
client.x = screen_rect.x + client.x
client.y = screen_rect.y + client.y
if not client.has_user_set_position() or not self.on_screen(client, screen_rect):
# client has not been properly placed before or it is off screen
transient_for = client.is_transient_for()
if transient_for is not None:
# if transient for a window, place in the center of the window
center_x = transient_for.x + transient_for.width / 2
center_y = transient_for.y + transient_for.height / 2
above = False
else:
center_x = screen_rect.x + screen_rect.width / 2
center_y = screen_rect.y + screen_rect.height / 2
x = center_x - client.width / 2
y = center_y - client.height / 2
# don't go off the right...
x = min(x, screen_rect.x + screen_rect.width - client.width)
# or left...
x = max(x, screen_rect.x)
# or bottom...
y = min(y, screen_rect.y + screen_rect.height - client.height)
# or top
y = max(y, screen_rect.y)
client.x = int(round(x))
client.y = int(round(y))
return above
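    # Worked example of the centering math above (hypothetical numbers): a
    # 400x300 client on a 1920x1080 screen at (0, 0) is centered at
    # x = 1920/2 - 400/2 = 760 and y = 1080/2 - 300/2 = 390.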
def configure(self, client, screen_rect):
if client.has_focus:
bc = self.border_focus
else:
bc = self.border_normal
if client.maximized:
bw = self.max_border_width
elif client.fullscreen:
bw = self.fullscreen_border_width
else:
bw = self.border_width
        # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java applications;
        # don't reposition it anywhere, let the Java app control it
cls = client.get_wm_class() or ""
is_java_dropdown = "sun-awt-X11-XWindowPeer" in cls
if is_java_dropdown:
client.paint_borders(bc, bw)
client.cmd_bring_to_front()
# alternatively, users may have asked us explicitly to leave the client alone
elif any(m.compare(client) for m in self.no_reposition_rules):
client.paint_borders(bc, bw)
client.cmd_bring_to_front()
else:
above = False
# We definitely have a screen here, so let's be sure we'll float on screen
if client.float_x is None or client.float_y is None:
# this window hasn't been placed before, let's put it in a sensible spot
above = self.compute_client_position(client, screen_rect)
client.place(
client.x,
client.y,
client.width,
client.height,
bw,
bc,
above,
respect_hints=True,
)
client.unhide()
def add(self, client):
self.clients.append(client)
self.focused = client
def remove(self, client):
if client not in self.clients:
return
next_focus = self.focus_next(client)
if client is self.focused:
self.blur()
self.clients.remove(client)
return next_focus
def get_windows(self):
return self.clients
def info(self):
d = Layout.info(self)
d["clients"] = [c.name for c in self.clients]
return d
def cmd_next(self):
# This can't ever be called, but implement the abstract method
pass
def cmd_previous(self):
# This can't ever be called, but implement the abstract method
pass
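# A minimal usage sketch (assumptions: this lives in a qtile config.py and the
# extra Match rules are illustrative only, not part of this module):
#
#     from libqtile.config import Match
#     from libqtile.layout.floating import Floating
#
#     floating_layout = Floating(
#         float_rules=Floating.default_float_rules
#         + [Match(wm_class="gimp"), Match(title="branchdialog")],
#         border_focus="#ff0000",
#     )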
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class PhoneNumberTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://trunking.twilio.com/v1/Trunks/TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2010-12-10T17:27:34Z",
"date_updated": "2015-10-09T11:36:32Z",
"friendly_name": "(415) 867-5309",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+14158675309",
"api_version": "2010-04-01",
"voice_caller_id_lookup": null,
"voice_url": "",
"voice_method": "POST",
"voice_fallback_url": null,
"voice_fallback_method": null,
"status_callback": "",
"status_callback_method": "POST",
"voice_application_sid": null,
"trunk_sid": "TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_url": "",
"sms_method": "POST",
"sms_fallback_url": "",
"sms_fallback_method": "POST",
"sms_application_sid": "",
"address_requirements": "none",
"beta": false,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"capabilities": {
"voice": true,
"sms": true,
"mms": true
},
"links": {
"phone_number": "https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
}
'''
))
actual = self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://trunking.twilio.com/v1/Trunks/TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers/PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers(sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
def test_create_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.create(phone_number_sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
values = {'PhoneNumberSid': "PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", }
self.holodeck.assert_has_request(Request(
'post',
'https://trunking.twilio.com/v1/Trunks/TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers',
data=values,
))
def test_create_response(self):
self.holodeck.mock(Response(
201,
'''
{
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2010-12-10T17:27:34Z",
"date_updated": "2015-10-09T11:36:32Z",
"friendly_name": "(415) 867-5309",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+14158675309",
"api_version": "2010-04-01",
"voice_caller_id_lookup": null,
"voice_url": "",
"voice_method": "POST",
"voice_fallback_url": null,
"voice_fallback_method": null,
"status_callback": "",
"status_callback_method": "POST",
"voice_application_sid": null,
"trunk_sid": "TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_url": "",
"sms_method": "POST",
"sms_fallback_url": "",
"sms_fallback_method": "POST",
"sms_application_sid": "",
"address_requirements": "none",
"beta": false,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"capabilities": {
"voice": true,
"sms": true,
"mms": true
},
"links": {
"phone_number": "https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
}
'''
))
actual = self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.create(phone_number_sid="PNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.list()
self.holodeck.assert_has_request(Request(
'get',
'https://trunking.twilio.com/v1/Trunks/TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/PhoneNumbers',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"first_page_url": "https://trunking.twilio.com/v1/Trunks/TRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0",
"key": "phone_numbers",
"next_page_url": null,
"page": 0,
"page_size": 1,
"previous_page_url": null,
"url": "https://trunking.twilio.com/v1/Trunks/TRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0"
},
"phone_numbers": [
{
"sid": "PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"date_created": "2010-12-10T17:27:34Z",
"date_updated": "2015-10-09T11:36:32Z",
"friendly_name": "(415) 867-5309",
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"phone_number": "+14158675309",
"api_version": "2010-04-01",
"voice_caller_id_lookup": null,
"voice_url": "",
"voice_method": "POST",
"voice_fallback_url": null,
"voice_fallback_method": null,
"status_callback": "",
"status_callback_method": "POST",
"voice_application_sid": null,
"trunk_sid": "TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sms_url": "",
"sms_method": "POST",
"sms_fallback_url": "",
"sms_fallback_method": "POST",
"sms_application_sid": "",
"address_requirements": "none",
"beta": false,
"url": "https://trunking.twilio.com/v1/Trunks/TKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"capabilities": {
"voice": true,
"sms": true,
"mms": true
},
"links": {
"phone_number": "https://api.twilio.com/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers/PNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
}
]
}
'''
))
actual = self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"first_page_url": "https://trunking.twilio.com/v1/Trunks/TRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0",
"key": "phone_numbers",
"next_page_url": null,
"page": 0,
"page_size": 1,
"previous_page_url": null,
"url": "https://trunking.twilio.com/v1/Trunks/TRaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/PhoneNumbers?PageSize=1&Page=0"
},
"phone_numbers": []
}
'''
))
actual = self.client.trunking.v1.trunks(sid="TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.phone_numbers.list()
self.assertIsNotNone(actual)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The TIR serialization specification for Arm(R) Ethos(TM)-U NPU."""
from typing import Union
from typing import get_type_hints
from inspect import isclass
import tvm
from tvm.relay.backend.contrib.ethosu import util
def create_serial_object(serialized_type, deserialized_elements):
"""
    This function will create an object of a serialized type, i.e. one of the
    subclasses of tvm.relay.backend.contrib.ethosu.tir.spec.SerializableFormat
    Parameters
    ----------
    serialized_type : a subclass type of SerializableFormat
    deserialized_elements : list
        The list of arguments that need to be packed to create the
        SerializableFormat object
Returns
-------
The constructed object of type serialized_type
"""
def _create_serial_object(internal_serialized_type, read_element_idx=0):
"""The internal function that increments the read_element_idx
when creating nested serial objects"""
arg_len = util.get_arg_count(internal_serialized_type.__init__) - 1
serial_init_types = get_type_hints(internal_serialized_type.__init__)
serial_init_arg_names = list(serial_init_types.keys())
serial_init_args = []
assert arg_len == len(serial_init_arg_names)
for si_arg_name in serial_init_arg_names:
si_arg_type = serial_init_types[si_arg_name]
if isclass(si_arg_type) and issubclass(si_arg_type, SerializableFormat):
sia, read_element_idx = _create_serial_object(si_arg_type, read_element_idx)
serial_init_args.append(sia)
else:
serial_init_args.append(deserialized_elements[read_element_idx])
read_element_idx += 1
return internal_serialized_type(*serial_init_args), read_element_idx
# Just return the primary serial object
return _create_serial_object(serialized_type)[0]
class SerializableFormat:
"""Base class to retrieve arguments on a predefined ordering"""
def __iter__(self):
# Note class attribute definition order is preserved - see PEP 520
for name in self.__dict__:
value = self.__getattribute__(name)
if isinstance(value, SerializableFormat):
yield from list(value)
else:
yield value
def __getitem__(self, index):
# Note class attribute definition order is preserved - see PEP 520
name = list(self.__dict__.keys())[index]
return self.__getattribute__(name)
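# Note (sketch): because instance attributes are assigned in __init__ in the
# same order as the parameters, iterating a nested object such as a
# Serial2DConvolution yields all leaf fields depth-first in parameter order,
# i.e. the flat argument list of the corresponding TIR extern call.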
class SerialFeatureMap(SerializableFormat):
"""Specialization class to retrieve arguments of a Feature Map
    (similar to NpuFeatureMap of Vela) on a predefined ordering"""
def __init__(
self,
data_type: str,
height: int,
width: int,
channels: int,
tile_height_0: int,
tile_height_1: int,
tile_width_0: int,
tile_address_0: tvm.tir.expr.Load,
tile_address_1: Union[tvm.tir.expr.Load, int],
tile_address_2: Union[tvm.tir.expr.Load, int],
tile_address_3: Union[tvm.tir.expr.Load, int],
scale: float,
zero_point: int,
layout: str,
stride_h: int,
stride_w: int,
stride_c: int,
):
self.data_type = data_type
self.height = height
self.width = width
self.channels = channels
self.tile_height_0 = tile_height_0
self.tile_height_1 = tile_height_1
self.tile_width_0 = tile_width_0
self.tile_address_0 = tile_address_0
self.tile_address_1 = tile_address_1
self.tile_address_2 = tile_address_2
self.tile_address_3 = tile_address_3
self.scale = scale
self.zero_point = zero_point
self.layout = layout
self.stride_h = stride_h
self.stride_w = stride_w
self.stride_c = stride_c
class SerialKernel(SerializableFormat):
"""Specialization class to retrieve arguments of a Kernel
    (similar to NpuKernel of Vela) on a predefined ordering"""
def __init__(
self,
width: int,
height: int,
stride_w: int,
stride_h: int,
dilation_w: int,
dilation_h: int,
):
self.width = width
self.height = height
self.stride_w = stride_w
self.stride_h = stride_h
self.dilation_w = dilation_w
self.dilation_h = dilation_h
class SerialAddressRange(SerializableFormat):
"""Specialization class to retrieve arguments of a AddressRange
(similiar to NpuAddressRange of Vela) on a predefined ordering"""
def __init__(self, address: tvm.tir.expr.Load, length: int):
self.address = address
self.length = length
class SerialPadding(SerializableFormat):
"""Specialization class to retrieve arguments of a Padding
    (similar to NpuPadding of Vela) on a predefined ordering"""
def __init__(self, top: int, left: int, bottom: int, right: int):
self.top = top
self.left = left
self.bottom = bottom
self.right = right
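# A minimal sketch of create_serial_object() using the class above
# (illustrative values only):
#
#     padding = create_serial_object(SerialPadding, [1, 1, 0, 0])
#     assert list(padding) == [1, 1, 0, 0]
#
# For nested types (e.g. Serial2DConvolution) the same call recurses through
# the __init__ type hints, consuming deserialized_elements in order.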
class SerialActivation(SerializableFormat):
"""Specialization class to retrieve arguments of a Activation
(similiar to NpuActivation of Vela) on a predefined ordering"""
def __init__(self, op: str, clip_min: int, clip_max: int):
self.op = op
self.clip_min = clip_min
self.clip_max = clip_max
class Serial2DConvolution(SerializableFormat):
"""Specialization class to retrieve arguments of
    an ethosu.conv2d TIR extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
kernel: SerialKernel,
weight: SerialAddressRange,
weight_zero_point: int,
scale_bias: SerialAddressRange,
padding: SerialPadding,
activation: SerialActivation,
rounding_mode: str,
upscale: str,
):
self.ifm = ifm
self.ofm = ofm
self.kernel = kernel
self.weight = weight
self.weight_zero_point = weight_zero_point
self.scale_bias = scale_bias
self.padding = padding
self.activation = activation
self.rounding_mode = rounding_mode
self.upscale = upscale
class Serial2DDepthwise(SerializableFormat):
"""Specialization class to retrieve arguments of
    an ethosu.depthwise_conv2d TIR extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
kernel: SerialKernel,
weight: SerialAddressRange,
weight_zero_point: int,
scale_bias: SerialAddressRange,
padding: SerialPadding,
activation: SerialActivation,
rounding_mode: str,
upscale: str,
):
self.ifm = ifm
self.ofm = ofm
self.kernel = kernel
self.weight = weight
self.weight_zero_point = weight_zero_point
self.scale_bias = scale_bias
self.padding = padding
self.activation = activation
self.rounding_mode = rounding_mode
self.upscale = upscale
class SerialCopy(SerializableFormat):
"""Specialization class to retrieve arguments of
    an ethosu.copy TIR extern call on a predefined ordering"""
def __init__(
self, read_address: tvm.tir.expr.Load, length: int, write_address: tvm.tir.expr.Load
):
self.read_address = read_address
self.length = length
self.write_address = write_address
class SerialPooling(SerializableFormat):
"""Specialization class to retrieve arguments of
    an ethosu.pooling TIR extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
pooling_type: str,
pool_shape: SerialKernel,
padding: SerialPadding,
activation: SerialActivation,
rounding_mode: str,
upscale: str,
):
self.ifm = ifm
self.ofm = ofm
self.pooling_type = pooling_type
self.pool_shape = pool_shape
self.padding = padding
self.activation = activation
self.rounding_mode = rounding_mode
self.upscale = upscale
class SerialBinaryElementwise(SerializableFormat):
"""Specialization class to retrieve arguments of
    an ethosu.binary_elementwise TIR extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ifm2: SerialFeatureMap,
ofm: SerialFeatureMap,
operator_type: str,
reversed_operands: bool,
activation: SerialActivation,
rounding_mode: str,
):
self.ifm = ifm
self.ifm2 = ifm2
self.ofm = ofm
self.operator_type = operator_type
self.reversed_operands = reversed_operands
self.activation = activation
self.rounding_mode = rounding_mode
class SerialUnaryElementwise(SerializableFormat):
"""Specialization class to retrieve arguments of
    an ethosu.unary_elementwise TIR extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
operator_type: str,
activation: SerialActivation,
rounding_mode: str,
):
self.ifm = ifm
self.ofm = ofm
self.operator_type = operator_type
self.activation = activation
self.rounding_mode = rounding_mode
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
import ruamel.yaml as yaml
import time
import colorama
import intelhex
from pprint import pprint
from hexdump import hexdump as hexdump
import keyplus
from keyplus.constants import *
from keyplus.exceptions import KeyplusUSBCommandError
from keyplus.layout import *
from keyplus.device_info import KeyboardPinMapping
from keyplus.keycodes import *
import keyplus.cdata_types
if __name__ == '__main__':
colorama.init()
dev_list = keyplus.find_devices(vid_pid="6666:1111")
print(dev_list)
kb = dev_list[0]
kb.connect()
if 0:
led = 0
led_state = 0
for _ in range(6):
led_state = not led_state
kb.set_indicator_led(led, led_state)
time.sleep(0.5)
kb.reset()
time.sleep(3)
kb.reconnect()
kb.set_indicator_led(led, not led_state)
if 1:
for kb_id in range(64):
try:
print("layer info: ", kb.get_layers(kb_id))
except KeyplusUSBCommandError as err:
if err.code == CMD_ERROR_KEYBOARD_INACTIVE:
continue
else:
raise err
print("name:", kb.device_info.device_name)
print("nrf24_disabled:", kb.device_info.nrf24_disabled)
print("i2c_disabled:", kb.device_info.i2c_disabled)
scan_mode = ScanMode()
scan_mode.set_scan_mode('row_col')
scan_mode.add_row_pins(['D0', 'D1', 'D2', 'D3'])
if 1:
scan_mode.add_column_pins(['A0', 'A1', 'A2', 'A3', 'A4', 'A5'])
for row in range(4):
for col in range(6):
if col == row:
continue
scan_mode.add_key_to_matrix_map(row*6+col, row, col)
else:
scan_mode.add_column_pins(['A5', 'A4', 'A3', 'A2', 'A1', 'A0'])
for row in range(4):
for col in range(6):
# Note: reverse column position in row
scan_mode.add_key_to_matrix_map(row*6+(5-col), row, col)
scan_mode.set_debounce_profile("cherry_mx")
print("scan_mode.to_json:", scan_mode.to_json())
target = kb.get_device_target()
scan_plan = scan_mode.generate_scan_plan(target)
scan_plan_bytes = scan_plan.pack()
print(scan_plan_bytes)
print(repr(scan_plan_bytes))
print(type(scan_plan_bytes))
hexdump(scan_plan_bytes)
new_scan_plan = keyplus.cdata_types.scan_plan_t()
new_scan_plan.unpack(scan_plan_bytes)
pprint(("Matches: {}".format(scan_plan == new_scan_plan), new_scan_plan))
pin_mapping = scan_mode.generate_pin_mapping(target)
pin_mapping_raw = pin_mapping.pack()
hexdump(pin_mapping_raw)
new_pin_mapping = KeyboardPinMapping()
new_pin_mapping.unpack(pin_mapping_raw, new_scan_plan, target)
hexdump(new_pin_mapping.pack())
new_scan_mode = ScanMode()
new_scan_mode.load_raw_data(new_scan_plan, new_pin_mapping)
pprint(vars(scan_mode))
pprint(vars(new_scan_mode))
layout_settings = kb.get_layout_info()
hexdump(layout_settings.pack())
pprint(vars(layout_settings))
layout_device = LayoutDevice()
layout_device.load_raw_data(
kb.device_info, layout_settings, new_pin_mapping
)
pprint(vars(layout_device))
print(("#"*80 + "\n")*3)
scan_mode_test = ScanMode()
test_scan_mode_dict = {
'mode': 'col_row',
'rows': ['D0', 'D1', 'D2', 'D3'],
'cols': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5'],
'matrix_map': [
'r0c0', 'r0c1', 'r0c2', 'r0c3', 'r0c4', 'r0c5',
'r1c0', 'r1c1', 'r1c2', 'r1c3', 'r1c4', 'r1c5',
'r2c0', 'r2c1', 'r2c2', 'r2c3', 'r2c4', 'r2c5',
'r3c0', 'r3c1', 'r3c2', 'r3c3', 'r3c4', 'r3c5',
],
# 'debounce': 'kailh_box',
'debounce': {
"debounce_time_press": 5,
"debounce_time_release": 10,
"trigger_time_press": 1,
"trigger_time_release": 3,
"parasitic_discharge_delay_idle": 2.0,
"parasitic_discharge_delay_debouncing": 10.0,
}
}
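    # matrix_map entries are 'r<row>c<col>' strings: the list index is the
    # physical key number and the string names its electrical row/column,
    # mirroring scan_mode.add_key_to_matrix_map(key_number, row, col) above.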
scan_mode_test.parse_json(test_scan_mode_dict)
hexdump(scan_mode_test.generate_scan_plan(target).pack())
hexdump(scan_mode_test.generate_pin_mapping(target).pack())
pprint(vars(scan_mode_test))
print(("@"*80 + "\n")*3)
test_layout_device_dict = {
'id': 0,
'layout': 0,
'layout_offset': 0,
'scan_mode': test_scan_mode_dict,
}
layout_device = LayoutDevice()
layout_device.parse_json("test_device", json_obj=test_layout_device_dict)
pprint(vars(layout_device))
print(("*"*80 + "\n")*3)
rf_settings = LayoutRFSettings()
rf_settings.load_random()
pprint(vars(rf_settings))
rf_settings = LayoutRFSettings()
test_rf_settings = {
"aes_encryption_key": "9febeb27209e131ceaf812f73feed577",
"rf_channel": 0x08,
"auto_retransmit_count": 8, # options: 0-15
# TODO: should include retransmit delay option
"data_rate": "2mbps", # options: 2mbps, 1mbps, 250kbps
"transmit_power": "0dB", # options: 0dB, -6dB, -12dB, -18dB
"pipe0": '2aef63473c',
"pipe1": '168d715956',
"pipe2": 'c1',
"pipe3": 'fc',
"pipe4": '63',
"pipe5": '00',
"pipe6": '00',
}
rf_settings.parse_json(test_rf_settings)
pprint(vars(rf_settings))
new_json = rf_settings.to_json()
print(rf_settings, new_json)
new_rf_settings = LayoutRFSettings()
new_rf_settings.parse_json(new_json)
newest_rf_settings = LayoutRFSettings()
newest_rf_settings.load_raw_data(kb.rf_info)
thingy = newest_rf_settings.to_json()
print("newest_rf_settings:", thingy)
newest_raw = newest_rf_settings.generate_rf_settings()
something = newest_raw
print(something.pack)
print(something.pack())
keycode_mapper = KeycodeMapper()
layout = LayoutKeyboard(
layout_id = "foo",
number_layers = 3,
device_sizes = [3, 5],
)
layout.set_keycode(
layer = 0,
device = 0,
key_number = 0,
keycode = "ca-up"
)
layout.set_keycode(
layer = 1,
device = 0,
key_number = 0,
keycode = "a"
)
layout.set_keycode(
layer = 1,
device = 0,
key_number = 1,
keycode = "b"
)
pprint(vars(layout))
for layer in layout.layer_list:
pprint(vars(layer))
for device in layer.device_list:
pprint(vars(device))
keycode_mapper = KeycodeMapper()
pprint(layout.to_json())
pprint(layout.to_keycodes())
new_layout = LayoutKeyboard('new')
new_layout.load_keycodes(layout.to_keycodes())
print("new_layout: ", end="")
pprint(new_layout.to_json())
print("new_layout: ", end="")
pprint(new_layout.to_keycodes())
print(kb.read_layout_data(0, 63))
print()
layout_data = kb.read_whole_layout()
print("Full layout data: ")
hexdump(layout_data)
unpacked_layout_data = kb.unpack_layout_data()
for (i, layout) in enumerate(unpacked_layout_data):
# pprint(vars(layout))
print("This is layout {}:".format(i))
print(layout.to_json())
print()
keyplus_layout2 = KeyplusLayout()
keyplus_layout2.from_yaml_file(
"../layouts/arbitrary_map_tester.yaml",
"../layouts/test_rf_config.yaml",
)
pprint(vars(keyplus_layout2))
keyplus_layout = KeyplusLayout()
with open("../layouts/basic_split_test.yaml") as f:
layout_json = yaml.load(f.read())
with open("./_ignore_rf_settings.yaml") as f:
rf_json = yaml.load(f.read())
keyplus_layout.parse_json(layout_json, rf_json)
# keyplus_layout.get_layout_by_id(2).set_keycode(0, 0, 3, 's-W')
raw_layout = keyplus_layout.build_layout_section(kb.get_device_target())
hexdump(raw_layout)
ihex = intelhex.IntelHex()
ihex.frombytes(raw_layout, 0x7800)
ihex.write_hex_file(
"test_layout_out.hex"
)
print(ihex)
raw_settings = keyplus_layout.build_settings_section(kb.get_device_target())
hexdump(raw_settings)
ihex = intelhex.IntelHex()
ihex.frombytes(raw_settings, 0x7600)
ihex.write_hex_file(
"temp_new.hex"
)
kb.update_settings_section(raw_settings, keep_rf=1)
kb.update_layout_section(raw_layout)
    # kb.set_passthrough_mode(True)
kb.disconnect()
|
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import dihedral
import dihedral_fast
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 50 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
num_epochs_train*9/10: base_lr/10,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-180, 180),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
use_hough_roi = True # use roi to center patches
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(100,100)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 64
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 20 * AV_SLICE_PER_PAT  # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
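# tta_average_method (intent inferred from the utils names): convert each
# predicted CDF to a PDF, take the normalised geometric average across the
# test-time augmentations, then cumulative-sum back into a CDF.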
# Architecture
def rms(x, axis=None, epsilon=1e-12):
return T.sqrt(T.mean(T.sqr(x), axis=axis) + epsilon)
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
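# lb_softplus(lb) builds the nonlinearity f(x) = softplus(x) + lb
# = log(1 + exp(x)) + lb, which is bounded below by lb; it is used for the
# sigma outputs below (lb_softplus(3)) so the predicted standard deviation
# stays strictly positive.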
def build_model(input_layer = None):
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:singleslice"]
if input_layer:
l0 = input_layer
else:
l0 = nn.layers.InputLayer(input_size)
l0c = dihedral.CyclicSliceLayer(l0)
l1a = nn.layers.dnn.Conv2DDNNLayer(l0c, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l1r = dihedral_fast.CyclicConvRollLayer(l1)
l2a = nn.layers.dnn.Conv2DDNNLayer(l1r, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l2r = dihedral_fast.CyclicConvRollLayer(l2)
l3a = nn.layers.dnn.Conv2DDNNLayer(l2r, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l3r = dihedral_fast.CyclicConvRollLayer(l3)
l4a = nn.layers.dnn.Conv2DDNNLayer(l3r, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l4r = dihedral_fast.CyclicConvRollLayer(l4)
l5a = nn.layers.dnn.Conv2DDNNLayer(l4r, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.very_leaky_rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
l5r = dihedral_fast.CyclicConvRollLayer(l5)
l5f = nn.layers.FlattenLayer(l5r)
l5m = dihedral.CyclicPoolLayer(l5f)
# l5drop = nn.layers.dropout(l5m, p=0.5)
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5m, num_units=256, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3mu = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
ldsys3sigma = nn.layers.DenseLayer(ldsys2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(100.0), nonlinearity=lb_softplus(3))
ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)
l_systole = layers.MuSigmaErfLayer(ldsys3musigma)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5m, num_units=256, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3mu = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
lddia3sigma = nn.layers.DenseLayer(lddia2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(100.0), nonlinearity=lb_softplus(3))
lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)
l_diastole = layers.MuSigmaErfLayer(lddia3musigma)
return {
"inputs":{
"sliced:data:singleslice": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3mu: l2_weight_out,
ldsys3sigma: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3mu: l2_weight_out,
lddia3sigma: l2_weight_out,
},
"meta_outputs": {
"systole:mu": ldsys3mu,
"systole:sigma": ldsys3sigma,
"diastole:mu": lddia3mu,
"diastole:sigma": lddia3sigma,
}
}
|
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .util import *
from electroncash.i18n import _
from electroncash.plugins import run_hook
from electroncash.address import Address
from electroncash.bitcoin import COINBASE_MATURITY
from electroncash import cashacct
from collections import defaultdict
from functools import wraps
from enum import IntEnum
class UTXOList(MyTreeWidget):
class Col(IntEnum):
'''Column numbers. This is to make code in on_update easier to read.
If you modify these, make sure to modify the column header names in
the MyTreeWidget constructor.'''
address = 0
label = 1
amount = 2
height = 3
output_point = 4
class DataRoles(IntEnum):
'''Data roles. Again, to make code in on_update easier to read.'''
name = Qt.UserRole + 0
frozen_flags = Qt.UserRole + 1
address = Qt.UserRole + 2
cash_account = Qt.UserRole + 3 # this may not always be there for a particular item
slp_token = Qt.UserRole + 4 # this is either a tuple of (token_id, qty) or None
filter_columns = [Col.address, Col.label]
default_sort = MyTreeWidget.SortSpec(Col.amount, Qt.DescendingOrder) # sort by amount, descending
def __init__(self, parent=None):
columns = [ _('Address'), _('Label'), _('Amount'), _('Height'), _('Output point') ]
MyTreeWidget.__init__(self, parent, self.create_menu, columns,
stretch_column = UTXOList.Col.label,
deferred_updates = True, save_sort_settings = True)
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.wallet = self.parent.wallet
self.parent.ca_address_default_changed_signal.connect(self._ca_on_address_default_change)
self.parent.gui_object.cashaddr_toggled_signal.connect(self.update)
self.utxos = list()
# cache some values to avoid constructing Qt objects for every pass through self.on_update (this is important for large wallets)
self.monospaceFont = QFont(MONOSPACE_FONT)
self.lightBlue = QColor('lightblue') if not ColorScheme.dark_scheme else QColor('blue')
self.blue = ColorScheme.BLUE.as_color(True)
self.cyanBlue = QColor('#3399ff')
self.slpBG = ColorScheme.SLPGREEN.as_color(True)
self.immatureColor = ColorScheme.BLUE.as_color(False)
self.output_point_prefix_text = columns[self.Col.output_point]
self.cleaned_up = False
def clean_up(self):
self.cleaned_up = True
try: self.parent.ca_address_default_changed_signal.disconnect(self._ca_on_address_default_change)
except TypeError: pass
try: self.parent.gui_object.cashaddr_toggled_signal.disconnect(self.update)
except TypeError: pass
def if_not_dead(func):
'''Boilerplate: Check if cleaned up, and if so, don't execute method'''
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.cleaned_up or not self.wallet or not self.parent:
return
else:
func(self, *args, **kwargs)
return wrapper
def get_name(self, x):
return x.get('prevout_hash') + ":%d"%x.get('prevout_n')
def get_name_short(self, x):
return x.get('prevout_hash')[:10] + '...' + ":%d"%x.get('prevout_n')
    @rate_limited(1.0, ts_after=True) # performance tweak -- limit updates to no more than once per second
def update(self):
if self.cleaned_up:
# short-cut return if window was closed and wallet is stopped
return
super().update()
@if_not_dead
def on_update(self):
local_maturity_height = (self.wallet.get_local_height()+1) - COINBASE_MATURITY
prev_selection = self.get_selected() # cache previous selection, if any
self.clear()
ca_by_addr = defaultdict(list)
if self.show_cash_accounts:
addr_set = set()
self.utxos = self.wallet.get_utxos(addr_set_out=addr_set, exclude_slp=False)
# grab all cash accounts so that we may add the emoji char
for info in self.wallet.cashacct.get_cashaccounts(addr_set):
ca_by_addr[info.address].append(info)
del info
for ca_list in ca_by_addr.values():
ca_list.sort(key=lambda info: ((info.number or 0), str(info.collision_hash))) # sort the ca_lists by number, required by cashacct.get_address_default
del ca_list # reference still exists inside ca_by_addr dict, this is just deleted here because we re-use this name below.
del addr_set # clean-up. We don't want the below code to ever depend on the existence of this cell.
else:
self.utxos = self.wallet.get_utxos(exclude_slp=False)
for x in self.utxos:
address = x['address']
address_text = address.to_ui_string()
ca_info = None
ca_list = ca_by_addr.get(address)
tool_tip0 = None
if ca_list:
ca_info = self.wallet.cashacct.get_address_default(ca_list)
address_text = f'{ca_info.emoji} {address_text}' # prepend the address emoji char
tool_tip0 = self.wallet.cashacct.fmt_info(ca_info, emoji=True)
height = x['height']
is_immature = x['coinbase'] and height > local_maturity_height
name = self.get_name(x)
name_short = self.get_name_short(x)
label = self.wallet.get_label(x['prevout_hash'])
amount = self.parent.format_amount(x['value'], is_diff=False, whitespaces=True)
utxo_item = SortableTreeWidgetItem([address_text, label, amount,
str(height), name_short])
if label:
utxo_item.setToolTip(1, label) # just in case it doesn't fit horizontally, we also provide it as a tool tip where hopefully it won't be elided
if tool_tip0:
utxo_item.setToolTip(0, tool_tip0)
utxo_item.setToolTip(4, name) # just in case they like to see lots of hex digits :)
utxo_item.DataRole = Qt.UserRole+100 # set this here to avoid sorting based on Qt.UserRole+1
utxo_item.setFont(0, self.monospaceFont)
utxo_item.setFont(2, self.monospaceFont)
utxo_item.setFont(4, self.monospaceFont)
utxo_item.setData(0, self.DataRoles.name, name)
a_frozen = self.wallet.is_frozen(address)
c_frozen = x['is_frozen_coin']
toolTipMisc = ''
slp_token = x['slp_token']
if is_immature:
for colNum in range(self.columnCount()):
if colNum == self.Col.label:
continue # don't color the label column
utxo_item.setForeground(colNum, self.immatureColor)
toolTipMisc = _('Coin is not yet mature')
elif slp_token:
utxo_item.setBackground(0, self.slpBG)
toolTipMisc = _('Coin contains an SLP token')
elif a_frozen and not c_frozen:
# address is frozen, coin is not frozen
# emulate the "Look" off the address_list .py's frozen entry
utxo_item.setBackground(0, self.lightBlue)
toolTipMisc = _("Address is frozen")
elif c_frozen and not a_frozen:
# coin is frozen, address is not frozen
utxo_item.setBackground(0, self.blue)
toolTipMisc = _("Coin is frozen")
elif c_frozen and a_frozen:
# both coin and address are frozen so color-code it to indicate that.
utxo_item.setBackground(0, self.lightBlue)
utxo_item.setForeground(0, self.cyanBlue)
toolTipMisc = _("Coin & Address are frozen")
# save the address-level-frozen and coin-level-frozen flags to the data item for retrieval later in create_menu() below.
utxo_item.setData(0, self.DataRoles.frozen_flags, "{}{}{}{}".format(("a" if a_frozen else ""), ("c" if c_frozen else ""), ("s" if slp_token else ""), ("i" if is_immature else "")))
# store the address
utxo_item.setData(0, self.DataRoles.address, address)
# store the ca_info for this address -- if any
if ca_info:
utxo_item.setData(0, self.DataRoles.cash_account, ca_info)
# store the slp_token
utxo_item.setData(0, self.DataRoles.slp_token, slp_token)
if toolTipMisc:
utxo_item.setToolTip(0, toolTipMisc)
run_hook("utxo_list_item_setup", self, utxo_item, x, name)
self.addChild(utxo_item)
if name in prev_selection:
# NB: This needs to be here after the item is added to the widget. See #979.
utxo_item.setSelected(True) # restore previous selection
self._update_utxo_count_display(len(self.utxos))
def _update_utxo_count_display(self, num_utxos: int):
headerItem = self.headerItem()
if headerItem:
if num_utxos:
output_point_text = self.output_point_prefix_text + f" ({num_utxos})"
else:
output_point_text = self.output_point_prefix_text
headerItem.setText(self.Col.output_point, output_point_text)
def get_selected(self):
return { x.data(0, self.DataRoles.name) : x.data(0, self.DataRoles.frozen_flags) # dict of "name" -> frozen flags string (eg: "ac")
for x in self.selectedItems() }
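    # Example of the mapping returned by get_selected() (hypothetical names):
    #     {"e3b0c442...:0": "a",    # address-level frozen only
    #      "d2d2d2d2...:1": "ci"}   # coin-level frozen + immature coinbase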
@if_not_dead
def create_menu(self, position):
menu = QMenu()
selected = self.get_selected()
def create_menu_inner():
if not selected:
return
            # Materialise the filter: a filter object is always truthy in
            # Python 3, so the emptiness check below needs a concrete list.
            coins = list(filter(lambda x: self.get_name(x) in selected, self.utxos))
            if not coins:
                return
spendable_coins = list(filter(lambda x: not selected.get(self.get_name(x), ''), coins))
# Unconditionally add the "Spend" option but leave it disabled if there are no spendable_coins
spend_action = menu.addAction(_("Spend"), lambda: self.parent.spend_coins(spendable_coins))
spend_action.setEnabled(bool(spendable_coins))
if len(selected) == 1:
# "Copy ..."
item = self.itemAt(position)
if not item:
return
col = self.currentColumn()
column_title = self.headerItem().text(col)
alt_column_title, alt_copy_text = None, None
slp_token = item.data(0, self.DataRoles.slp_token)
ca_info = None
if col == self.Col.output_point:
copy_text = item.data(0, self.DataRoles.name)
elif col == self.Col.address:
addr = item.data(0, self.DataRoles.address)
# Determine the "alt copy text" "Legacy Address" or "Cash Address"
copy_text = addr.to_full_ui_string()
if Address.FMT_UI == Address.FMT_LEGACY:
alt_copy_text, alt_column_title = addr.to_full_string(Address.FMT_CASHADDR), _('Cash Address')
else:
alt_copy_text, alt_column_title = addr.to_full_string(Address.FMT_LEGACY), _('Legacy Address')
ca_info = item.data(0, self.DataRoles.cash_account) # may be None
del addr
else:
copy_text = item.text(col)
if copy_text:
copy_text = copy_text.strip() # make sure formatted amount is not whitespaced
menu.addAction(_("Copy {}").format(column_title), lambda: QApplication.instance().clipboard().setText(copy_text))
if alt_copy_text and alt_column_title:
menu.addAction(_("Copy {}").format(alt_column_title), lambda: QApplication.instance().clipboard().setText(alt_copy_text))
if ca_info:
self.wallet.cashacct.fmt_info(ca_info) # paranoia: pre-cache minimal chash (may go out to network)
menu.addAction(_("Copy Cash Account"), lambda: self.wallet and QApplication.instance().clipboard().setText(self.wallet.cashacct.fmt_info(ca_info, emoji=True)))
# single selection, offer them the "Details" option and also coin/address "freeze" status, if any
txid = list(selected.keys())[0].split(':')[0]
frozen_flags = list(selected.values())[0]
tx = self.wallet.transactions.get(txid)
if tx:
label = self.wallet.get_label(txid) or None
menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx, label))
act = None
needsep = True
if 'c' in frozen_flags:
menu.addSeparator()
menu.addAction(_("Coin is frozen"), lambda: None).setEnabled(False)
menu.addAction(_("Unfreeze Coin"), lambda: self.set_frozen_coins(list(selected.keys()), False))
menu.addSeparator()
needsep = False
else:
menu.addAction(_("Freeze Coin"), lambda: self.set_frozen_coins(list(selected.keys()), True))
if 'a' in frozen_flags:
if needsep: menu.addSeparator()
menu.addAction(_("Address is frozen"), lambda: None).setEnabled(False)
menu.addAction(_("Unfreeze Address"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), False))
else:
menu.addAction(_("Freeze Address"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), True))
if not spend_action.isEnabled():
if slp_token:
spend_action.setText(_("SLP Token: Spend Locked"))
elif 'i' in frozen_flags: # immature coinbase
spend_action.setText(_("Immature Coinbase: Spend Locked"))
else:
# multi-selection
menu.addSeparator()
if any(['c' not in flags for flags in selected.values()]):
# they have some coin-level non-frozen in the selection, so add the menu action "Freeze coins"
menu.addAction(_("Freeze Coins"), lambda: self.set_frozen_coins(list(selected.keys()), True))
if any(['c' in flags for flags in selected.values()]):
# they have some coin-level frozen in the selection, so add the menu action "Unfreeze coins"
menu.addAction(_("Unfreeze Coins"), lambda: self.set_frozen_coins(list(selected.keys()), False))
if any(['a' not in flags for flags in selected.values()]):
# they have some address-level non-frozen in the selection, so add the menu action "Freeze addresses"
menu.addAction(_("Freeze Addresses"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), True))
if any(['a' in flags for flags in selected.values()]):
# they have some address-level frozen in the selection, so add the menu action "Unfreeze addresses"
menu.addAction(_("Unfreeze Addresses"), lambda: self.set_frozen_addresses_for_coins(list(selected.keys()), False))
create_menu_inner()
run_hook('utxo_list_context_menu_setup', self, menu, selected)
# add optional toggle actions
menu.addSeparator()
def toggle():
self.show_cash_accounts = not self.show_cash_accounts
a = menu.addAction(_("Show Cash Accounts"), toggle)
a.setCheckable(True)
a.setChecked(self.show_cash_accounts)
menu.exec_(self.viewport().mapToGlobal(position))
def on_permit_edit(self, item, column):
# disable editing fields in this tab (labels)
return False
@if_not_dead
def set_frozen_coins(self, coins, b):
self.parent.set_frozen_coin_state(coins, b)
@if_not_dead
def set_frozen_addresses_for_coins(self, coins, b):
addrs = set()
for utxo in self.utxos:
name = self.get_name(utxo)
if name in coins:
addrs.add(utxo['address'])
if addrs:
self.parent.set_frozen_state(list(addrs), b)
@if_not_dead
def update_labels(self):
if self.should_defer_update_incr():
return
root = self.invisibleRootItem()
child_count = root.childCount()
for i in range(child_count):
item = root.child(i)
try:
txid = item.data(0, self.DataRoles.name).split(':', 1)[0]
except IndexError:
                continue  # name is invalid; expected "txid:prevout_n"
label = self.wallet.get_label(txid)
item.setText(1, label)
def _ca_on_address_default_change(self, info):
if self.show_cash_accounts:
self.update()
@property
def show_cash_accounts(self):
return bool(self.wallet.storage.get('utxo_list_show_cash_accounts', False))
@show_cash_accounts.setter
def show_cash_accounts(self, b):
b = bool(b)
was = self.show_cash_accounts
if was != b:
self.wallet.storage.put('utxo_list_show_cash_accounts', b)
self.update()
|
|
#!/usr/bin/env python3
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2018
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
'''
OPAL sysfs Tests
----------------
This pokes a few bits of functionality accessible through sysfs that OPAL
provides.
It includes:
- System powercaps, i.e. `/sys/firmware/opal/powercap/`
- Power Shift Ratio, i.e. `/sys/firmware/opal/psr`
- Sensor Groups, i.e. `/sys/firmware/opal/sensor_groups/`
- The OPAL symbol map, i.e. `/sys/firmware/opal/symbol_map`
- Exporting of random bits of memory, i.e. `/sys/firmware/opal/exports/`
'''
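# Example (hypothetical values): the powercap files listed above are plain
# sysfs attributes, so from a root shell they can be poked directly, which is
# essentially what these tests automate:
#
#   cat /sys/firmware/opal/powercap/system-powercap/powercap-current
#   echo 2000 > /sys/firmware/opal/powercap/system-powercap/powercap-current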
import time
import random
import unittest
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
POWERCAP_CURRENT = "/sys/firmware/opal/powercap/system-powercap/powercap-current"
POWERCAP_MAX = "/sys/firmware/opal/powercap/system-powercap/powercap-max"
POWERCAP_MIN = "/sys/firmware/opal/powercap/system-powercap/powercap-min"
OPAL_PSR = "/sys/firmware/opal/psr"
OPAL_SENSOR_GROUPS = "/sys/firmware/opal/sensor_groups/"
OPAL_SYMBOL_MAP = "/sys/firmware/opal/symbol_map"
OPAL_EXPORTS = "/sys/firmware/opal/exports/"
class OpalSysfsTests():
def setUp(self):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_IPMI = conf.ipmi()
self.cv_SYSTEM = conf.system()
self.bmc_type = conf.args.bmc_type
def get_proc_gen(self):
        if not hasattr(self, 'cpu'):
            cmd = "grep '^cpu' /proc/cpuinfo |uniq|sed -e 's/^.*: //;s/[,]* .*//;'"
            self.cpu = ''.join(self.c.run_command(cmd))
return self.cpu
def set_psr_value(self, entry, psr_val):
self.c.run_command("echo %s > %s/%s" % (psr_val, str(OPAL_PSR), entry))
for i in range(21):
value = self.c.run_command("cat %s/%s" % (str(OPAL_PSR), entry))
if int(value[-1]) == int(psr_val):
break
time.sleep(1)
self.assertTrue((int(value[-1]) == int(psr_val)),
"OPAL failed to set psr value")
def get_power_cap(self):
return int(self.c.run_command("cat %s" % str(POWERCAP_CURRENT))[-1])
def set_power_cap(self, value):
valid_powercap_values = [self.get_power_cap(), value]
self.c.run_command("echo %s > %s" % (value, str(POWERCAP_CURRENT)))
for i in range(21):
cur_powercap = self.get_power_cap()
self.assertIn(cur_powercap, valid_powercap_values,
"Retrieved powercap was not either the previous one "
"({}) or the one we're trying to set ({}). Got {}, "
"expected in {}".format(
valid_powercap_values[0],
value,
cur_powercap,
repr(valid_powercap_values)))
if int(cur_powercap) == int(value):
break
time.sleep(2)
self.assertEqual(int(cur_powercap), int(value),
"OPAL failed to set power cap value. "
"Got {} when trying to set {}.".format(
cur_powercap, value))
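    # While a new cap is being applied, a readback may legitimately still
    # return the previous value; that is why valid_powercap_values above
    # accepts both the old and the requested cap during the polling window.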
def test_opal_powercap(self):
self.setup_test()
self.get_proc_gen()
if self.cpu not in ["POWER9", "POWER9P"]:
return
if "qemu" in self.bmc_type:
self.skipTest("Qemu doesn't support OCC-based tests")
cur_powercap = int(self.c.run_command("cat %s" %
str(POWERCAP_CURRENT))[-1])
max_powercap = int(self.c.run_command(
"cat %s" % str(POWERCAP_MAX))[-1])
min_powercap = int(self.c.run_command(
"cat %s" % str(POWERCAP_MIN))[-1])
log.debug("Powercap cur:{} max:{} min:{}".format(
cur_powercap, max_powercap, min_powercap))
self.set_power_cap(max_powercap)
self.set_power_cap(min_powercap)
self.set_power_cap(cur_powercap)
self.set_power_cap(max_powercap)
for i in range(3):
value = random.randint(min_powercap, max_powercap)
self.set_power_cap(value)
# Set back to cur_powercap
self.set_power_cap(cur_powercap)
def test_opal_psr(self):
self.setup_test()
self.get_proc_gen()
if self.cpu not in ["POWER9", "POWER9P"]:
return
if "qemu" in self.bmc_type:
self.skipTest("Qemu doesn't support OCC-based tests")
list = self.c.run_command("ls --color=never -1 %s" % str(OPAL_PSR))
for entry in list:
value = self.c.run_command("cat %s/%s" % (str(OPAL_PSR), entry))
self.assertTrue(
(0 <= int(value[-1]) <= 100), "Out-of-range psr value")
self.set_psr_value(entry, 50)
self.set_psr_value(entry, 25)
self.set_psr_value(entry, 100)
def test_opal_sensor_groups(self):
self.setup_test()
self.get_proc_gen()
log.debug(repr(self.cpu))
if self.cpu not in ["POWER9", "POWER9P"]:
return
if "qemu" in self.bmc_type:
self.skipTest("Qemu doesn't support OCC-based tests")
        entries = self.c.run_command(
            "ls --color=never -1 %s" % str(OPAL_SENSOR_GROUPS))
        for entry in entries:
self.c.run_command("ls --color=never /%s/%s/clear" %
(OPAL_SENSOR_GROUPS, entry))
if self.test == "skiroot":
self.c.run_command("echo 1 > /%s/%s/clear" %
(OPAL_SENSOR_GROUPS, entry))
continue
# clearing min/max for hwmon sensors
self.c.run_command("sensors")
self.c.run_command("ppc64_cpu --frequency")
self.c.run_command("sensors")
self.c.run_command("echo 1 > /%s/%s/clear" %
(OPAL_SENSOR_GROUPS, entry))
self.c.run_command("ppc64_cpu --frequency")
self.c.run_command("sensors")
self.c.run_command("echo 1 > /%s/%s/clear" %
(OPAL_SENSOR_GROUPS, entry))
def test_opal_symbol_map(self):
self.setup_test()
self.c.run_command("ls --color=never -1 %s" % str(OPAL_SYMBOL_MAP))
        # grepping the full symbol map can be slow, so allow a 120s timeout
self.c.run_command("grep opal_ %s" % str(OPAL_SYMBOL_MAP), 120)
def test_opal_exports(self):
self.setup_test()
        # Not all kernels create the exports sysfs directory, so ignore failure
self.c.run_command_ignore_fail(
"ls --color=never -1 %s" % str(OPAL_EXPORTS))
class Skiroot(OpalSysfsTests, unittest.TestCase):
def setup_test(self):
self.test = 'skiroot'
self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
self.c = self.cv_SYSTEM.console
class Host(OpalSysfsTests, unittest.TestCase):
def setup_test(self):
self.test = 'host'
self.cv_SYSTEM.goto_state(OpSystemState.OS)
self.c = self.cv_SYSTEM.cv_HOST.get_ssh_connection()
|
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_common_admin_state_pac import TapiCommonAdminStatePac # noqa: F401,E501
from tapi_server.models.tapi_common_administrative_state import TapiCommonAdministrativeState # noqa: F401,E501
from tapi_server.models.tapi_common_capacity import TapiCommonCapacity # noqa: F401,E501
from tapi_server.models.tapi_common_capacity_pac import TapiCommonCapacityPac # noqa: F401,E501
from tapi_server.models.tapi_common_global_class import TapiCommonGlobalClass # noqa: F401,E501
from tapi_server.models.tapi_common_layer_protocol_name import TapiCommonLayerProtocolName # noqa: F401,E501
from tapi_server.models.tapi_common_lifecycle_state import TapiCommonLifecycleState # noqa: F401,E501
from tapi_server.models.tapi_common_name_and_value import TapiCommonNameAndValue # noqa: F401,E501
from tapi_server.models.tapi_common_operational_state import TapiCommonOperationalState # noqa: F401,E501
from tapi_server.models.tapi_topology_cost_characteristic import TapiTopologyCostCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_latency_characteristic import TapiTopologyLatencyCharacteristic # noqa: F401,E501
from tapi_server.models.tapi_topology_node_edge_point_ref import TapiTopologyNodeEdgePointRef # noqa: F401,E501
from tapi_server.models.tapi_topology_node_owned_node_edge_point import TapiTopologyNodeOwnedNodeEdgePoint # noqa: F401,E501
from tapi_server.models.tapi_topology_node_rule_group import TapiTopologyNodeRuleGroup # noqa: F401,E501
from tapi_server.models.tapi_topology_topology_ref import TapiTopologyTopologyRef # noqa: F401,E501
from tapi_server.models.tapi_topology_transfer_cost_pac import TapiTopologyTransferCostPac # noqa: F401,E501
from tapi_server.models.tapi_topology_transfer_integrity_pac import TapiTopologyTransferIntegrityPac # noqa: F401,E501
from tapi_server.models.tapi_topology_transfer_timing_pac import TapiTopologyTransferTimingPac # noqa: F401,E501
from tapi_server import util
class TapiTopologyTopologyNode(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, operational_state=None, lifecycle_state=None, administrative_state=None, available_capacity=None, total_potential_capacity=None, name=None, uuid=None, cost_characteristic=None, error_characteristic=None, unavailable_time_characteristic=None, server_integrity_process_characteristic=None, delivery_order_characteristic=None, repeat_delivery_characteristic=None, loss_characteristic=None, latency_characteristic=None, layer_protocol_name=None, encap_topology=None, owned_node_edge_point=None, node_rule_group=None, aggregated_node_edge_point=None): # noqa: E501
"""TapiTopologyTopologyNode - a model defined in OpenAPI
:param operational_state: The operational_state of this TapiTopologyTopologyNode. # noqa: E501
:type operational_state: TapiCommonOperationalState
:param lifecycle_state: The lifecycle_state of this TapiTopologyTopologyNode. # noqa: E501
:type lifecycle_state: TapiCommonLifecycleState
:param administrative_state: The administrative_state of this TapiTopologyTopologyNode. # noqa: E501
:type administrative_state: TapiCommonAdministrativeState
:param available_capacity: The available_capacity of this TapiTopologyTopologyNode. # noqa: E501
:type available_capacity: TapiCommonCapacity
:param total_potential_capacity: The total_potential_capacity of this TapiTopologyTopologyNode. # noqa: E501
:type total_potential_capacity: TapiCommonCapacity
:param name: The name of this TapiTopologyTopologyNode. # noqa: E501
:type name: List[TapiCommonNameAndValue]
:param uuid: The uuid of this TapiTopologyTopologyNode. # noqa: E501
:type uuid: str
:param cost_characteristic: The cost_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type cost_characteristic: List[TapiTopologyCostCharacteristic]
:param error_characteristic: The error_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type error_characteristic: str
:param unavailable_time_characteristic: The unavailable_time_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type unavailable_time_characteristic: str
:param server_integrity_process_characteristic: The server_integrity_process_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type server_integrity_process_characteristic: str
:param delivery_order_characteristic: The delivery_order_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type delivery_order_characteristic: str
:param repeat_delivery_characteristic: The repeat_delivery_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type repeat_delivery_characteristic: str
:param loss_characteristic: The loss_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type loss_characteristic: str
:param latency_characteristic: The latency_characteristic of this TapiTopologyTopologyNode. # noqa: E501
:type latency_characteristic: List[TapiTopologyLatencyCharacteristic]
:param layer_protocol_name: The layer_protocol_name of this TapiTopologyTopologyNode. # noqa: E501
:type layer_protocol_name: List[TapiCommonLayerProtocolName]
:param encap_topology: The encap_topology of this TapiTopologyTopologyNode. # noqa: E501
:type encap_topology: TapiTopologyTopologyRef
:param owned_node_edge_point: The owned_node_edge_point of this TapiTopologyTopologyNode. # noqa: E501
:type owned_node_edge_point: List[TapiTopologyNodeOwnedNodeEdgePoint]
:param node_rule_group: The node_rule_group of this TapiTopologyTopologyNode. # noqa: E501
:type node_rule_group: List[TapiTopologyNodeRuleGroup]
:param aggregated_node_edge_point: The aggregated_node_edge_point of this TapiTopologyTopologyNode. # noqa: E501
:type aggregated_node_edge_point: List[TapiTopologyNodeEdgePointRef]
"""
self.openapi_types = {
'operational_state': TapiCommonOperationalState,
'lifecycle_state': TapiCommonLifecycleState,
'administrative_state': TapiCommonAdministrativeState,
'available_capacity': TapiCommonCapacity,
'total_potential_capacity': TapiCommonCapacity,
'name': List[TapiCommonNameAndValue],
'uuid': str,
'cost_characteristic': List[TapiTopologyCostCharacteristic],
'error_characteristic': str,
'unavailable_time_characteristic': str,
'server_integrity_process_characteristic': str,
'delivery_order_characteristic': str,
'repeat_delivery_characteristic': str,
'loss_characteristic': str,
'latency_characteristic': List[TapiTopologyLatencyCharacteristic],
'layer_protocol_name': List[TapiCommonLayerProtocolName],
'encap_topology': TapiTopologyTopologyRef,
'owned_node_edge_point': List[TapiTopologyNodeOwnedNodeEdgePoint],
'node_rule_group': List[TapiTopologyNodeRuleGroup],
'aggregated_node_edge_point': List[TapiTopologyNodeEdgePointRef]
}
self.attribute_map = {
'operational_state': 'operational-state',
'lifecycle_state': 'lifecycle-state',
'administrative_state': 'administrative-state',
'available_capacity': 'available-capacity',
'total_potential_capacity': 'total-potential-capacity',
'name': 'name',
'uuid': 'uuid',
'cost_characteristic': 'cost-characteristic',
'error_characteristic': 'error-characteristic',
'unavailable_time_characteristic': 'unavailable-time-characteristic',
'server_integrity_process_characteristic': 'server-integrity-process-characteristic',
'delivery_order_characteristic': 'delivery-order-characteristic',
'repeat_delivery_characteristic': 'repeat-delivery-characteristic',
'loss_characteristic': 'loss-characteristic',
'latency_characteristic': 'latency-characteristic',
'layer_protocol_name': 'layer-protocol-name',
'encap_topology': 'encap-topology',
'owned_node_edge_point': 'owned-node-edge-point',
'node_rule_group': 'node-rule-group',
'aggregated_node_edge_point': 'aggregated-node-edge-point'
}
self._operational_state = operational_state
self._lifecycle_state = lifecycle_state
self._administrative_state = administrative_state
self._available_capacity = available_capacity
self._total_potential_capacity = total_potential_capacity
self._name = name
self._uuid = uuid
self._cost_characteristic = cost_characteristic
self._error_characteristic = error_characteristic
self._unavailable_time_characteristic = unavailable_time_characteristic
self._server_integrity_process_characteristic = server_integrity_process_characteristic
self._delivery_order_characteristic = delivery_order_characteristic
self._repeat_delivery_characteristic = repeat_delivery_characteristic
self._loss_characteristic = loss_characteristic
self._latency_characteristic = latency_characteristic
self._layer_protocol_name = layer_protocol_name
self._encap_topology = encap_topology
self._owned_node_edge_point = owned_node_edge_point
self._node_rule_group = node_rule_group
self._aggregated_node_edge_point = aggregated_node_edge_point
@classmethod
def from_dict(cls, dikt) -> 'TapiTopologyTopologyNode':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.topology.topology.Node of this TapiTopologyTopologyNode. # noqa: E501
:rtype: TapiTopologyTopologyNode
"""
return util.deserialize_model(dikt, cls)
@property
def operational_state(self):
"""Gets the operational_state of this TapiTopologyTopologyNode.
:return: The operational_state of this TapiTopologyTopologyNode.
:rtype: TapiCommonOperationalState
"""
return self._operational_state
@operational_state.setter
def operational_state(self, operational_state):
"""Sets the operational_state of this TapiTopologyTopologyNode.
:param operational_state: The operational_state of this TapiTopologyTopologyNode.
:type operational_state: TapiCommonOperationalState
"""
self._operational_state = operational_state
@property
def lifecycle_state(self):
"""Gets the lifecycle_state of this TapiTopologyTopologyNode.
:return: The lifecycle_state of this TapiTopologyTopologyNode.
:rtype: TapiCommonLifecycleState
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""Sets the lifecycle_state of this TapiTopologyTopologyNode.
:param lifecycle_state: The lifecycle_state of this TapiTopologyTopologyNode.
:type lifecycle_state: TapiCommonLifecycleState
"""
self._lifecycle_state = lifecycle_state
@property
def administrative_state(self):
"""Gets the administrative_state of this TapiTopologyTopologyNode.
:return: The administrative_state of this TapiTopologyTopologyNode.
:rtype: TapiCommonAdministrativeState
"""
return self._administrative_state
@administrative_state.setter
def administrative_state(self, administrative_state):
"""Sets the administrative_state of this TapiTopologyTopologyNode.
:param administrative_state: The administrative_state of this TapiTopologyTopologyNode.
:type administrative_state: TapiCommonAdministrativeState
"""
self._administrative_state = administrative_state
@property
def available_capacity(self):
"""Gets the available_capacity of this TapiTopologyTopologyNode.
:return: The available_capacity of this TapiTopologyTopologyNode.
:rtype: TapiCommonCapacity
"""
return self._available_capacity
@available_capacity.setter
def available_capacity(self, available_capacity):
"""Sets the available_capacity of this TapiTopologyTopologyNode.
:param available_capacity: The available_capacity of this TapiTopologyTopologyNode.
:type available_capacity: TapiCommonCapacity
"""
self._available_capacity = available_capacity
@property
def total_potential_capacity(self):
"""Gets the total_potential_capacity of this TapiTopologyTopologyNode.
:return: The total_potential_capacity of this TapiTopologyTopologyNode.
:rtype: TapiCommonCapacity
"""
return self._total_potential_capacity
@total_potential_capacity.setter
def total_potential_capacity(self, total_potential_capacity):
"""Sets the total_potential_capacity of this TapiTopologyTopologyNode.
:param total_potential_capacity: The total_potential_capacity of this TapiTopologyTopologyNode.
:type total_potential_capacity: TapiCommonCapacity
"""
self._total_potential_capacity = total_potential_capacity
@property
def name(self):
"""Gets the name of this TapiTopologyTopologyNode.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:return: The name of this TapiTopologyTopologyNode.
:rtype: List[TapiCommonNameAndValue]
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TapiTopologyTopologyNode.
List of names. A property of an entity with a value that is unique in some namespace but may change during the life of the entity. A name carries no semantics with respect to the purpose of the entity. # noqa: E501
:param name: The name of this TapiTopologyTopologyNode.
:type name: List[TapiCommonNameAndValue]
"""
self._name = name
@property
def uuid(self):
"""Gets the uuid of this TapiTopologyTopologyNode.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:return: The uuid of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this TapiTopologyTopologyNode.
UUID: An identifier that is universally unique within an identifier space, where the identifier space is itself globally unique, and immutable. An UUID carries no semantics with respect to the purpose or state of the entity. UUID here uses string representation as defined in RFC 4122. The canonical representation uses lowercase characters. Pattern: [0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12} Example of a UUID in string representation: f81d4fae-7dec-11d0-a765-00a0c91e6bf6 # noqa: E501
:param uuid: The uuid of this TapiTopologyTopologyNode.
:type uuid: str
"""
self._uuid = uuid
@property
def cost_characteristic(self):
"""Gets the cost_characteristic of this TapiTopologyTopologyNode.
The list of costs where each cost relates to some aspect of the TopologicalEntity. # noqa: E501
:return: The cost_characteristic of this TapiTopologyTopologyNode.
:rtype: List[TapiTopologyCostCharacteristic]
"""
return self._cost_characteristic
@cost_characteristic.setter
def cost_characteristic(self, cost_characteristic):
"""Sets the cost_characteristic of this TapiTopologyTopologyNode.
The list of costs where each cost relates to some aspect of the TopologicalEntity. # noqa: E501
:param cost_characteristic: The cost_characteristic of this TapiTopologyTopologyNode.
:type cost_characteristic: List[TapiTopologyCostCharacteristic]
"""
self._cost_characteristic = cost_characteristic
@property
def error_characteristic(self):
"""Gets the error_characteristic of this TapiTopologyTopologyNode.
Describes the degree to which the signal propagated can be errored. Applies to TDM systems as the errored signal will be propagated and not packet as errored packets will be discarded. # noqa: E501
:return: The error_characteristic of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._error_characteristic
@error_characteristic.setter
def error_characteristic(self, error_characteristic):
"""Sets the error_characteristic of this TapiTopologyTopologyNode.
Describes the degree to which the signal propagated can be errored. Applies to TDM systems as the errored signal will be propagated and not packet as errored packets will be discarded. # noqa: E501
:param error_characteristic: The error_characteristic of this TapiTopologyTopologyNode.
:type error_characteristic: str
"""
self._error_characteristic = error_characteristic
@property
def unavailable_time_characteristic(self):
"""Gets the unavailable_time_characteristic of this TapiTopologyTopologyNode.
Describes the duration for which there may be no valid signal propagated. # noqa: E501
:return: The unavailable_time_characteristic of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._unavailable_time_characteristic
@unavailable_time_characteristic.setter
def unavailable_time_characteristic(self, unavailable_time_characteristic):
"""Sets the unavailable_time_characteristic of this TapiTopologyTopologyNode.
Describes the duration for which there may be no valid signal propagated. # noqa: E501
:param unavailable_time_characteristic: The unavailable_time_characteristic of this TapiTopologyTopologyNode.
:type unavailable_time_characteristic: str
"""
self._unavailable_time_characteristic = unavailable_time_characteristic
@property
def server_integrity_process_characteristic(self):
"""Gets the server_integrity_process_characteristic of this TapiTopologyTopologyNode.
Describes the effect of any server integrity enhancement process on the characteristics of the TopologicalEntity. # noqa: E501
:return: The server_integrity_process_characteristic of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._server_integrity_process_characteristic
@server_integrity_process_characteristic.setter
def server_integrity_process_characteristic(self, server_integrity_process_characteristic):
"""Sets the server_integrity_process_characteristic of this TapiTopologyTopologyNode.
Describes the effect of any server integrity enhancement process on the characteristics of the TopologicalEntity. # noqa: E501
:param server_integrity_process_characteristic: The server_integrity_process_characteristic of this TapiTopologyTopologyNode.
:type server_integrity_process_characteristic: str
"""
self._server_integrity_process_characteristic = server_integrity_process_characteristic
@property
def delivery_order_characteristic(self):
"""Gets the delivery_order_characteristic of this TapiTopologyTopologyNode.
Describes the degree to which packets will be delivered out of sequence. Does not apply to TDM as the TDM protocols maintain strict order. # noqa: E501
:return: The delivery_order_characteristic of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._delivery_order_characteristic
@delivery_order_characteristic.setter
def delivery_order_characteristic(self, delivery_order_characteristic):
"""Sets the delivery_order_characteristic of this TapiTopologyTopologyNode.
Describes the degree to which packets will be delivered out of sequence. Does not apply to TDM as the TDM protocols maintain strict order. # noqa: E501
:param delivery_order_characteristic: The delivery_order_characteristic of this TapiTopologyTopologyNode.
:type delivery_order_characteristic: str
"""
self._delivery_order_characteristic = delivery_order_characteristic
@property
def repeat_delivery_characteristic(self):
"""Gets the repeat_delivery_characteristic of this TapiTopologyTopologyNode.
Primarily applies to packet systems where a packet may be delivered more than once (in fault recovery for example). It can also apply to TDM where several frames may be received twice due to switching in a system with a large differential propagation delay. # noqa: E501
:return: The repeat_delivery_characteristic of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._repeat_delivery_characteristic
@repeat_delivery_characteristic.setter
def repeat_delivery_characteristic(self, repeat_delivery_characteristic):
"""Sets the repeat_delivery_characteristic of this TapiTopologyTopologyNode.
Primarily applies to packet systems where a packet may be delivered more than once (in fault recovery for example). It can also apply to TDM where several frames may be received twice due to switching in a system with a large differential propagation delay. # noqa: E501
:param repeat_delivery_characteristic: The repeat_delivery_characteristic of this TapiTopologyTopologyNode.
:type repeat_delivery_characteristic: str
"""
self._repeat_delivery_characteristic = repeat_delivery_characteristic
@property
def loss_characteristic(self):
"""Gets the loss_characteristic of this TapiTopologyTopologyNode.
Describes the acceptable characteristic of lost packets where loss may result from discard due to errors or overflow. Applies to packet systems and not TDM (as for TDM errored signals are propagated unless grossly errored and overflow/underflow turns into timing slips). # noqa: E501
:return: The loss_characteristic of this TapiTopologyTopologyNode.
:rtype: str
"""
return self._loss_characteristic
@loss_characteristic.setter
def loss_characteristic(self, loss_characteristic):
"""Sets the loss_characteristic of this TapiTopologyTopologyNode.
Describes the acceptable characteristic of lost packets where loss may result from discard due to errors or overflow. Applies to packet systems and not TDM (as for TDM errored signals are propagated unless grossly errored and overflow/underflow turns into timing slips). # noqa: E501
:param loss_characteristic: The loss_characteristic of this TapiTopologyTopologyNode.
:type loss_characteristic: str
"""
self._loss_characteristic = loss_characteristic
@property
def latency_characteristic(self):
"""Gets the latency_characteristic of this TapiTopologyTopologyNode.
The effect on the latency of a queuing process. This only has significant effect for packet based systems and has a complex characteristic. # noqa: E501
:return: The latency_characteristic of this TapiTopologyTopologyNode.
:rtype: List[TapiTopologyLatencyCharacteristic]
"""
return self._latency_characteristic
@latency_characteristic.setter
def latency_characteristic(self, latency_characteristic):
"""Sets the latency_characteristic of this TapiTopologyTopologyNode.
The effect on the latency of a queuing process. This only has significant effect for packet based systems and has a complex characteristic. # noqa: E501
:param latency_characteristic: The latency_characteristic of this TapiTopologyTopologyNode.
:type latency_characteristic: List[TapiTopologyLatencyCharacteristic]
"""
self._latency_characteristic = latency_characteristic
@property
def layer_protocol_name(self):
"""Gets the layer_protocol_name of this TapiTopologyTopologyNode.
none # noqa: E501
:return: The layer_protocol_name of this TapiTopologyTopologyNode.
:rtype: List[TapiCommonLayerProtocolName]
"""
return self._layer_protocol_name
@layer_protocol_name.setter
def layer_protocol_name(self, layer_protocol_name):
"""Sets the layer_protocol_name of this TapiTopologyTopologyNode.
none # noqa: E501
:param layer_protocol_name: The layer_protocol_name of this TapiTopologyTopologyNode.
:type layer_protocol_name: List[TapiCommonLayerProtocolName]
"""
self._layer_protocol_name = layer_protocol_name
@property
def encap_topology(self):
"""Gets the encap_topology of this TapiTopologyTopologyNode.
:return: The encap_topology of this TapiTopologyTopologyNode.
:rtype: TapiTopologyTopologyRef
"""
return self._encap_topology
@encap_topology.setter
def encap_topology(self, encap_topology):
"""Sets the encap_topology of this TapiTopologyTopologyNode.
:param encap_topology: The encap_topology of this TapiTopologyTopologyNode.
:type encap_topology: TapiTopologyTopologyRef
"""
self._encap_topology = encap_topology
@property
def owned_node_edge_point(self):
"""Gets the owned_node_edge_point of this TapiTopologyTopologyNode.
none # noqa: E501
:return: The owned_node_edge_point of this TapiTopologyTopologyNode.
:rtype: List[TapiTopologyNodeOwnedNodeEdgePoint]
"""
return self._owned_node_edge_point
@owned_node_edge_point.setter
def owned_node_edge_point(self, owned_node_edge_point):
"""Sets the owned_node_edge_point of this TapiTopologyTopologyNode.
none # noqa: E501
:param owned_node_edge_point: The owned_node_edge_point of this TapiTopologyTopologyNode.
:type owned_node_edge_point: List[TapiTopologyNodeOwnedNodeEdgePoint]
"""
self._owned_node_edge_point = owned_node_edge_point
@property
def node_rule_group(self):
"""Gets the node_rule_group of this TapiTopologyTopologyNode.
none # noqa: E501
:return: The node_rule_group of this TapiTopologyTopologyNode.
:rtype: List[TapiTopologyNodeRuleGroup]
"""
return self._node_rule_group
@node_rule_group.setter
def node_rule_group(self, node_rule_group):
"""Sets the node_rule_group of this TapiTopologyTopologyNode.
none # noqa: E501
:param node_rule_group: The node_rule_group of this TapiTopologyTopologyNode.
:type node_rule_group: List[TapiTopologyNodeRuleGroup]
"""
self._node_rule_group = node_rule_group
@property
def aggregated_node_edge_point(self):
"""Gets the aggregated_node_edge_point of this TapiTopologyTopologyNode.
none # noqa: E501
:return: The aggregated_node_edge_point of this TapiTopologyTopologyNode.
:rtype: List[TapiTopologyNodeEdgePointRef]
"""
return self._aggregated_node_edge_point
@aggregated_node_edge_point.setter
def aggregated_node_edge_point(self, aggregated_node_edge_point):
"""Sets the aggregated_node_edge_point of this TapiTopologyTopologyNode.
none # noqa: E501
:param aggregated_node_edge_point: The aggregated_node_edge_point of this TapiTopologyTopologyNode.
:type aggregated_node_edge_point: List[TapiTopologyNodeEdgePointRef]
"""
self._aggregated_node_edge_point = aggregated_node_edge_point
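# A minimal usage sketch (hypothetical payload): from_dict() delegates to
# util.deserialize_model, which consults attribute_map to translate the
# hyphenated REST keys back into the Python attribute names:
#
#     node = TapiTopologyTopologyNode.from_dict({
#         'uuid': 'f81d4fae-7dec-11d0-a765-00a0c91e6bf6',
#         'operational-state': 'ENABLED',
#     })
#     assert node.uuid == 'f81d4fae-7dec-11d0-a765-00a0c91e6bf6'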
|
|
from __future__ import absolute_import, unicode_literals
import os
from mock import patch
from oauthlib import signals
from oauthlib.oauth2.rfc6749.errors import *
from oauthlib.oauth2.rfc6749.parameters import *
from ...unittest import TestCase
@patch('time.time', new=lambda: 1000)
class ParameterTests(TestCase):
state = 'xyz'
auth_base = {
'uri': 'https://server.example.com/authorize',
'client_id': 's6BhdRkqt3',
'redirect_uri': 'https://client.example.com/cb',
'state': state,
'scope': 'photos'
}
list_scope = ['list', 'of', 'scopes']
auth_grant = {'response_type': 'code'}
auth_grant_list_scope = {}
auth_implicit = {'response_type': 'token', 'extra': 'extra'}
auth_implicit_list_scope = {}
def setUp(self):
self.auth_grant.update(self.auth_base)
self.auth_implicit.update(self.auth_base)
self.auth_grant_list_scope.update(self.auth_grant)
self.auth_grant_list_scope['scope'] = self.list_scope
self.auth_implicit_list_scope.update(self.auth_implicit)
self.auth_implicit_list_scope['scope'] = self.list_scope
auth_base_uri = ('https://server.example.com/authorize?response_type={0}'
'&client_id=s6BhdRkqt3&redirect_uri=https%3A%2F%2F'
'client.example.com%2Fcb&scope={1}&state={2}{3}')
auth_grant_uri = auth_base_uri.format('code', 'photos', state, '')
auth_grant_uri_list_scope = auth_base_uri.format('code', 'list+of+scopes', state, '')
auth_implicit_uri = auth_base_uri.format('token', 'photos', state, '&extra=extra')
auth_implicit_uri_list_scope = auth_base_uri.format('token', 'list+of+scopes', state, '&extra=extra')
grant_body = {
'grant_type': 'authorization_code',
'code': 'SplxlOBeZQQYbYS6WxSbIA',
'redirect_uri': 'https://client.example.com/cb'
}
grant_body_scope = {'scope': 'photos'}
grant_body_list_scope = {'scope': list_scope}
auth_grant_body = ('grant_type=authorization_code&'
'code=SplxlOBeZQQYbYS6WxSbIA&'
'redirect_uri=https%3A%2F%2Fclient.example.com%2Fcb')
auth_grant_body_scope = auth_grant_body + '&scope=photos'
auth_grant_body_list_scope = auth_grant_body + '&scope=list+of+scopes'
pwd_body = {
'grant_type': 'password',
'username': 'johndoe',
'password': 'A3ddj3w'
}
password_body = 'grant_type=password&username=johndoe&password=A3ddj3w'
cred_grant = {'grant_type': 'client_credentials'}
cred_body = 'grant_type=client_credentials'
grant_response = 'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=xyz'
grant_dict = {'code': 'SplxlOBeZQQYbYS6WxSbIA', 'state': state}
error_nocode = 'https://client.example.com/cb?state=xyz'
error_nostate = 'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA'
error_wrongstate = 'https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA&state=abc'
error_response = 'https://client.example.com/cb?error=access_denied&state=xyz'
implicit_base = 'https://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA&scope=abc&'
implicit_response = implicit_base + 'state={0}&token_type=example&expires_in=3600'.format(state)
implicit_notype = implicit_base + 'state={0}&expires_in=3600'.format(state)
    implicit_wrongstate = implicit_base + 'state={0}&token_type=example&expires_in=3600'.format('invalid')
implicit_nostate = implicit_base + 'token_type=example&expires_in=3600'
implicit_notoken = 'https://example.com/cb#state=xyz&token_type=example&expires_in=3600'
implicit_dict = {
'access_token': '2YotnFZFEjr1zCsicMWpAA',
'state': state,
'token_type': 'example',
'expires_in': '3600',
'expires_at': 4600,
'scope': ['abc']
}
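    # Note: time.time() is patched to 1000 for this class, so the parsers are
    # expected to derive expires_at = 1000 + expires_in = 4600 in the fixture
    # dicts here and below.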
json_response = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
' "token_type": "example",'
' "expires_in": 3600,'
' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter": "example_value",'
' "scope":"abc def"}')
json_response_noscope = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
' "token_type": "example",'
' "expires_in": 3600,'
' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter": "example_value" }')
json_error = '{ "error": "access_denied" }'
json_notoken = ('{ "token_type": "example",'
' "expires_in": 3600,'
' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter": "example_value" }')
json_notype = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
' "expires_in": 3600,'
' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter": "example_value" }')
json_expires = ('{ "access_token": "2YotnFZFEjr1zCsicMWpAA",'
' "token_type": "example",'
' "expires": 3600,'
' "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",'
' "example_parameter": "example_value",'
' "scope":"abc def"}')
json_dict = {
'access_token': '2YotnFZFEjr1zCsicMWpAA',
'token_type': 'example',
'expires_in': 3600,
'expires_at': 4600,
'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
'example_parameter': 'example_value',
'scope': ['abc', 'def']
}
json_noscope_dict = {
'access_token': '2YotnFZFEjr1zCsicMWpAA',
'token_type': 'example',
'expires_in': 3600,
'expires_at': 4600,
'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
'example_parameter': 'example_value'
}
json_notype_dict = {
'access_token': '2YotnFZFEjr1zCsicMWpAA',
'expires_in': 3600,
'expires_at': 4600,
'refresh_token': 'tGzv3JOkF0XG5Qx2TlKWIA',
'example_parameter': 'example_value',
}
url_encoded_response = ('access_token=2YotnFZFEjr1zCsicMWpAA'
'&token_type=example'
'&expires_in=3600'
'&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA'
'&example_parameter=example_value'
'&scope=abc def')
url_encoded_error = 'error=access_denied'
url_encoded_notoken = ('token_type=example'
'&expires_in=3600'
'&refresh_token=tGzv3JOkF0XG5Qx2TlKWIA'
'&example_parameter=example_value')
def test_prepare_grant_uri(self):
"""Verify correct authorization URI construction."""
self.assertURLEqual(prepare_grant_uri(**self.auth_grant), self.auth_grant_uri)
self.assertURLEqual(prepare_grant_uri(**self.auth_grant_list_scope), self.auth_grant_uri_list_scope)
self.assertURLEqual(prepare_grant_uri(**self.auth_implicit), self.auth_implicit_uri)
self.assertURLEqual(prepare_grant_uri(**self.auth_implicit_list_scope), self.auth_implicit_uri_list_scope)
def test_prepare_token_request(self):
"""Verify correct access token request body construction."""
self.assertFormBodyEqual(prepare_token_request(**self.grant_body), self.auth_grant_body)
self.assertFormBodyEqual(prepare_token_request(**self.pwd_body), self.password_body)
self.assertFormBodyEqual(prepare_token_request(**self.cred_grant), self.cred_body)
def test_grant_response(self):
"""Verify correct parameter parsing and validation for auth code responses."""
params = parse_authorization_code_response(self.grant_response)
self.assertEqual(params, self.grant_dict)
params = parse_authorization_code_response(self.grant_response, state=self.state)
self.assertEqual(params, self.grant_dict)
self.assertRaises(MissingCodeError, parse_authorization_code_response,
self.error_nocode)
self.assertRaises(MissingCodeError, parse_authorization_code_response,
self.error_response)
self.assertRaises(MismatchingStateError, parse_authorization_code_response,
self.error_nostate, state=self.state)
self.assertRaises(MismatchingStateError, parse_authorization_code_response,
self.error_wrongstate, state=self.state)
def test_implicit_token_response(self):
"""Verify correct parameter parsing and validation for implicit responses."""
self.assertEqual(parse_implicit_response(self.implicit_response),
self.implicit_dict)
self.assertRaises(MissingTokenError, parse_implicit_response,
self.implicit_notoken)
self.assertRaises(ValueError, parse_implicit_response,
self.implicit_nostate, state=self.state)
self.assertRaises(ValueError, parse_implicit_response,
self.implicit_wrongstate, state=self.state)
def test_json_token_response(self):
"""Verify correct parameter parsing and validation for token responses. """
self.assertEqual(parse_token_response(self.json_response), self.json_dict)
self.assertRaises(AccessDeniedError, parse_token_response, self.json_error)
self.assertRaises(MissingTokenError, parse_token_response, self.json_notoken)
self.assertEqual(parse_token_response(self.json_response_noscope,
scope=['all', 'the', 'scopes']), self.json_noscope_dict)
scope_changes_recorded = []
def record_scope_change(sender, message, old, new):
scope_changes_recorded.append((message, old, new))
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
signals.scope_changed.connect(record_scope_change)
try:
parse_token_response(self.json_response, scope='aaa')
self.assertEqual(len(scope_changes_recorded), 1)
message, old, new = scope_changes_recorded[0]
for scope in new + old:
self.assertIn(scope, message)
self.assertEqual(old, ['aaa'])
self.assertEqual(set(new), set(['abc', 'def']))
finally:
signals.scope_changed.disconnect(record_scope_change)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
def test_json_token_notype(self):
"""Verify strict token type parsing only when configured. """
self.assertEqual(parse_token_response(self.json_notype), self.json_notype_dict)
try:
os.environ['OAUTHLIB_STRICT_TOKEN_TYPE'] = '1'
self.assertRaises(MissingTokenTypeError, parse_token_response, self.json_notype)
finally:
del os.environ['OAUTHLIB_STRICT_TOKEN_TYPE']
def test_url_encoded_token_response(self):
"""Verify fallback parameter parsing and validation for token responses. """
self.assertEqual(parse_token_response(self.url_encoded_response), self.json_dict)
self.assertRaises(AccessDeniedError, parse_token_response, self.url_encoded_error)
self.assertRaises(MissingTokenError, parse_token_response, self.url_encoded_notoken)
scope_changes_recorded = []
def record_scope_change(sender, message, old, new):
scope_changes_recorded.append((message, old, new))
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
signals.scope_changed.connect(record_scope_change)
try:
token = parse_token_response(self.url_encoded_response, scope='aaa')
self.assertEqual(len(scope_changes_recorded), 1)
message, old, new = scope_changes_recorded[0]
for scope in new + old:
self.assertIn(scope, message)
self.assertEqual(old, ['aaa'])
self.assertEqual(set(new), set(['abc', 'def']))
finally:
signals.scope_changed.disconnect(record_scope_change)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
def test_token_response_with_expires(self):
"""Verify fallback for alternate spelling of expires_in. """
self.assertEqual(parse_token_response(self.json_expires), self.json_dict)
|
|
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, RegexLexer, bygroups, words, \
include, default, this, using, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Whitespace
__all__ = ['ProtoBufLexer', 'ZeekLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer',
'CrmshLexer', 'ThriftLexer', 'FlatlineLexer', 'SnowballLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Whitespace),
(r'[,;{}\[\]()<>]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated',
'reserved', 'default', 'packed', 'ctype', 'extensions', 'to',
'max', 'rpc', 'returns', 'oneof', 'syntax'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Whitespace), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Whitespace, Operator)),
(r'[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
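# A minimal sketch of driving this lexer directly (the registry route would
# be pygments.lexers.get_lexer_by_name('protobuf')):
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight('message Foo { required int32 id = 1; }',
#                     ProtoBufLexer(), TerminalFormatter()))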
class ThriftLexer(RegexLexer):
"""
For `Thrift <https://thrift.apache.org/>`__ interface definitions.
.. versionadded:: 2.1
"""
name = 'Thrift'
aliases = ['thrift']
filenames = ['*.thrift']
mimetypes = ['application/x-thrift']
tokens = {
'root': [
include('whitespace'),
include('comments'),
(r'"', String.Double, combined('stringescape', 'dqs')),
(r'\'', String.Single, combined('stringescape', 'sqs')),
(r'(namespace)(\s+)',
bygroups(Keyword.Namespace, Whitespace), 'namespace'),
(r'(enum|union|struct|service|exception)(\s+)',
bygroups(Keyword.Declaration, Whitespace), 'class'),
(r'((?:(?:[^\W\d]|\$)[\w.\[\]$<>]*\s+)+?)' # return arguments
r'((?:[^\W\d]|\$)[\w$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Whitespace, Operator)),
include('keywords'),
include('numbers'),
(r'[&=]', Operator),
(r'[:;,{}()<>\[\]]', Punctuation),
(r'[a-zA-Z_](\.\w|\w)*', Name),
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
],
'comments': [
(r'#.*$', Comment),
(r'//.*?\n', Comment),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
],
'stringescape': [
(r'\\([\\nrt"\'])', String.Escape),
],
'dqs': [
(r'"', String.Double, '#pop'),
(r'[^\\"\n]+', String.Double),
],
'sqs': [
(r"'", String.Single, '#pop'),
(r'[^\\\'\n]+', String.Single),
],
'namespace': [
(r'[a-z*](\.\w|\w)*', Name.Namespace, '#pop'),
default('#pop'),
],
'class': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'keywords': [
(r'(async|oneway|extends|throws|required|optional)\b', Keyword),
(r'(true|false)\b', Keyword.Constant),
(r'(const|typedef)\b', Keyword.Declaration),
(words((
'cpp_namespace', 'cpp_include', 'cpp_type', 'java_package',
'cocoa_prefix', 'csharp_namespace', 'delphi_namespace',
'php_namespace', 'py_module', 'perl_package',
'ruby_namespace', 'smalltalk_category', 'smalltalk_prefix',
'xsd_all', 'xsd_optional', 'xsd_nillable', 'xsd_namespace',
'xsd_attrs', 'include'), suffix=r'\b'),
Keyword.Namespace),
(words((
'void', 'bool', 'byte', 'i16', 'i32', 'i64', 'double',
'string', 'binary', 'map', 'list', 'set', 'slist',
'senum'), suffix=r'\b'),
Keyword.Type),
(words((
'BEGIN', 'END', '__CLASS__', '__DIR__', '__FILE__',
'__FUNCTION__', '__LINE__', '__METHOD__', '__NAMESPACE__',
'abstract', 'alias', 'and', 'args', 'as', 'assert', 'begin',
'break', 'case', 'catch', 'class', 'clone', 'continue',
'declare', 'def', 'default', 'del', 'delete', 'do', 'dynamic',
'elif', 'else', 'elseif', 'elsif', 'end', 'enddeclare',
'endfor', 'endforeach', 'endif', 'endswitch', 'endwhile',
'ensure', 'except', 'exec', 'finally', 'float', 'for',
'foreach', 'function', 'global', 'goto', 'if', 'implements',
'import', 'in', 'inline', 'instanceof', 'interface', 'is',
'lambda', 'module', 'native', 'new', 'next', 'nil', 'not',
'or', 'pass', 'public', 'print', 'private', 'protected',
'raise', 'redo', 'rescue', 'retry', 'register', 'return',
'self', 'sizeof', 'static', 'super', 'switch', 'synchronized',
'then', 'this', 'throw', 'transient', 'try', 'undef',
'unless', 'unsigned', 'until', 'use', 'var', 'virtual',
'volatile', 'when', 'while', 'with', 'xor', 'yield'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
],
'numbers': [
(r'[+-]?(\d+\.\d+([eE][+-]?\d+)?|\.?\d+[eE][+-]?\d+)', Number.Float),
(r'[+-]?0x[0-9A-Fa-f]+', Number.Hex),
(r'[+-]?[0-9]+', Number.Integer),
],
}
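# The mimetype registered above also makes this lexer discoverable via
# pygments.lexers.get_lexer_for_mimetype('application/x-thrift').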
class ZeekLexer(RegexLexer):
"""
For `Zeek <https://www.zeek.org/>`_ scripts.
.. versionadded:: 2.5
"""
name = 'Zeek'
aliases = ['zeek', 'bro']
filenames = ['*.zeek', '*.bro']
_hex = r'[0-9a-fA-F]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
include('whitespace'),
include('comments'),
include('directives'),
include('attributes'),
include('types'),
include('keywords'),
include('literals'),
include('operators'),
include('punctuation'),
(r'((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(?=\s*\()',
Name.Function),
include('identifiers'),
],
'whitespace': [
(r'\n', Whitespace),
(r'\s+', Whitespace),
(r'(\\)(\n)', bygroups(Text, Whitespace)),
],
'comments': [
(r'#.*$', Comment),
],
'directives': [
(r'@(load-plugin|load-sigs|load|unload)\b.*$', Comment.Preproc),
(r'@(DEBUG|DIR|FILENAME|deprecated|if|ifdef|ifndef|else|endif)\b', Comment.Preproc),
(r'(@prefixes)(\s*)((\+?=).*)$', bygroups(Comment.Preproc,
Whitespace, Comment.Preproc)),
],
'attributes': [
(words(('redef', 'priority', 'log', 'optional', 'default', 'add_func',
'delete_func', 'expire_func', 'read_expire', 'write_expire',
'create_expire', 'synchronized', 'persistent', 'rotate_interval',
'rotate_size', 'encrypt', 'raw_output', 'mergeable', 'error_handler',
'type_column', 'deprecated'),
prefix=r'&', suffix=r'\b'),
Keyword.Pseudo),
],
'types': [
(words(('any',
'enum', 'record', 'set', 'table', 'vector',
'function', 'hook', 'event',
'addr', 'bool', 'count', 'double', 'file', 'int', 'interval',
'pattern', 'port', 'string', 'subnet', 'time'),
suffix=r'\b'),
Keyword.Type),
(r'(opaque)(\s+)(of)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
bygroups(Keyword.Type, Whitespace, Operator.Word, Whitespace, Keyword.Type)),
(r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)(\s*)\b(record|enum)\b',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Operator, Whitespace, Keyword.Type)),
(r'(type)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)(\s*)(:)',
bygroups(Keyword, Whitespace, Name, Whitespace, Operator)),
(r'(redef)(\s+)(record|enum)(\s+)((?:[A-Za-z_]\w*)(?:::(?:[A-Za-z_]\w*))*)\b',
bygroups(Keyword, Whitespace, Keyword.Type, Whitespace, Name.Class)),
],
'keywords': [
(words(('redef', 'export', 'if', 'else', 'for', 'while',
'return', 'break', 'next', 'continue', 'fallthrough',
'switch', 'default', 'case',
'add', 'delete',
'when', 'timeout', 'schedule'),
suffix=r'\b'),
Keyword),
(r'(print)\b', Keyword),
(r'(global|local|const|option)\b', Keyword.Declaration),
(r'(module)(\s+)(([A-Za-z_]\w*)(?:::([A-Za-z_]\w*))*)\b',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
],
'literals': [
(r'"', String, 'string'),
# Not the greatest match for patterns, but generally helps
# disambiguate between start of a pattern and just a division
# operator.
(r'/(?=.*/)', String.Regex, 'regex'),
(r'(T|F)\b', Keyword.Constant),
# Port
(r'\d{1,5}/(udp|tcp|icmp|unknown)\b', Number),
            # IPv4 Address
            (r'(\d{1,3}\.){3}(\d{1,3})\b', Number),
            # IPv6 Address
            (r'\[([0-9a-fA-F]{0,4}:){2,7}([0-9a-fA-F]{0,4})?((\d{1,3}\.){3}(\d{1,3}))?\]', Number),
# Numeric
(r'0[xX]' + _hex + r'+\b', Number.Hex),
(_float + r'\s*(day|hr|min|sec|msec|usec)s?\b', Number.Float),
(_float + r'\b', Number.Float),
(r'(\d+)\b', Number.Integer),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
],
'operators': [
(r'[!%*/+<=>~|&^-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|as|is|of)\b', Operator.Word),
(r'\??\$', Operator),
],
'punctuation': [
(r'[{}()\[\],;.]', Punctuation),
# The "ternary if", which uses '?' and ':', could instead be
# treated as an Operator, but colons are more frequently used to
# separate field/identifier names from their types, so the (often)
# less-prominent Punctuation is used even with '?' for consistency.
(r'[?:]', Punctuation),
],
'identifiers': [
(r'([a-zA-Z_]\w*)(::)', bygroups(Name, Punctuation)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'\\.', String.Escape),
(r'%-?[0-9]*(\.[0-9]+)?[DTd-gsx]', String.Escape),
(r'"', String, '#pop'),
(r'.', String),
],
'regex': [
(r'\\.', String.Escape),
(r'/', String.Regex, '#pop'),
(r'.', String.Regex),
],
}
BroLexer = ZeekLexer
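# Backwards-compatible alias: the Bro project was renamed to Zeek, and older
# code may still import BroLexer.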
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'\s+', Whitespace),
],
'comments': [
(r'(\s*)(#.*)$', bygroups(Whitespace, Comment)),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
(r'[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
# Left out 'group' and 'require'
# Since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
`RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
language used in RAISE (Rigorous Approach to Industrial Software Engineering)
method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^([ \t]*)([\w]+)([ \t]*)(:[^:])', bygroups(Whitespace,
Name.Function, Whitespace, Name.Function)),
(r'(^[ \t]*)([\w]+)([ \t]*)(\([\w\s,]*\))([ \t]*)(is|as)',
bygroups(Whitespace, Name.Function, Whitespace, Text,
Whitespace, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'\s+', Whitespace),
(r'.', Text),
],
}
def analyse_text(text):
"""
        Check for the most common text at the beginning of an RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
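# Editorial usage sketch (added): analyse_text feeds Pygments' lexer guessing,
# and a score of 1.0 means "certainly RSL". The scheme text below is made up;
# given the score above, guess_lexer is expected to pick this lexer.
def _demo_rsl_guess():
    from pygments.lexers import guess_lexer
    print(guess_lexer('scheme DEMO = class type T = Int end').name)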
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Whitespace, Operator, Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Whitespace),
(r'\s+', Whitespace)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Whitespace)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Whitespace),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Whitespace), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Whitespace), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r'\n', Whitespace),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <https://github.com/quattor/pan/>`_ source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final',
'prefix', 'unique', 'object', 'foreach', 'include', 'template',
'function', 'variable', 'structure', 'extensible', 'declaration'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches',
'replace', 'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase',
'debug', 'error', 'traceback', 'deprecated', 'base64_decode',
'base64_encode', 'digest', 'escape', 'unescape', 'append', 'create',
'first', 'nlist', 'key', 'list', 'merge', 'next', 'prepend', 'is_boolean',
'is_defined', 'is_double', 'is_list', 'is_long', 'is_nlist', 'is_null',
'is_number', 'is_property', 'is_resource', 'is_string', 'to_boolean',
'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'),
prefix=r'\b', suffix=r'\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Whitespace, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Whitespace),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
class CrmshLexer(RegexLexer):
"""
Lexer for `crmsh <http://crmsh.github.io/>`_ configuration files
for Pacemaker clusters.
.. versionadded:: 2.1
"""
name = 'Crmsh'
aliases = ['crmsh', 'pcmk']
filenames = ['*.crmsh', '*.pcmk']
mimetypes = []
elem = words((
'node', 'primitive', 'group', 'clone', 'ms', 'location',
'colocation', 'order', 'fencing_topology', 'rsc_ticket',
'rsc_template', 'property', 'rsc_defaults',
'op_defaults', 'acl_target', 'acl_group', 'user', 'role',
'tag'), suffix=r'(?![\w#$-])')
sub = words((
'params', 'meta', 'operations', 'op', 'rule',
'attributes', 'utilization'), suffix=r'(?![\w#$-])')
acl = words(('read', 'write', 'deny'), suffix=r'(?![\w#$-])')
bin_rel = words(('and', 'or'), suffix=r'(?![\w#$-])')
un_ops = words(('defined', 'not_defined'), suffix=r'(?![\w#$-])')
date_exp = words(('in_range', 'date', 'spec', 'in'), suffix=r'(?![\w#$-])')
acl_mod = (r'(?:tag|ref|reference|attribute|type|xpath)')
bin_ops = (r'(?:lt|gt|lte|gte|eq|ne)')
val_qual = (r'(?:string|version|number)')
rsc_role_action = (r'(?:Master|Started|Slave|Stopped|'
r'start|promote|demote|stop)')
tokens = {
'root': [
(r'^(#.*)(\n)?', bygroups(Comment, Whitespace)),
# attr=value (nvpair)
(r'([\w#$-]+)(=)("(?:""|[^"])*"|\S+)',
bygroups(Name.Attribute, Punctuation, String)),
# need this construct, otherwise numeric node ids
# are matched as scores
# elem id:
(r'(node)(\s+)([\w#$-]+)(:)',
bygroups(Keyword, Whitespace, Name, Punctuation)),
# scores
(r'([+-]?([0-9]+|inf)):', Number),
# keywords (elements and other)
(elem, Keyword),
(sub, Keyword),
(acl, Keyword),
# binary operators
(r'(?:%s:)?(%s)(?![\w#$-])' % (val_qual, bin_ops), Operator.Word),
# other operators
(bin_rel, Operator.Word),
(un_ops, Operator.Word),
(date_exp, Operator.Word),
# builtin attributes (e.g. #uname)
(r'#[a-z]+(?![\w#$-])', Name.Builtin),
# acl_mod:blah
(r'(%s)(:)("(?:""|[^"])*"|\S+)' % acl_mod,
bygroups(Keyword, Punctuation, Name)),
# rsc_id[:(role|action)]
# NB: this matches all other identifiers
(r'([\w#$-]+)(?:(:)(%s))?(?![\w#$-])' % rsc_role_action,
bygroups(Name, Punctuation, Operator.Word)),
# punctuation
(r'(\\(?=\n)|[\[\](){}/:@])', Punctuation),
(r'\s+|\n', Whitespace),
],
}
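# Editorial note (added): rule order in 'root' matters here. The dedicated
# "elem id" rule must run before the score rule, otherwise the colon-suffixed
# id in e.g. `node 42:` would be consumed by the numeric score pattern
# `([+-]?([0-9]+|inf)):` instead of being tokenized as a node name.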
class FlatlineLexer(RegexLexer):
"""
Lexer for `Flatline <https://github.com/bigmlcom/flatline>`_ expressions.
.. versionadded:: 2.2
"""
name = 'Flatline'
aliases = ['flatline']
filenames = []
mimetypes = ['text/x-flatline']
special_forms = ('let',)
builtins = (
"!=", "*", "+", "-", "<", "<=", "=", ">", ">=", "abs", "acos", "all",
"all-but", "all-with-defaults", "all-with-numeric-default", "and",
"asin", "atan", "avg", "avg-window", "bin-center", "bin-count", "call",
"category-count", "ceil", "cond", "cond-window", "cons", "cos", "cosh",
"count", "diff-window", "div", "ensure-value", "ensure-weighted-value",
"epoch", "epoch-day", "epoch-fields", "epoch-hour", "epoch-millisecond",
"epoch-minute", "epoch-month", "epoch-second", "epoch-weekday",
"epoch-year", "exp", "f", "field", "field-prop", "fields", "filter",
"first", "floor", "head", "if", "in", "integer", "language", "length",
"levenshtein", "linear-regression", "list", "ln", "log", "log10", "map",
"matches", "matches?", "max", "maximum", "md5", "mean", "median", "min",
"minimum", "missing", "missing-count", "missing?", "missing_count",
"mod", "mode", "normalize", "not", "nth", "occurrences", "or",
"percentile", "percentile-label", "population", "population-fraction",
"pow", "preferred", "preferred?", "quantile-label", "rand", "rand-int",
"random-value", "re-quote", "real", "replace", "replace-first", "rest",
"round", "row-number", "segment-label", "sha1", "sha256", "sin", "sinh",
"sqrt", "square", "standard-deviation", "standard_deviation", "str",
"subs", "sum", "sum-squares", "sum-window", "sum_squares", "summary",
"summary-no", "summary-str", "tail", "tan", "tanh", "to-degrees",
"to-radians", "variance", "vectorize", "weighted-random-value", "window",
"winnow", "within-percentiles?", "z-score",
)
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
tokens = {
'root': [
# whitespaces - usually not relevant
(r'[,]+', Text),
(r'\s+', Whitespace),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[a-f\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r"\\(.|[a-z]+)", String.Char),
# expression template placeholder
(r'_', String.Symbol),
# highlight the special forms
(words(special_forms, suffix=' '), Keyword),
# highlight the builtins
(words(builtins, suffix=' '), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# parentheses
(r'(\(|\))', Punctuation),
],
}
class SnowballLexer(ExtendedRegexLexer):
"""
Lexer for `Snowball <http://snowballstem.org/>`_ source code.
.. versionadded:: 2.2
"""
name = 'Snowball'
aliases = ['snowball']
filenames = ['*.sbl']
_ws = r'\n\r\t '
def __init__(self, **options):
self._reset_stringescapes()
ExtendedRegexLexer.__init__(self, **options)
def _reset_stringescapes(self):
self._start = "'"
self._end = "'"
def _string(do_string_first):
def callback(lexer, match, ctx):
s = match.start()
text = match.group()
string = re.compile(r'([^%s]*)(.)' % re.escape(lexer._start)).match
escape = re.compile(r'([^%s]*)(.)' % re.escape(lexer._end)).match
pos = 0
do_string = do_string_first
while pos < len(text):
if do_string:
match = string(text, pos)
yield s + match.start(1), String.Single, match.group(1)
if match.group(2) == "'":
yield s + match.start(2), String.Single, match.group(2)
ctx.stack.pop()
break
yield s + match.start(2), String.Escape, match.group(2)
pos = match.end()
match = escape(text, pos)
yield s + match.start(), String.Escape, match.group()
if match.group(2) != lexer._end:
ctx.stack[-1] = 'escape'
break
pos = match.end()
do_string = True
ctx.pos = s + match.end()
return callback
def _stringescapes(lexer, match, ctx):
lexer._start = match.group(3)
lexer._end = match.group(5)
return bygroups(Keyword.Reserved, Whitespace, String.Escape, Whitespace,
String.Escape)(lexer, match, ctx)
tokens = {
'root': [
(words(('len', 'lenof'), suffix=r'\b'), Operator.Word),
include('root1'),
],
'root1': [
(r'[%s]+' % _ws, Whitespace),
(r'\d+', Number.Integer),
(r"'", String.Single, 'string'),
(r'[()]', Punctuation),
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*', Comment.Single),
(r'[!*+\-/<=>]=|[-=]>|<[+-]|[$*+\-/<=>?\[\]]', Operator),
(words(('as', 'get', 'hex', 'among', 'define', 'decimal',
'backwardmode'), suffix=r'\b'),
Keyword.Reserved),
(words(('strings', 'booleans', 'integers', 'routines', 'externals',
'groupings'), suffix=r'\b'),
Keyword.Reserved, 'declaration'),
(words(('do', 'or', 'and', 'for', 'hop', 'non', 'not', 'set', 'try',
'fail', 'goto', 'loop', 'next', 'test', 'true',
'false', 'unset', 'atmark', 'attach', 'delete', 'gopast',
'insert', 'repeat', 'sizeof', 'tomark', 'atleast',
'atlimit', 'reverse', 'setmark', 'tolimit', 'setlimit',
'backwards', 'substring'), suffix=r'\b'),
Operator.Word),
(words(('size', 'limit', 'cursor', 'maxint', 'minint'),
suffix=r'\b'),
Name.Builtin),
(r'(stringdef\b)([%s]*)([^%s]+)' % (_ws, _ws),
bygroups(Keyword.Reserved, Whitespace, String.Escape)),
(r'(stringescapes\b)([%s]*)(.)([%s]*)(.)' % (_ws, _ws),
_stringescapes),
(r'[A-Za-z]\w*', Name),
],
'declaration': [
(r'\)', Punctuation, '#pop'),
(words(('len', 'lenof'), suffix=r'\b'), Name,
('root1', 'declaration')),
include('root1'),
],
'string': [
(r"[^']*'", _string(True)),
],
'escape': [
(r"[^']*'", _string(False)),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
self._reset_stringescapes()
return ExtendedRegexLexer.get_tokens_unprocessed(self, text, context)
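# Editorial usage sketch (added): Snowball's `stringescapes {}` command changes
# the escape delimiters at lex time, which is why this lexer keeps _start/_end
# on the instance and re-scans strings via the _string callbacks. The source
# snippet below is made up for illustration.
def _demo_snowball_escapes():
    src = "stringescapes {}\ndefine stem as ( 'a{'}b' )\n"
    for tok, value in SnowballLexer().get_tokens(src):
        print(tok, repr(value))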
|
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from . import grammar, token, tokenize
from typing import (
Any,
Dict,
IO,
Iterator,
List,
Optional,
Text,
Tuple,
Union,
Sequence,
NoReturn,
)
from blib2to3.pgen2 import grammar
from blib2to3.pgen2.tokenize import GoodTokenInfo
import os
Path = Union[str, "os.PathLike[str]"]
class PgenGrammar(grammar.Grammar):
pass
class ParserGenerator(object):
filename: Path
stream: IO[Text]
generator: Iterator[GoodTokenInfo]
first: Dict[Text, Optional[Dict[Text, int]]]
def __init__(self, filename: Path, stream: Optional[IO[Text]] = None) -> None:
close_stream = None
if stream is None:
stream = open(filename)
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.gettoken() # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
self.first = {} # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self) -> PgenGrammar:
c = PgenGrammar()
names = list(self.dfas.keys())
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
for name in names:
i = 256 + len(c.symbol2number)
c.symbol2number[name] = i
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
states = []
for state in dfa:
arcs = []
for label, next in sorted(state.arcs.items()):
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
states.append(arcs)
c.states.append(states)
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
c.start = c.symbol2number[self.startsymbol]
return c
def make_first(self, c: PgenGrammar, name: Text) -> Dict[int, int]:
rawfirst = self.first[name]
assert rawfirst is not None
first = {}
for label in sorted(rawfirst):
ilabel = self.make_label(c, label)
##assert ilabel not in first # XXX failed on <> ... !=
first[ilabel] = 1
return first
def make_label(self, c: PgenGrammar, label: Text) -> int:
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
# Either a symbol name or a named token
if label in c.symbol2number:
# A symbol name (a non-terminal)
if label in c.symbol2label:
return c.symbol2label[label]
else:
c.labels.append((c.symbol2number[label], None))
c.symbol2label[label] = ilabel
return ilabel
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
assert isinstance(itoken, int), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
value = eval(label)
if value[0].isalpha():
if label[0] == '"':
keywords = c.soft_keywords
else:
keywords = c.keywords
# A keyword
if value in keywords:
return keywords[value]
else:
c.labels.append((token.NAME, value))
keywords[value] = ilabel
return ilabel
else:
# An operator (any non-numeric token)
itoken = grammar.opmap[value] # Fails if unknown token
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
def addfirstsets(self) -> None:
names = list(self.dfas.keys())
names.sort()
for name in names:
if name not in self.first:
self.calcfirst(name)
# print name, self.first[name].keys()
def calcfirst(self, name: Text) -> None:
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset: Dict[str, int] = {}
overlapcheck = {}
for label, next in state.arcs.items():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
else:
self.calcfirst(label)
fset = self.first[label]
assert fset is not None
totalset.update(fset)
overlapcheck[label] = fset
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse: Dict[str, str] = {}
for label, itsfirst in overlapcheck.items():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError(
"rule %s is ambiguous; %s is in the first sets of %s as well"
" as %s" % (name, symbol, label, inverse[symbol])
)
inverse[symbol] = label
self.first[name] = totalset
def parse(self) -> Tuple[Dict[Text, List["DFAState"]], Text]:
dfas = {}
startsymbol: Optional[str] = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
while self.type != token.ENDMARKER:
while self.type == token.NEWLINE:
self.gettoken()
# RULE: NAME ':' RHS NEWLINE
name = self.expect(token.NAME)
self.expect(token.OP, ":")
a, z = self.parse_rhs()
self.expect(token.NEWLINE)
# self.dump_nfa(name, a, z)
dfa = self.make_dfa(a, z)
# self.dump_dfa(name, dfa)
oldlen = len(dfa)
self.simplify_dfa(dfa)
newlen = len(dfa)
dfas[name] = dfa
# print name, oldlen, newlen
if startsymbol is None:
startsymbol = name
assert startsymbol is not None
return dfas, startsymbol
def make_dfa(self, start: "NFAState", finish: "NFAState") -> List["DFAState"]:
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
# values.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state: NFAState) -> Dict[NFAState, int]:
base: Dict[NFAState, int] = {}
addclosure(state, base)
return base
def addclosure(state: NFAState, base: Dict[NFAState, int]) -> None:
assert isinstance(state, NFAState)
if state in base:
return
base[state] = 1
for label, next in state.arcs:
if label is None:
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
arcs: Dict[str, Dict[NFAState, int]] = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in sorted(arcs.items()):
for st in states:
if st.nfaset == nfaset:
break
else:
st = DFAState(nfaset, finish)
states.append(st)
state.addarc(st, label)
return states # List of DFAState instances; first one is start
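    # Editorial worked example (comments added for clarity): for a rule such as
    #     a: 'x' | 'y'
    # parse_rhs() builds an NFA whose start state has epsilon arcs into both
    # alternatives. closure() folds the alternatives into one DFA start state
    # with arcs on 'x' and 'y'; the two resulting final states have identical
    # (empty) arc sets, so simplify_dfa() later unifies them, leaving a
    # two-state DFA.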
def dump_nfa(self, name: Text, start: "NFAState", finish: "NFAState") -> None:
print("Dump of NFA for", name)
todo = [start]
for i, state in enumerate(todo):
print(" State", i, state is finish and "(final)" or "")
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
else:
j = len(todo)
todo.append(next)
if label is None:
print(" -> %d" % j)
else:
print(" %s -> %d" % (label, j))
def dump_dfa(self, name: Text, dfa: Sequence["DFAState"]) -> None:
print("Dump of DFA for", name)
for i, state in enumerate(dfa):
print(" State", i, state.isfinal and "(final)" or "")
for label, next in sorted(state.arcs.items()):
print(" %s -> %d" % (label, dfa.index(next)))
def simplify_dfa(self, dfa: List["DFAState"]) -> None:
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
# unify them, until things stop changing.
# dfa is a list of DFAState instances
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfa):
for j in range(i + 1, len(dfa)):
state_j = dfa[j]
if state_i == state_j:
# print " unify", i, j
del dfa[j]
for state in dfa:
state.unifystate(state_j, state_i)
changes = True
break
def parse_rhs(self) -> Tuple["NFAState", "NFAState"]:
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
return a, z
else:
aa = NFAState()
zz = NFAState()
aa.addarc(a)
z.addarc(zz)
while self.value == "|":
self.gettoken()
a, z = self.parse_alt()
aa.addarc(a)
z.addarc(zz)
return aa, zz
def parse_alt(self) -> Tuple["NFAState", "NFAState"]:
# ALT: ITEM+
a, b = self.parse_item()
while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
c, d = self.parse_item()
b.addarc(c)
b = d
return a, b
def parse_item(self) -> Tuple["NFAState", "NFAState"]:
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, "]")
a.addarc(z)
return a, z
else:
a, z = self.parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self.gettoken()
z.addarc(a)
if value == "+":
return a, z
else:
return a, a
def parse_atom(self) -> Tuple["NFAState", "NFAState"]:
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, ")")
return a, z
elif self.type in (token.NAME, token.STRING):
a = NFAState()
z = NFAState()
a.addarc(z, self.value)
self.gettoken()
return a, z
else:
self.raise_error(
"expected (...) or NAME or STRING, got %s/%s", self.type, self.value
)
assert False
def expect(self, type: int, value: Optional[Any] = None) -> Text:
if self.type != type or (value is not None and self.value != value):
self.raise_error(
"expected %s/%s, got %s/%s", type, value, self.type, self.value
)
value = self.value
self.gettoken()
return value
def gettoken(self) -> None:
tup = next(self.generator)
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = next(self.generator)
self.type, self.value, self.begin, self.end, self.line = tup
# print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg: str, *args: Any) -> NoReturn:
if args:
try:
msg = msg % args
            except Exception:
msg = " ".join([msg] + list(map(str, args)))
raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
class NFAState(object):
arcs: List[Tuple[Optional[Text], "NFAState"]]
def __init__(self) -> None:
self.arcs = [] # list of (label, NFAState) pairs
def addarc(self, next: "NFAState", label: Optional[Text] = None) -> None:
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
class DFAState(object):
nfaset: Dict[NFAState, Any]
isfinal: bool
arcs: Dict[Text, "DFAState"]
def __init__(self, nfaset: Dict[NFAState, Any], final: NFAState) -> None:
assert isinstance(nfaset, dict)
assert isinstance(next(iter(nfaset)), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
self.arcs = {} # map from label to DFAState
def addarc(self, next: "DFAState", label: Text) -> None:
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
self.arcs[label] = next
def unifystate(self, old: "DFAState", new: "DFAState") -> None:
for label, next in self.arcs.items():
if next is old:
self.arcs[label] = new
def __eq__(self, other: Any) -> bool:
# Equality test -- ignore the nfaset instance variable
assert isinstance(other, DFAState)
if self.isfinal != other.isfinal:
return False
# Can't just return self.arcs == other.arcs, because that
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in self.arcs.items():
if next is not other.arcs.get(label):
return False
return True
__hash__: Any = None # For Py3 compatibility.
def generate_grammar(filename: Path = "Grammar.txt") -> PgenGrammar:
p = ParserGenerator(filename)
return p.make_grammar()
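# Editorial usage sketch (added; not part of the original module). The path is
# an assumption -- any grammar file in pgen format works:
def _demo_generate_grammar(path: Path = "Grammar.txt") -> None:
    g = generate_grammar(path)
    print("start symbol:", g.number2symbol[g.start])
    print("rule count:", len(g.dfas))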
|
|
import json
import datetime
import reversion
from django.db import models
from django.core.exceptions import ValidationError
from django.core import validators
from django.utils import timezone, encoding
from common.models import AbstractBase, Contact, SequenceMixin
from common.fields import SequenceField
from facilities.models import Facility
@reversion.register
@encoding.python_2_unicode_compatible
class Status(AbstractBase):
"""
    Indicates the operational status of a community health unit,
    e.g. fully-functional, semi-functional, functional
"""
name = models.CharField(max_length=50)
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.name
class Meta(AbstractBase.Meta):
verbose_name_plural = 'statuses'
@reversion.register(follow=['health_unit', 'contact'])
@encoding.python_2_unicode_compatible
class CommunityHealthUnitContact(AbstractBase):
"""
    The contacts of the health unit may be email, fax, mobile, etc.
"""
health_unit = models.ForeignKey('CommunityHealthUnit')
contact = models.ForeignKey(Contact)
def __str__(self):
return "{}: ({})".format(self.health_unit, self.contact)
class Meta(object):
unique_together = ('health_unit', 'contact', )
# a hack since the view_communityhealthunitcontact
# is disappearing into thin air
permissions = (
(
"view_communityhealthunitcontact",
"Can view community health_unit contact"
),
)
@reversion.register(follow=['facility', 'status'])
@encoding.python_2_unicode_compatible
class CommunityHealthUnit(SequenceMixin, AbstractBase):
"""
This is a health service delivery structure within a defined geographical
area covering a population of approximately 5,000 people.
Each unit is assigned 2 Community Health Extension Workers (CHEWs) and
community health volunteers who offer promotive, preventative and basic
    curative health services.
"""
name = models.CharField(max_length=100)
code = SequenceField(unique=True)
facility = models.ForeignKey(
Facility,
        help_text='The facility to which the health unit is tied.')
status = models.ForeignKey(Status)
households_monitored = models.PositiveIntegerField(
default=0,
        help_text='The number of households a CHU is in charge of')
date_established = models.DateField(default=timezone.now)
date_operational = models.DateField(null=True, blank=True)
is_approved = models.BooleanField(default=False)
approval_comment = models.TextField(null=True, blank=True)
approval_date = models.DateTimeField(null=True, blank=True)
location = models.CharField(max_length=255, null=True, blank=True)
is_closed = models.BooleanField(default=False)
closing_comment = models.TextField(null=True, blank=True)
is_rejected = models.BooleanField(default=False)
rejection_reason = models.TextField(null=True, blank=True)
has_edits = models.BooleanField(
default=False,
help_text='Indicates that a community health unit has updates that are'
' pending approval')
number_of_chvs = models.PositiveIntegerField(
default=0,
help_text='Number of Community Health volunteers in the CHU')
def __str__(self):
return self.name
@property
def workers(self):
from .serializers import CommunityHealthWorkerPostSerializer
return CommunityHealthWorkerPostSerializer(
self.health_unit_workers, many=True).data
def validate_facility_is_not_closed(self):
if self.facility.closed:
raise ValidationError(
{
"facility":
[
"A Community Unit cannot be attached to a closed "
"facility"
]
}
)
def validate_either_approved_or_rejected_and_not_both(self):
error = {
"approve/reject": [
"A Community Unit cannot be approved and"
" rejected at the same time "]
}
values = [self.is_approved, self.is_rejected]
if values.count(True) > 1:
raise ValidationError(error)
    def validate_date_operational_not_before_date_established(self):
if self.date_operational and self.date_established:
if self.date_established > self.date_operational:
raise ValidationError(
{
"date_operational": [
"Date operation cannot be greater than date "
"established"
]
})
def validate_date_established_not_in_future(self):
"""
        Only the date operational needs to be validated:
        date_established should always be less than the date_operational,
        so if the date_operational is not in the future, the
        date_established is not in the future either.
"""
today = datetime.datetime.now().date()
if self.date_operational and self.date_operational > today:
raise ValidationError(
{
"date_operational": [
"The date operational cannot be in the future"
]
})
@property
def contacts(self):
return [
{
"id": con.id,
"contact_id": con.contact.id,
"contact": con.contact.contact,
"contact_type": con.contact.contact_type.id,
"contact_type_name": con.contact.contact_type.name
}
for con in CommunityHealthUnitContact.objects.filter(
health_unit=self)
]
@property
def json_features(self):
return {
"geometry": {
"coordinates": [
self.facility.facility_coordinates_through.coordinates[0],
self.facility.facility_coordinates_through.coordinates[1]
]
},
"properties": {
"ward": self.facility.ward.id,
"constituency": self.facility.ward.constituency.id,
"county": self.facility.ward.county.id
}
}
def clean(self):
super(CommunityHealthUnit, self).clean()
self.validate_facility_is_not_closed()
self.validate_either_approved_or_rejected_and_not_both()
        self.validate_date_operational_not_before_date_established()
self.validate_date_established_not_in_future()
@property
def pending_updates(self):
try:
chu = ChuUpdateBuffer.objects.get(
is_approved=False,
is_rejected=False,
health_unit=self
)
return chu.updates
except ChuUpdateBuffer.DoesNotExist:
return {}
@property
def latest_update(self):
try:
chu = ChuUpdateBuffer.objects.get(
is_approved=False,
is_rejected=False,
health_unit=self
)
return chu
except ChuUpdateBuffer.DoesNotExist:
return None
def save(self, *args, **kwargs):
if not self.code:
self.code = self.generate_next_code_sequence()
super(CommunityHealthUnit, self).save(*args, **kwargs)
@property
def average_rating(self):
return self.chu_ratings.aggregate(r=models.Avg('rating'))['r'] or 0
@property
def rating_count(self):
return self.chu_ratings.count()
class Meta(AbstractBase.Meta):
unique_together = ('name', 'facility', )
permissions = (
(
"view_rejected_chus",
"Can see the rejected community health units"
),
(
"can_approve_chu",
"Can approve or reject a Community Health Unit"
),
)
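# Editorial sketch (added; not original code): the validators above run through
# full_clean(). `some_facility` and `some_status` are assumed fixtures:
#
# chu = CommunityHealthUnit(name='Demo CHU', facility=some_facility,
#                           status=some_status,
#                           is_approved=True, is_rejected=True)
# chu.full_clean()  # raises ValidationError: a unit cannot be both
#                   # approved and rejected at the same time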
@reversion.register(follow=['health_worker', 'contact'])
@encoding.python_2_unicode_compatible
class CommunityHealthWorkerContact(AbstractBase):
"""
The contacts of the health worker.
They may be as many as the health worker has.
"""
health_worker = models.ForeignKey('CommunityHealthWorker')
contact = models.ForeignKey(Contact)
def __str__(self):
return "{}: ({})".format(self.health_worker, self.contact)
@reversion.register(follow=['health_unit'])
@encoding.python_2_unicode_compatible
class CommunityHealthWorker(AbstractBase):
"""
    A person who is in charge of a certain community health area.
    Whether the worker is still active is shown by the active field
    inherited from AbstractBase.
"""
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50, null=True, blank=True)
is_incharge = models.BooleanField(default=False)
health_unit = models.ForeignKey(
CommunityHealthUnit,
help_text='The health unit the worker is in-charge of',
related_name='health_unit_workers')
def __str__(self):
return "{} ({})".format(self.first_name, self.health_unit.name)
@property
def name(self):
if self.first_name and self.last_name:
return "{} {}".format(self.first_name, self.last_name).strip()
else:
return self.first_name
@reversion.register
@encoding.python_2_unicode_compatible
class CHUService(AbstractBase):
"""
The services offered by the Community Health Units
Examples:
1. First Aid Administration
    2. De-worming, etc.
    All the community health units offer these services. Hence, there is
    no need to link a Community Health Unit to a CHUService instance.
"""
name = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.name
@reversion.register
@encoding.python_2_unicode_compatible
class CHURating(AbstractBase):
"""Rating of a CHU"""
chu = models.ForeignKey(CommunityHealthUnit, related_name='chu_ratings')
rating = models.PositiveIntegerField(
validators=[
validators.MaxValueValidator(5),
validators.MinValueValidator(0)
]
)
comment = models.TextField(null=True, blank=True)
def __str__(self):
return "{} - {}".format(self.chu, self.rating)
class ChuUpdateBuffer(AbstractBase):
"""
    Buffers a community unit's updates until they are approved by the CHRIO.
"""
health_unit = models.ForeignKey(CommunityHealthUnit)
workers = models.TextField(null=True, blank=True)
contacts = models.TextField(null=True, blank=True)
basic = models.TextField(null=True, blank=True)
is_approved = models.BooleanField(default=False)
is_rejected = models.BooleanField(default=False)
is_new = models.BooleanField(default=False)
def validate_atleast_one_attribute_updated(self):
if not self.workers and not self.contacts and not \
self.basic and not self.is_new:
raise ValidationError({"__all__": ["Nothing was edited"]})
def update_basic_details(self):
basic_details = json.loads(self.basic)
if 'status' in basic_details:
basic_details['status_id'] = basic_details.get(
'status').get('status_id')
basic_details.pop('status')
if 'facility' in basic_details:
basic_details['facility_id'] = basic_details.get(
'facility').get('facility_id')
basic_details.pop('facility')
        for key, value in basic_details.items():
setattr(self.health_unit, key, value)
self.health_unit.save()
def update_workers(self):
chews = json.loads(self.workers)
for chew in chews:
chew['health_unit'] = self.health_unit
chew['created_by_id'] = self.created_by_id
chew['updated_by_id'] = self.updated_by_id
chew.pop('created_by', None)
chew.pop('updated_by', None)
if 'id' in chew:
chew_obj = CommunityHealthWorker.objects.get(
id=chew['id'])
chew_obj.first_name = chew['first_name']
chew_obj.last_name = chew['last_name']
if 'is_incharge' in chew:
chew_obj.is_incharge = chew['is_incharge']
chew_obj.save()
else:
CommunityHealthWorker.objects.create(**chew)
def update_contacts(self):
contacts = json.loads(self.contacts)
for contact in contacts:
contact['updated_by_id'] = self.updated_by_id
contact['created_by_id'] = self.created_by_id
contact['contact_type_id'] = contact['contact_type']
contact.pop('contact_type', None)
contact.pop('contact_id', None)
contact.pop('contact_type_name', None)
contact_data = {
'contact_type_id': contact['contact_type_id'],
'contact': contact['contact']
}
try:
contact_obj = Contact.objects.get(**contact_data)
except Contact.DoesNotExist:
contact_obj = Contact.objects.create(**contact)
try:
CommunityHealthUnitContact.objects.filter(
contact=contact_obj)[0]
except IndexError:
CommunityHealthUnitContact.objects.create(
contact=contact_obj,
health_unit=self.health_unit,
created_by_id=self.created_by_id,
updated_by_id=self.updated_by_id)
@property
def updates(self):
updates = {}
if self.basic:
updates['basic'] = json.loads(self.basic)
if self.contacts:
updates['contacts'] = json.loads(self.contacts)
if self.workers:
updates['workers'] = json.loads(self.workers)
updates['updated_by'] = self.updated_by.get_full_name
return updates
def clean(self, *args, **kwargs):
if not self.is_approved and not self.is_rejected:
self.health_unit.has_edits = True
self.health_unit.save()
if self.is_approved and self.contacts:
self.update_contacts()
self.health_unit.has_edits = False
self.health_unit.save()
if self.is_approved and self.workers:
self.update_workers()
self.health_unit.has_edits = False
self.health_unit.save()
if self.is_approved and self.basic:
self.update_basic_details()
self.health_unit.has_edits = False
self.health_unit.save()
if self.is_rejected:
self.health_unit.has_edits = False
self.health_unit.save()
self.validate_atleast_one_attribute_updated()
def __str__(self):
return self.health_unit.name
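# Editorial sketch (added; not original code) of the intended approval flow:
# edits are parked as JSON on a ChuUpdateBuffer and only written back to the
# CommunityHealthUnit once the CHRIO approves. `chu` is an assumed existing
# unit, and this assumes AbstractBase.save() invokes full_clean() so that
# clean() above runs:
#
# buf = ChuUpdateBuffer(health_unit=chu,
#                       basic=json.dumps({'name': 'New name'}))
# buf.save()             # clean() marks chu.has_edits = True
# buf.is_approved = True
# buf.save()             # clean() applies update_basic_details()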
|
|
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
import unittest
from datetime import datetime, timedelta
from hamcrest import *
from mock import Mock
from ArchiverAccess.archive_data_file_creator import DataFileCreationError
from ArchiverAccess.archive_time_period import ArchiveTimePeriod
from ArchiverAccess.archiver_data_source import ArchiverDataValue
from ArchiverAccess.archive_access_configuration import ArchiveAccessConfigBuilder
from ArchiverAccess.log_file_initiator import LogFileInitiatorOnPVChange, SAMPLING_BEHIND_REAL_TIME
from ArchiverAccess.test_modules.stubs import ArchiverDataStub
class TestLogFileInitiatorForContinuousLogging(unittest.TestCase):
def test_GIVEN_config_with_pv_WHEN_logging_pv_has_changed_0_to_1_THEN_log_file_created(self):
# logging pv changes to 1 then back to 0
expected_logging_start = datetime(2017, 1, 1, 1, 1, 2)
data_changes = [[(expected_logging_start, 0, 1)]]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[0], data_changes=data_changes,
logging_start_times=[datetime(2017, 1, 1, 1, 1, 1)])
write_file_header_mock = Mock()
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, write_file_header_mock=write_file_header_mock)
log_file_initiator.check_initiated()
write_file_header_mock.assert_called_once_with(expected_logging_start)
    def test_GIVEN_config_with_pv_WHEN_logging_pv_has_switched_off_in_changes_THEN_log_file_body_written_and_file_made_readonly(self):
log_period_in_second = 1
expected_period = timedelta(seconds=log_period_in_second)
# logging pv changes to 1 then back to 0
expected_logging_start = datetime(2017, 1, 1, 1, 1, 2)
expected_logging_stop = datetime(2017, 1, 1, 1, 2, 2)
data_changes = [[(expected_logging_start, 0, 1),
(expected_logging_stop, 0, 0)]]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[0], data_changes=data_changes,
logging_start_times=[datetime(2017, 1, 1, 1, 1, 1)])
write_data_lines_mock = Mock()
finished_log_file_mock = Mock()
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, write_data_lines_mock=write_data_lines_mock, finish_log_file_mock=finished_log_file_mock)
log_file_initiator.check_initiated()
write_data_lines_mock.assert_called_once_with(ArchiveTimePeriod(expected_logging_start, expected_period, finish_time=expected_logging_stop))
finished_log_file_mock.assert_called_once()
def test_GIVEN_config_with_pv_WHEN_logging_switches_on_in_changes_period_THEN_log_file_body_written_to_end_of_period_finish_not_written(self):
log_period_in_second = 1
expected_period = timedelta(seconds=log_period_in_second)
expected_end_time = datetime(2017, 1, 1, 1, 1, 2)
# logging pv changes to 1 then back to 0
expected_logging_start = datetime(2017, 1, 1, 1, 1, 1)
data_changes = [[(expected_logging_start, 0, 1)]]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[0], data_changes=data_changes,
logging_start_times=[datetime(2017, 1, 1, 1, 1, 1)], sample_times=[expected_end_time])
write_data_lines_mock = Mock()
finished_log_file_mock = Mock()
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, write_data_lines_mock=write_data_lines_mock, finish_log_file_mock=finished_log_file_mock)
log_file_initiator.check_initiated()
write_data_lines_mock.assert_called_once_with(ArchiveTimePeriod(expected_logging_start, expected_period, finish_time=expected_end_time))
finished_log_file_mock.assert_not_called()
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_twice_from_1_to_0_THEN_two_continual_log_files_created(self):
log_period_in_second = 1
expected_period = timedelta(seconds=log_period_in_second)
expected_logging_start1 = datetime(2017, 1, 1, 1, 1, 1)
data_changes = [[(datetime(2017, 1, 1, 1, 1, 2), 0, 0),
(datetime(2017, 1, 1, 1, 2, 2), 0, 1),
(datetime(2017, 1, 1, 1, 3, 2), 0, 0)]]
write_data_lines_mock = Mock()
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], data_changes=data_changes, logging_start_times=[expected_logging_start1],)
expected_logging_stop_time1 = datetime(2017, 1, 1, 1, 1, 2)
expected_logging_start_time2 = datetime(2017, 1, 1, 1, 2, 2)
expected_logging_stop_time2 = datetime(2017, 1, 1, 1, 3, 2)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_in_seconds=[log_period_in_second], write_data_lines_mock=write_data_lines_mock)
log_file_initiator.check_initiated()
arg_list = write_data_lines_mock.call_args_list
logging_time_period1 = arg_list[0][0][0]
self.assert_logging_period_correct(logging_time_period1, expected_logging_start1, expected_logging_stop_time1,
expected_period)
logging_time_period2 = arg_list[1][0][0]
self.assert_logging_period_correct(logging_time_period2, expected_logging_start_time2,
expected_logging_stop_time2,
expected_period)
def assert_logging_period_correct(self, logging_time_period, expected_logging_start, expected_logging_stop_time,
expected_period):
assert_that(logging_time_period.delta, is_(expected_period), "Delta")
assert_that(logging_time_period.start_time, is_(expected_logging_start), "Start time")
assert_that(logging_time_period.end_time, is_(expected_logging_stop_time), "End time")
def test_GIVEN_config_with_pv_WHEN_logging_pv_is_initially_at_1_THEN_log_file_header_created(self):
expected_logging_start = datetime(2017, 1, 1, 1, 1, 2)
initial_values = [1]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=initial_values,
logging_start_times=[expected_logging_start])
write_file_header_mock = Mock()
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, write_file_header_mock=write_file_header_mock)
log_file_initiator.check_initiated()
write_file_header_mock.assert_called_once_with(expected_logging_start)
class TestLogFileInitiator(unittest.TestCase):
def test_GIVEN_config_with_pv_WHEN_get_data_THEN_correct_sample_times_asked_for(self):
sample_times = [datetime(2001, 2, 3, 4, 5, 36)]
time_last_active = datetime(2001, 2, 3, 4, 5, 6)
sample_id_last_active = 10
archive_data_source = DataSourceMother.set_up_data_source(sample_times=sample_times)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, time_last_actived=time_last_active, sample_id_last_active=sample_id_last_active)
log_file_initiator.check_initiated()
assert_that(archive_data_source.from_sample_time, is_([time_last_active]))
assert_that(archive_data_source.to_sample_time, is_([sample_times[0]]))
assert_that(archive_data_source.from_sample_id, is_([sample_id_last_active]))
def test_GIVEN_config_with_pv_WHEN_check_write_twice_THEN_consecutive_sample_times_are_used(self):
sample_ids = [90, 91]
sample_id_last_active = 10
time_last_active = datetime(2001, 2, 3, 4, 5, 6)
sample_times = [datetime(2001, 2, 3, 4, 5, 36), datetime(2001, 2, 3, 4, 6, 6)]
archive_data_source = DataSourceMother.set_up_data_source(sample_times=sample_times, data_changes=[[], []], sample_ids=sample_ids)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, time_last_actived=time_last_active, sample_id_last_active=sample_id_last_active)
log_file_initiator.check_initiated()
log_file_initiator.check_initiated()
assert_that(archive_data_source.from_sample_time, is_([time_last_active, sample_times[0]]))
assert_that(archive_data_source.to_sample_time, is_([sample_times[0], sample_times[1]]))
assert_that(archive_data_source.from_sample_id, is_([sample_id_last_active, sample_ids[0]]))
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_from_1_to_0_THEN_log_file_created(self):
log_period_in_second = 1
expected_period = timedelta(seconds=log_period_in_second)
expected_logging_start = datetime(2017, 1, 1, 1, 1, 1)
logging_stop_time = datetime(2017, 1, 1, 1, 1, 2)
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], final_pv_value=0)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_in_seconds=[log_period_in_second])
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_called_once()
logging_time_period = self.log_file_creators[0].write_complete_file.call_args[0][0]
assert_that(logging_time_period.delta, is_(expected_period))
assert_that(logging_time_period.start_time, is_(expected_logging_start))
assert_that(logging_time_period.end_time, is_(logging_stop_time))
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_0_to_X_THEN_log_file_not_created(self):
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[0], final_pv_value=1)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source)
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_not_called()
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_0_to_0_THEN_log_file_not_created(self):
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[0], final_pv_value=0)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source)
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_not_called()
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_1_to_disconnect_THEN_log_file_created(self):
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], final_pv_value="disconnect")
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source)
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_called()
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_disconnected_to_0_THEN_log_file_not_created(self):
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=["disconnect"], final_pv_value=1)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source)
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_not_called()
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_twice_from_1_to_0_THEN_log_file_created(self):
log_period_in_second = 1
expected_period = timedelta(seconds=log_period_in_second)
expected_logging_start1 = datetime(2017, 1, 1, 1, 1, 1)
data_changes = [[(datetime(2017, 1, 1, 1, 1, 2), 0, 0),
(datetime(2017, 1, 1, 1, 2, 2), 0, 1),
(datetime(2017, 1, 1, 1, 3, 2), 0, 0)]]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], data_changes=data_changes, logging_start_times=[expected_logging_start1])
expected_logging_stop_time1 = datetime(2017, 1, 1, 1, 1, 2)
expected_logging_start_time2 = datetime(2017, 1, 1, 1, 2, 2)
expected_logging_stop_time2 = datetime(2017, 1, 1, 1, 3, 2)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_in_seconds=[log_period_in_second])
log_file_initiator.check_initiated()
arg_list = self.log_file_creators[0].write_complete_file.call_args_list
logging_time_period1 = arg_list[0][0][0]
assert_that(logging_time_period1.delta, is_(expected_period))
assert_that(logging_time_period1.start_time, is_(expected_logging_start1))
assert_that(logging_time_period1.end_time, is_(expected_logging_stop_time1))
logging_time_period2 = arg_list[1][0][0]
assert_that(logging_time_period2.delta, is_(expected_period))
assert_that(logging_time_period2.start_time, is_(expected_logging_start_time2))
assert_that(logging_time_period2.end_time, is_(expected_logging_stop_time2))
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_twice_from_1_to_0_over_two_different_write_checks_THEN_two_log_files_created(self):
log_period_in_second = 1
expected_logging_start1 = datetime(2017, 1, 1, 1, 1, 1)
sample_times = [datetime(2001, 2, 3, 4, 5, 6), datetime(2001, 2, 3, 4, 5, 36), datetime(2001, 2, 3, 4, 6, 6)]
data_changes = [[],
[(datetime(2017, 1, 1, 1, 1, 2), 0, 0),
(datetime(2017, 1, 1, 1, 2, 2), 0, 1),
(datetime(2017, 1, 1, 1, 3, 2), 0, 0)]]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], data_changes=data_changes, sample_times=sample_times, logging_start_times=[expected_logging_start1])
expected_period = timedelta(seconds=log_period_in_second)
expected_logging_stop_time1 = datetime(2017, 1, 1, 1, 1, 2)
expected_logging_start_time2 = datetime(2017, 1, 1, 1, 2, 2)
expected_logging_stop_time2 = datetime(2017, 1, 1, 1, 3, 2)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_in_seconds=[log_period_in_second])
log_file_initiator.check_initiated()
log_file_initiator.check_initiated()
arg_list = self.log_file_creators[0].write_complete_file.call_args_list
logging_time_period1 = arg_list[0][0][0]
assert_that(logging_time_period1.delta, is_(expected_period))
assert_that(logging_time_period1.start_time, is_(expected_logging_start1))
assert_that(logging_time_period1.end_time, is_(expected_logging_stop_time1))
logging_time_period2 = arg_list[1][0][0]
assert_that(logging_time_period2.delta, is_(expected_period))
assert_that(logging_time_period2.start_time, is_(expected_logging_start_time2))
assert_that(logging_time_period2.end_time, is_(expected_logging_stop_time2))
def test_GIVEN_multiple_configs_with_pv_WHEN_pvs_has_changed_from_1_to_0_THEN_multiple_log_files_created(self):
log_period_in_second1 = 1
log_period_in_second2 = 0.1
expected_period_config_1 = timedelta(seconds=log_period_in_second1)
expected_period_config_2 = timedelta(seconds=log_period_in_second2)
expected_logging_start_config_1 = datetime(2017, 1, 1, 1, 1, 1)
expected_logging_start_config_2 = datetime(2017, 1, 1, 3, 4, 5)
logging_stop_time_config_1 = datetime(2017, 1, 1, 1, 1, 2)
logging_stop_time_config_2 = datetime(2017, 1, 1, 1, 2, 2)
data_changes = [[(datetime(2017, 1, 1, 1, 1, 2), 0, 0),
(datetime(2017, 1, 1, 1, 2, 2), 1, 0)]]
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1, 1], data_changes=data_changes, logging_start_times=[expected_logging_start_config_1, expected_logging_start_config_2] )
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_in_seconds=[log_period_in_second1, log_period_in_second2])
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_called_once()
logging_time_period = self.log_file_creators[0].write_complete_file.call_args[0][0]
assert_that(logging_time_period.delta, is_(expected_period_config_1))
assert_that(logging_time_period.start_time, is_(expected_logging_start_config_1))
assert_that(logging_time_period.end_time, is_(logging_stop_time_config_1))
self.log_file_creators[2].write_complete_file.assert_called_once()
logging_time_period = self.log_file_creators[2].write_complete_file.call_args[0][0]
assert_that(logging_time_period.delta, is_(expected_period_config_2))
assert_that(logging_time_period.start_time, is_(expected_logging_start_config_2))
assert_that(logging_time_period.end_time, is_(logging_stop_time_config_2))
def test_GIVEN_config_with_logging_period_which_is_a_pv_WHEN_log_THEN_logging_period_is_pv_value(self):
log_period_in_second = 2.0
expected_period = timedelta(seconds=log_period_in_second)
pv_name = "myperiodpv"
logging_period_pv_values = {pv_name + ".VAL": log_period_in_second}
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], final_pv_value=0, logging_period_pv_values=logging_period_pv_values)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_pvs=[pv_name])
log_file_initiator.check_initiated()
self.log_file_creators[0].write_complete_file.assert_called_once()
logging_time_period = self.log_file_creators[0].write_complete_file.call_args[0][0]
assert_that(logging_time_period.delta, is_(expected_period))
def test_GIVEN_config_with_pv_WHEN_pv_has_changed_from_1_to_0_and_write_file_throws_THEN_no_error_thrown_log_not_written(self):
log_period_in_second = 1
archive_data_source = DataSourceMother.set_up_data_source(initial_pv_values=[1], final_pv_value=0)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, log_period_in_seconds=[log_period_in_second], throw_on_write_complete_file=True)
try:
log_file_initiator.check_initiated()
except Exception as e:
self.fail("If underlying call throws then this should catch and logs. Error: '{}'".format(e))
self.log_file_creators[0].write_complete_file.assert_called_once()
def test_GIVEN_config_with_pv_WHEN_get_data__and_no_new_sample_time_THEN_sample_time_is_current_time_minus_set_amount(self):
last_sample_time = datetime(2001, 2, 3, 4, 5, 6)
current_time = datetime(2001, 2, 3, 4, 7, 0)
expected_to_sample_time = current_time - SAMPLING_BEHIND_REAL_TIME
sample_times = [last_sample_time, last_sample_time]
archive_data_source = DataSourceMother.set_up_data_source(sample_times=sample_times)
log_file_initiator, self.log_file_creators = DataSourceMother.create_log_file_intiator(archive_data_source, current_time=current_time)
log_file_initiator.check_initiated()
assert_that(archive_data_source.to_sample_time, is_([expected_to_sample_time]))
class DataSourceMother(object):
@staticmethod
def set_up_data_source(initial_pv_values=None,
final_pv_value=0,
logging_start_times=None,
logging_stop_time=datetime(2017, 1, 1, 1, 1, 2),
sample_times=None,
data_changes=None,
logging_period_pv_values=None,
sample_ids=None):
if initial_pv_values is None:
initial_pv_values = [1]
if logging_start_times is None:
logging_start_times = [datetime(2017, 1, 1, 1, 1, 1)] * len(initial_pv_values)
initial_archiver_data_values = []
for initial_pv_value, logging_start_time in zip(initial_pv_values, logging_start_times):
initial_archiver_data_value = ArchiverDataValue()
initial_archiver_data_value.num_val = initial_pv_value
initial_archiver_data_value.sample_time = logging_start_time
initial_archiver_data_values.append(initial_archiver_data_value)
if data_changes is None:
data_changes = [[(logging_stop_time, 0, final_pv_value)]]
if sample_times is None:
sample_times = [datetime(2010, 9, 8, 2, 3, 4), datetime(2010, 9, 8, 2, 3, 34)]
if sample_ids is None:
sample_ids = [datetime(2010, 9, 8, 2, 3, 4), datetime(2010, 9, 8, 2, 3, 34)]
archive_data_source = ArchiverDataStub(initial_archiver_data_value=initial_archiver_data_values,
data_changes=data_changes,
sample_times=sample_times,
initial_values=logging_period_pv_values,
sample_ids=sample_ids)
return archive_data_source
@staticmethod
def create_log_file_intiator(archive_data_source, log_period_in_seconds=None, log_period_pvs=None, throw_on_write_complete_file=False,
write_file_header_mock=Mock(), write_data_lines_mock=Mock(), finish_log_file_mock=Mock(),
current_time=datetime(2000, 1, 1, 1, 1, 2), time_last_actived=datetime(2000, 1, 1, 1, 1, 1),
sample_id_last_active=123):
if log_period_in_seconds is None and log_period_pvs is None:
log_period_in_seconds = [1]
if log_period_pvs is None:
log_period_pvs = [None] * len(log_period_in_seconds)
if log_period_in_seconds is None:
log_period_in_seconds = [None] * len(log_period_pvs)
configs = []
log_file_creators = []
for log_period_in_second, log_period_pv in zip(log_period_in_seconds, log_period_pvs):
log_file_creator = Mock()
if throw_on_write_complete_file:
log_file_creator.write_complete_file = Mock(side_effect=DataFileCreationError("Test problem"))
else:
log_file_creator.write_complete_file = Mock()
log_file_creator.write_file_header = write_file_header_mock
log_file_creator.write_data_lines = write_data_lines_mock
log_file_creator.finish_log_file = finish_log_file_mock
config_builder = ArchiveAccessConfigBuilder("log_file{start_time}").trigger_pv("my_pv")
if log_period_in_second is not None:
config = config_builder.logging_period_seconds(log_period_in_second).build()
else:
config = config_builder.logging_period_pv(log_period_pv).build()
configs.append(config)
log_file_creators.append(log_file_creator) # one for continuous logging
            log_file_creators.append(log_file_creator) # one for end-of-log logging
time_last_active = Mock()
sample_id = sample_id_last_active
time_last_active.get = Mock(return_value=(time_last_actived, sample_id))
def get_current_time():
return current_time
data_file_creator_factory = Mock()
data_file_creator_factory.create = Mock(side_effect=log_file_creators)
return LogFileInitiatorOnPVChange(configs, archive_data_source, time_last_active, get_current_time, data_file_creator_factory), log_file_creators
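# A minimal sketch of how the helpers above combine in a new test (the
# PV values and the period used here are hypothetical): build a stubbed
# archive data source, wire it into an initiator, then assert against
# the mocked log file creator.
def _example_initiator_usage():
    archive_data_source = DataSourceMother.set_up_data_source(
        initial_pv_values=[1], final_pv_value=0)
    initiator, creators = DataSourceMother.create_log_file_intiator(
        archive_data_source, log_period_in_seconds=[1])
    initiator.check_initiated()
    creators[0].write_complete_file.assert_called_once()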
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkVirtualAppliancesOperations:
"""NetworkVirtualAppliancesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NetworkVirtualAppliance":
"""Gets the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkVirtualAppliance, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.NetworkVirtualAppliance":
"""Updates a Network Virtual Appliance.
:param resource_group_name: The resource group name of Network Virtual Appliance.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance being updated.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to Update Network Virtual Appliance Tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkVirtualAppliance, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
parameters: "_models.NetworkVirtualAppliance",
**kwargs: Any
) -> "_models.NetworkVirtualAppliance":
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkVirtualAppliance')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
parameters: "_models.NetworkVirtualAppliance",
**kwargs: Any
) -> AsyncLROPoller["_models.NetworkVirtualAppliance"]:
"""Creates or updates the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to the create or update Network Virtual Appliance.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either NetworkVirtualAppliance or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkVirtualApplianceListResult"]:
"""Lists all Network Virtual Appliances in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.NetworkVirtualApplianceListResult"]:
"""Gets all Network Virtual Appliances in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
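# A hedged usage sketch (not part of the generated file above): this
# operations class is reached through a client rather than instantiated
# directly. The client and credential classes are assumptions based on
# the standard azure-mgmt-network and azure-identity packages; the
# resource names are hypothetical.
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient

async def example_delete_and_list(subscription_id: str) -> None:
    async with NetworkManagementClient(
            DefaultAzureCredential(), subscription_id) as client:
        # begin_delete returns an AsyncLROPoller; result() waits for it.
        poller = await client.network_virtual_appliances.begin_delete(
            resource_group_name="my-rg",              # hypothetical
            network_virtual_appliance_name="my-nva",  # hypothetical
        )
        await poller.result()
        # list() returns an AsyncItemPaged, consumed with async for.
        async for appliance in client.network_virtual_appliances.list():
            print(appliance.name)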
|
|
# coding: utf-8
"""
KubeVirt API
    This is the KubeVirt API, an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1PersistentVolumeClaimInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'access_modes': 'list[str]',
'capacity': 'dict(str, K8sIoApimachineryPkgApiResourceQuantity)',
'filesystem_overhead': 'str',
'preallocated': 'bool',
'requests': 'dict(str, K8sIoApimachineryPkgApiResourceQuantity)',
'volume_mode': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'capacity': 'capacity',
'filesystem_overhead': 'filesystemOverhead',
'preallocated': 'preallocated',
'requests': 'requests',
'volume_mode': 'volumeMode'
}
def __init__(self, access_modes=None, capacity=None, filesystem_overhead=None, preallocated=None, requests=None, volume_mode=None):
"""
V1PersistentVolumeClaimInfo - a model defined in Swagger
"""
self._access_modes = None
self._capacity = None
self._filesystem_overhead = None
self._preallocated = None
self._requests = None
self._volume_mode = None
if access_modes is not None:
self.access_modes = access_modes
if capacity is not None:
self.capacity = capacity
if filesystem_overhead is not None:
self.filesystem_overhead = filesystem_overhead
if preallocated is not None:
self.preallocated = preallocated
if requests is not None:
self.requests = requests
if volume_mode is not None:
self.volume_mode = volume_mode
@property
def access_modes(self):
"""
Gets the access_modes of this V1PersistentVolumeClaimInfo.
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
:return: The access_modes of this V1PersistentVolumeClaimInfo.
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""
Sets the access_modes of this V1PersistentVolumeClaimInfo.
AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
:param access_modes: The access_modes of this V1PersistentVolumeClaimInfo.
:type: list[str]
"""
self._access_modes = access_modes
@property
def capacity(self):
"""
Gets the capacity of this V1PersistentVolumeClaimInfo.
Capacity represents the capacity set on the corresponding PVC status
:return: The capacity of this V1PersistentVolumeClaimInfo.
:rtype: dict(str, K8sIoApimachineryPkgApiResourceQuantity)
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""
Sets the capacity of this V1PersistentVolumeClaimInfo.
Capacity represents the capacity set on the corresponding PVC status
:param capacity: The capacity of this V1PersistentVolumeClaimInfo.
:type: dict(str, K8sIoApimachineryPkgApiResourceQuantity)
"""
self._capacity = capacity
@property
def filesystem_overhead(self):
"""
Gets the filesystem_overhead of this V1PersistentVolumeClaimInfo.
Percentage of filesystem's size to be reserved when resizing the PVC
:return: The filesystem_overhead of this V1PersistentVolumeClaimInfo.
:rtype: str
"""
return self._filesystem_overhead
@filesystem_overhead.setter
def filesystem_overhead(self, filesystem_overhead):
"""
Sets the filesystem_overhead of this V1PersistentVolumeClaimInfo.
Percentage of filesystem's size to be reserved when resizing the PVC
:param filesystem_overhead: The filesystem_overhead of this V1PersistentVolumeClaimInfo.
:type: str
"""
self._filesystem_overhead = filesystem_overhead
@property
def preallocated(self):
"""
Gets the preallocated of this V1PersistentVolumeClaimInfo.
Preallocated indicates if the PVC's storage is preallocated or not
:return: The preallocated of this V1PersistentVolumeClaimInfo.
:rtype: bool
"""
return self._preallocated
@preallocated.setter
def preallocated(self, preallocated):
"""
Sets the preallocated of this V1PersistentVolumeClaimInfo.
Preallocated indicates if the PVC's storage is preallocated or not
:param preallocated: The preallocated of this V1PersistentVolumeClaimInfo.
:type: bool
"""
self._preallocated = preallocated
@property
def requests(self):
"""
Gets the requests of this V1PersistentVolumeClaimInfo.
Requests represents the resources requested by the corresponding PVC spec
:return: The requests of this V1PersistentVolumeClaimInfo.
:rtype: dict(str, K8sIoApimachineryPkgApiResourceQuantity)
"""
return self._requests
@requests.setter
def requests(self, requests):
"""
Sets the requests of this V1PersistentVolumeClaimInfo.
Requests represents the resources requested by the corresponding PVC spec
:param requests: The requests of this V1PersistentVolumeClaimInfo.
:type: dict(str, K8sIoApimachineryPkgApiResourceQuantity)
"""
self._requests = requests
@property
def volume_mode(self):
"""
Gets the volume_mode of this V1PersistentVolumeClaimInfo.
VolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
:return: The volume_mode of this V1PersistentVolumeClaimInfo.
:rtype: str
"""
return self._volume_mode
@volume_mode.setter
def volume_mode(self, volume_mode):
"""
Sets the volume_mode of this V1PersistentVolumeClaimInfo.
VolumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.
:param volume_mode: The volume_mode of this V1PersistentVolumeClaimInfo.
:type: str
"""
self._volume_mode = volume_mode
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1PersistentVolumeClaimInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
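# A minimal usage sketch of the generated model above (field values are
# hypothetical): note that to_dict() keys use the python attribute names,
# while attribute_map records the corresponding camelCase JSON keys.
def _example_pvc_info():
    info = V1PersistentVolumeClaimInfo(
        access_modes=["ReadWriteOnce"],
        preallocated=False,
        volume_mode="Filesystem",
    )
    # {'access_modes': ['ReadWriteOnce'], 'capacity': None, ...}
    return info.to_dict()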
|
|
# -*- coding: utf-8 -*-
from typing import Any, Dict, Generator, Mapping, Union
import mock
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
get_client,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
get_stream_recipient,
Recipient,
Subscription,
UserPresence,
)
from zerver.tornado.event_queue import (
maybe_enqueue_notifications,
)
class EditMessageSideEffectsTest(ZulipTestCase):
def _assert_update_does_not_notify_anybody(self, message_id: int, content: str) -> None:
url = '/json/messages/' + str(message_id)
request = dict(
message_id=message_id,
content=content,
)
with mock.patch('zerver.tornado.event_queue.maybe_enqueue_notifications') as m:
result = self.client_patch(url, request)
self.assert_json_success(result)
self.assertFalse(m.called)
def test_updates_with_pm_mention(self) -> None:
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
self.login(hamlet.email)
message_id = self.send_personal_message(
hamlet.email,
cordelia.email,
content='no mention'
)
self._assert_update_does_not_notify_anybody(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
def _login_and_send_original_stream_message(self, content: str) -> int:
'''
Note our conventions here:
Hamlet is our logged in user (and sender).
Cordelia is the receiver we care about.
Scotland is the stream we send messages to.
'''
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
self.login(hamlet.email)
self.subscribe(hamlet, 'Scotland')
self.subscribe(cordelia, 'Scotland')
message_id = self.send_stream_message(
hamlet.email,
'Scotland',
content=content,
)
return message_id
def _get_queued_data_for_message_update(self, message_id: int, content: str,
expect_short_circuit: bool=False) -> Dict[str, Any]:
'''
This function updates a message with a post to
/json/messages/(message_id).
By using mocks, we are able to capture two pieces of data:
enqueue_kwargs: These are the arguments passed in to
maybe_enqueue_notifications.
queue_messages: These are the messages that
maybe_enqueue_notifications actually
puts on the queue.
Using this helper allows you to construct a test that goes
pretty deep into the missed-messages codepath, without actually
queuing the final messages.
'''
url = '/json/messages/' + str(message_id)
request = dict(
message_id=message_id,
content=content,
)
with mock.patch('zerver.tornado.event_queue.maybe_enqueue_notifications') as m:
result = self.client_patch(url, request)
cordelia = self.example_user('cordelia')
cordelia_calls = [
call_args
for call_args in m.call_args_list
if call_args[1]['user_profile_id'] == cordelia.id
]
if expect_short_circuit:
self.assertEqual(len(cordelia_calls), 0)
return {}
# Normally we expect maybe_enqueue_notifications to be
# called for Cordelia, so continue on.
self.assertEqual(len(cordelia_calls), 1)
enqueue_kwargs = cordelia_calls[0][1]
queue_messages = []
def fake_publish(queue_name: str,
event: Union[Mapping[str, Any], str],
*args: Any) -> None:
queue_messages.append(dict(
queue_name=queue_name,
event=event,
))
with mock.patch('zerver.tornado.event_queue.queue_json_publish') as m:
m.side_effect = fake_publish
maybe_enqueue_notifications(**enqueue_kwargs)
self.assert_json_success(result)
return dict(
enqueue_kwargs=enqueue_kwargs,
queue_messages=queue_messages
)
def test_updates_with_stream_mention(self) -> None:
message_id = self._login_and_send_original_stream_message(
content='no mention',
)
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
cordelia = self.example_user('cordelia')
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=True,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
queue_messages = info['queue_messages']
self.assertEqual(len(queue_messages), 2)
self.assertEqual(queue_messages[0]['queue_name'], 'missedmessage_mobile_notifications')
mobile_event = queue_messages[0]['event']
self.assertEqual(mobile_event['user_profile_id'], cordelia.id)
self.assertEqual(mobile_event['trigger'], 'mentioned')
self.assertEqual(queue_messages[1]['queue_name'], 'missedmessage_emails')
email_event = queue_messages[1]['event']
self.assertEqual(email_event['user_profile_id'], cordelia.id)
self.assertEqual(email_event['trigger'], 'mentioned')
def test_second_mention_is_ignored(self) -> None:
message_id = self._login_and_send_original_stream_message(
content='hello @**Cordelia Lear**'
)
self._get_queued_data_for_message_update(
message_id=message_id,
content='re-mention @**Cordelia Lear**',
expect_short_circuit=True,
)
def _turn_on_stream_push_for_cordelia(self) -> None:
'''
conventions:
Cordelia is the message receiver we care about.
Scotland is our stream.
'''
cordelia = self.example_user('cordelia')
stream = self.subscribe(cordelia, 'Scotland')
recipient = get_stream_recipient(stream.id)
cordelia_subscription = Subscription.objects.get(
user_profile_id=cordelia.id,
recipient=recipient,
)
cordelia_subscription.push_notifications = True
cordelia_subscription.save()
def test_updates_with_stream_push_notify(self) -> None:
self._turn_on_stream_push_for_cordelia()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
# Even though Cordelia configured this stream for pushes,
        # we short-circuit the logic, assuming the original message
# also did a push.
self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
expect_short_circuit=True,
)
def _cordelia_connected_to_zulip(self) -> Any:
'''
Right now the easiest way to make Cordelia look
connected to Zulip is to mock the function below.
This is a bit blunt, as it affects other users too,
but we only really look at Cordelia's data, anyway.
'''
return mock.patch(
'zerver.tornado.event_queue.receiver_is_off_zulip',
return_value=False
)
def test_stream_push_notify_for_sorta_present_user(self) -> None:
self._turn_on_stream_push_for_cordelia()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
        # Simulate that Cordelia still has an actively polling client, but
        # the lack of presence info should still mark her as offline.
        #
        # Despite Cordelia being offline, we still short-circuit
        # offline notifications due to her stream push setting.
with self._cordelia_connected_to_zulip():
self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
expect_short_circuit=True,
)
def _make_cordelia_present_on_web(self) -> None:
cordelia = self.example_user('cordelia')
UserPresence.objects.create(
user_profile_id=cordelia.id,
status=UserPresence.ACTIVE,
client=get_client('web'),
timestamp=timezone_now(),
)
def test_stream_push_notify_for_fully_present_user(self) -> None:
self._turn_on_stream_push_for_cordelia()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
expect_short_circuit=True,
)
def test_always_push_notify_for_fully_present_mentioned_user(self) -> None:
cordelia = self.example_user('cordelia')
cordelia.enable_online_push_notifications = True
cordelia.save()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='newly mention @**Cordelia Lear**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=True,
idle=False,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
queue_messages = info['queue_messages']
self.assertEqual(len(queue_messages), 1)
def test_always_push_notify_for_fully_present_boring_user(self) -> None:
cordelia = self.example_user('cordelia')
cordelia.enable_online_push_notifications = True
cordelia.save()
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='nothing special about updated message',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=False,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=True,
idle=False,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
queue_messages = info['queue_messages']
# Even though Cordelia has enable_online_push_notifications set
# to True, we don't send her any offline notifications, since she
# was not mentioned.
self.assertEqual(len(queue_messages), 0)
def test_updates_with_stream_mention_of_sorta_present_user(self) -> None:
cordelia = self.example_user('cordelia')
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
        # We will simulate that the user still has an active client,
# but they don't have UserPresence rows, so we will still
# send offline notifications.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=True,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
# She will get messages enqueued. (Other tests drill down on the
# actual content of these messages.)
self.assertEqual(len(info['queue_messages']), 2)
def test_updates_with_stream_mention_of_fully_present_user(self) -> None:
cordelia = self.example_user('cordelia')
message_id = self._login_and_send_original_stream_message(
content='no mention'
)
self._make_cordelia_present_on_web()
        # Simulate Cordelia is FULLY present, not just in terms of
# browser activity, but also in terms of her client descriptors.
with self._cordelia_connected_to_zulip():
info = self._get_queued_data_for_message_update(
message_id=message_id,
content='now we mention @**Cordelia Lear**',
)
expected_enqueue_kwargs = dict(
user_profile_id=cordelia.id,
message_id=message_id,
private_message=False,
mentioned=True,
stream_push_notify=False,
stream_email_notify=False,
stream_name='Scotland',
always_push_notify=False,
idle=False,
already_notified={},
)
self.assertEqual(info['enqueue_kwargs'], expected_enqueue_kwargs)
# Because Cordelia is FULLY present, we don't need to send any offline
# push notifications or missed message emails.
self.assertEqual(len(info['queue_messages']), 0)
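# A standalone sketch of the capture pattern used throughout this class:
# patch the queue publisher with a recording side_effect, replay the
# captured kwargs through maybe_enqueue_notifications, and return the
# events that would have been enqueued.
def _capture_queue_events(enqueue_kwargs: Dict[str, Any]) -> Any:
    queue_messages = []
    def fake_publish(queue_name: str,
                     event: Union[Mapping[str, Any], str],
                     *args: Any) -> None:
        queue_messages.append(dict(queue_name=queue_name, event=event))
    with mock.patch('zerver.tornado.event_queue.queue_json_publish',
                    side_effect=fake_publish):
        maybe_enqueue_notifications(**enqueue_kwargs)
    return queue_messages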
|
|
# Copyright 2014 Rackspace, Andrew Melton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
import mock
from six.moves import StringIO
import nova.privsep.idmapshift
from nova import test
def join_side_effect(root, *args):
path = root
if root != '/':
path += '/'
path += '/'.join(args)
return path
class FakeStat(object):
def __init__(self, uid, gid):
self.st_uid = uid
self.st_gid = gid
class BaseTestCase(test.NoDBTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(fixtures.MonkeyPatch('sys.stdout', StringIO()))
self.uid_maps = [(0, 10000, 10), (10, 20000, 1000)]
self.gid_maps = [(0, 10000, 10), (10, 20000, 1000)]
class FindTargetIDTestCase(BaseTestCase):
def test_find_target_id_range_1_first(self):
actual_target = nova.privsep.idmapshift.find_target_id(
0, self.uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(10000, actual_target)
def test_find_target_id_inside_range_1(self):
actual_target = nova.privsep.idmapshift.find_target_id(
2, self.uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(10002, actual_target)
def test_find_target_id_range_2_first(self):
actual_target = nova.privsep.idmapshift.find_target_id(
10, self.uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(20000, actual_target)
def test_find_target_id_inside_range_2(self):
actual_target = nova.privsep.idmapshift.find_target_id(
100, self.uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(20090, actual_target)
def test_find_target_id_outside_range(self):
actual_target = nova.privsep.idmapshift.find_target_id(
10000, self.uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(nova.privsep.idmapshift.NOBODY_ID, actual_target)
def test_find_target_id_no_mappings(self):
actual_target = nova.privsep.idmapshift.find_target_id(
0, [], nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(nova.privsep.idmapshift.NOBODY_ID, actual_target)
def test_find_target_id_updates_memo(self):
memo = dict()
nova.privsep.idmapshift.find_target_id(
0, self.uid_maps, nova.privsep.idmapshift.NOBODY_ID, memo)
self.assertIn(0, memo)
self.assertEqual(10000, memo[0])
def test_find_target_guest_id_greater_than_count(self):
uid_maps = [(500, 10000, 10)]
# Below range
actual_target = nova.privsep.idmapshift.find_target_id(
499, uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(nova.privsep.idmapshift.NOBODY_ID, actual_target)
# Match
actual_target = nova.privsep.idmapshift.find_target_id(
501, uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(10001, actual_target)
# Beyond range
actual_target = nova.privsep.idmapshift.find_target_id(
510, uid_maps, nova.privsep.idmapshift.NOBODY_ID, dict())
self.assertEqual(nova.privsep.idmapshift.NOBODY_ID, actual_target)
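# A reference sketch (not nova's actual implementation) of the mapping
# rule the tests above encode: each map entry is (start, target, count);
# a guest id inside [start, start + count) shifts to target + (id - start),
# and anything unmapped falls back to the nobody id. The real function
# additionally memoizes results in the supplied dict.
def _find_target_id_sketch(guest_id, maps, nobody_id):
    for start, target, count in maps:
        if start <= guest_id < start + count:
            return target + (guest_id - start)
    return nobody_id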
class ShiftPathTestCase(BaseTestCase):
@mock.patch('os.lchown')
@mock.patch('os.lstat')
def test_shift_path(self, mock_lstat, mock_lchown):
mock_lstat.return_value = FakeStat(0, 0)
nova.privsep.idmapshift.shift_path(
'/test/path', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID, dict(), dict())
mock_lstat.assert_has_calls([mock.call('/test/path')])
mock_lchown.assert_has_calls([mock.call('/test/path', 10000, 10000)])
class ShiftDirTestCase(BaseTestCase):
@mock.patch('nova.privsep.idmapshift.shift_path')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_shift_dir(self, mock_walk, mock_join, mock_shift_path):
mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
mock_join.side_effect = join_side_effect
nova.privsep.idmapshift.shift_dir('/', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
files = ['a', 'b', 'c', 'd']
mock_walk.assert_has_calls([mock.call('/')])
mock_join_calls = [mock.call('/', x) for x in files]
mock_join.assert_has_calls(mock_join_calls)
args = (self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
kwargs = dict(uid_memo=dict(), gid_memo=dict())
shift_path_calls = [mock.call('/', *args, **kwargs)]
shift_path_calls += [mock.call('/' + x, *args, **kwargs)
for x in files]
mock_shift_path.assert_has_calls(shift_path_calls)
class ConfirmPathTestCase(test.NoDBTestCase):
@mock.patch('os.lstat')
def test_confirm_path(self, mock_lstat):
uid_ranges = [(1000, 1999)]
gid_ranges = [(300, 399)]
mock_lstat.return_value = FakeStat(1000, 301)
result = nova.privsep.idmapshift.confirm_path(
'/test/path', uid_ranges, gid_ranges, 50000)
mock_lstat.assert_has_calls([mock.call('/test/path')])
self.assertTrue(result)
@mock.patch('os.lstat')
def test_confirm_path_nobody(self, mock_lstat):
uid_ranges = [(1000, 1999)]
gid_ranges = [(300, 399)]
mock_lstat.return_value = FakeStat(50000, 50000)
result = nova.privsep.idmapshift.confirm_path(
'/test/path', uid_ranges, gid_ranges, 50000)
mock_lstat.assert_has_calls([mock.call('/test/path')])
self.assertTrue(result)
@mock.patch('os.lstat')
def test_confirm_path_uid_mismatch(self, mock_lstat):
uid_ranges = [(1000, 1999)]
gid_ranges = [(300, 399)]
mock_lstat.return_value = FakeStat(0, 301)
result = nova.privsep.idmapshift.confirm_path(
'/test/path', uid_ranges, gid_ranges, 50000)
mock_lstat.assert_has_calls([mock.call('/test/path')])
self.assertFalse(result)
@mock.patch('os.lstat')
def test_confirm_path_gid_mismatch(self, mock_lstat):
uid_ranges = [(1000, 1999)]
gid_ranges = [(300, 399)]
mock_lstat.return_value = FakeStat(1000, 0)
result = nova.privsep.idmapshift.confirm_path(
'/test/path', uid_ranges, gid_ranges, 50000)
mock_lstat.assert_has_calls([mock.call('/test/path')])
self.assertFalse(result)
@mock.patch('os.lstat')
def test_confirm_path_uid_nobody(self, mock_lstat):
uid_ranges = [(1000, 1999)]
gid_ranges = [(300, 399)]
mock_lstat.return_value = FakeStat(50000, 301)
result = nova.privsep.idmapshift.confirm_path(
'/test/path', uid_ranges, gid_ranges, 50000)
mock_lstat.assert_has_calls([mock.call('/test/path')])
self.assertTrue(result)
@mock.patch('os.lstat')
def test_confirm_path_gid_nobody(self, mock_lstat):
uid_ranges = [(1000, 1999)]
gid_ranges = [(300, 399)]
mock_lstat.return_value = FakeStat(1000, 50000)
result = nova.privsep.idmapshift.confirm_path(
'/test/path', uid_ranges, gid_ranges, 50000)
mock_lstat.assert_has_calls([mock.call('/test/path')])
self.assertTrue(result)
class ConfirmDirTestCase(BaseTestCase):
def setUp(self):
super(ConfirmDirTestCase, self).setUp()
self.uid_map_ranges = nova.privsep.idmapshift.get_ranges(self.uid_maps)
self.gid_map_ranges = nova.privsep.idmapshift.get_ranges(self.gid_maps)
@mock.patch('nova.privsep.idmapshift.confirm_path')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_confirm_dir(self, mock_walk, mock_join, mock_confirm_path):
mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
mock_join.side_effect = join_side_effect
mock_confirm_path.return_value = True
nova.privsep.idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
files = ['a', 'b', 'c', 'd']
mock_walk.assert_has_calls([mock.call('/')])
mock_join_calls = [mock.call('/', x) for x in files]
mock_join.assert_has_calls(mock_join_calls)
args = (self.uid_map_ranges, self.gid_map_ranges,
nova.privsep.idmapshift.NOBODY_ID)
confirm_path_calls = [mock.call('/', *args)]
confirm_path_calls += [mock.call('/' + x, *args)
for x in files]
mock_confirm_path.assert_has_calls(confirm_path_calls)
@mock.patch('nova.privsep.idmapshift.confirm_path')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_confirm_dir_short_circuit_root(self, mock_walk, mock_join,
mock_confirm_path):
mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
mock_join.side_effect = join_side_effect
mock_confirm_path.return_value = False
nova.privsep.idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
args = (self.uid_map_ranges, self.gid_map_ranges,
nova.privsep.idmapshift.NOBODY_ID)
confirm_path_calls = [mock.call('/', *args)]
mock_confirm_path.assert_has_calls(confirm_path_calls)
@mock.patch('nova.privsep.idmapshift.confirm_path')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_confirm_dir_short_circuit_file(self, mock_walk, mock_join,
mock_confirm_path):
mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
mock_join.side_effect = join_side_effect
def confirm_path_side_effect(path, *args):
if 'a' in path:
return False
return True
mock_confirm_path.side_effect = confirm_path_side_effect
nova.privsep.idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
mock_walk.assert_has_calls([mock.call('/')])
mock_join.assert_has_calls([mock.call('/', 'a')])
args = (self.uid_map_ranges, self.gid_map_ranges,
nova.privsep.idmapshift.NOBODY_ID)
confirm_path_calls = [mock.call('/', *args),
mock.call('/' + 'a', *args)]
mock_confirm_path.assert_has_calls(confirm_path_calls)
@mock.patch('nova.privsep.idmapshift.confirm_path')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_confirm_dir_short_circuit_dir(self, mock_walk, mock_join,
mock_confirm_path):
mock_walk.return_value = [('/', ['a', 'b'], ['c', 'd'])]
mock_join.side_effect = join_side_effect
def confirm_path_side_effect(path, *args):
if 'c' in path:
return False
return True
mock_confirm_path.side_effect = confirm_path_side_effect
nova.privsep.idmapshift.confirm_dir('/', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
files = ['a', 'b', 'c']
mock_walk.assert_has_calls([mock.call('/')])
mock_join_calls = [mock.call('/', x) for x in files]
mock_join.assert_has_calls(mock_join_calls)
args = (self.uid_map_ranges, self.gid_map_ranges,
nova.privsep.idmapshift.NOBODY_ID)
confirm_path_calls = [mock.call('/', *args)]
confirm_path_calls += [mock.call('/' + x, *args)
for x in files]
mock_confirm_path.assert_has_calls(confirm_path_calls)
class IntegrationTestCase(BaseTestCase):
@mock.patch('os.lchown')
@mock.patch('os.lstat')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_integrated_shift_dir(self, mock_walk, mock_join, mock_lstat,
mock_lchown):
mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
('/tmp/test/d', ['1', '2'], [])]
mock_join.side_effect = join_side_effect
def lstat(path):
stats = {
't': FakeStat(0, 0),
'a': FakeStat(0, 0),
'b': FakeStat(0, 2),
'c': FakeStat(30000, 30000),
'd': FakeStat(100, 100),
'1': FakeStat(0, 100),
'2': FakeStat(100, 100),
}
return stats[path[-1]]
mock_lstat.side_effect = lstat
nova.privsep.idmapshift.shift_dir('/tmp/test', self.uid_maps,
self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
lchown_calls = [
mock.call('/tmp/test', 10000, 10000),
mock.call('/tmp/test/a', 10000, 10000),
mock.call('/tmp/test/b', 10000, 10002),
mock.call('/tmp/test/c', nova.privsep.idmapshift.NOBODY_ID,
nova.privsep.idmapshift.NOBODY_ID),
mock.call('/tmp/test/d', 20090, 20090),
mock.call('/tmp/test/d/1', 10000, 20090),
mock.call('/tmp/test/d/2', 20090, 20090),
]
mock_lchown.assert_has_calls(lchown_calls)
@mock.patch('os.lstat')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_integrated_confirm_dir_shifted(self, mock_walk, mock_join,
mock_lstat):
mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
('/tmp/test/d', ['1', '2'], [])]
mock_join.side_effect = join_side_effect
def lstat(path):
stats = {
't': FakeStat(10000, 10000),
'a': FakeStat(10000, 10000),
'b': FakeStat(10000, 10002),
'c': FakeStat(nova.privsep.idmapshift.NOBODY_ID,
nova.privsep.idmapshift.NOBODY_ID),
'd': FakeStat(20090, 20090),
'1': FakeStat(10000, 20090),
'2': FakeStat(20090, 20090),
}
return stats[path[-1]]
mock_lstat.side_effect = lstat
result = nova.privsep.idmapshift.confirm_dir(
'/tmp/test', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
self.assertTrue(result)
@mock.patch('os.lstat')
@mock.patch('os.path.join')
@mock.patch('os.walk')
def test_integrated_confirm_dir_unshifted(self, mock_walk, mock_join,
mock_lstat):
mock_walk.return_value = [('/tmp/test', ['a', 'b', 'c'], ['d']),
('/tmp/test/d', ['1', '2'], [])]
mock_join.side_effect = join_side_effect
def lstat(path):
stats = {
't': FakeStat(0, 0),
'a': FakeStat(0, 0),
'b': FakeStat(0, 2),
'c': FakeStat(30000, 30000),
'd': FakeStat(100, 100),
'1': FakeStat(0, 100),
'2': FakeStat(100, 100),
}
return stats[path[-1]]
mock_lstat.side_effect = lstat
result = nova.privsep.idmapshift.confirm_dir(
'/tmp/test', self.uid_maps, self.gid_maps,
nova.privsep.idmapshift.NOBODY_ID)
self.assertFalse(result)
|
|
#!/usr/bin/python
# Creator: Daniel Wooten
# Version 1.0.
import os
import time
import logging
import copy as cp
import re
import sys
import json
""" This is a collection of commonly used functions in programs created by
Daniel Wooten. They should be ( and are ) utilized in other programs
created by Daniel Wooten. This file must be in the current directory of
execution for these programs or the system path
This file currently lives in...
"/usr/lib/pymodules/Python2.7"
"""
def Read_Setup( prefix ):
""" This function reads in a setup file named "[something]_setup.json".
Clearly the setup file must be formatted as a json file with a parent
dictionary. Any stadard json input is accepted.
"""
input_file = open( prefix + "_setup.txt" , "r" )
setup_file = input_file.readlines()
setup_file = [ x.rstrip( "\n" ) for x in setup_file ]
dictionary = {}
num_lines = len( setup_file )
for i in range( num_lines ):
line = setup_file[ i ].split( ',' )
dictionary[ line[ 0 ] ] = line[ 1 ]
if 'log_level' in dictionary.keys():
dictionary[ 'log_level' ] = int( dictionary[ 'log_level' ] )
input_file.close()
return( dictionary )
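# A minimal, self-contained usage sketch for Read_Setup ( illustrative only;
# the "demo" prefix and file contents below are hypothetical ).
def Demo_Read_Setup():
    """ Writes a tiny "demo_setup.txt", reads it back with Read_Setup and
    checks the parsed dictionary. """
    demo_file = open( "demo_setup.txt" , "w" )
    demo_file.write( "log_level,10\ntitle,My Run\n" )
    demo_file.close()
    settings = Read_Setup( "demo" )
    assert settings[ 'log_level' ] == 10
    assert settings[ 'title' ] == 'My Run'
    os.remove( "demo_setup.txt" )
    return( settings )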
def Read_Json_Setup( selection ):
""" This function reads in a json formatted setup file. The file
<<[ selection ]_setup.json>> takes precedence followed by the generic
<<setup.json>>.
"""
if os.path.isfile( selection + "_setup.json" ):
setup_file = open( selection + "_setup.json" , "r" )
elif os.path.isfile( "setup.json" ):
setup_file = open( "setup.json" , "r" )
else:
print( "ERROR!! No setup file found in \n <<<" + os.getcwd() + \
">>> \n for Read_Json_Setup. The program will now die. " )
sys.exit()
setup = json.load( setup_file , object_hook = Decode_Json_Dict )
setup_file.close()
return( setup )
def Read_Json_Data( file_name ):
""" This function reads in a json formatted data file.
"""
if os.path.isfile(file_name):
setup_file = open( file_name , "r" )
else:
print( "ERROR!! File <<<" + file_name + ">>> not found in \n \
<<<" + os.getcwd() + \
">>> \n for Read_Json_Setup. The program will now die.\n" )
sys.exit()
setup = json.load( setup_file , object_hook = Decode_Json_Dict )
setup_file.close()
return( setup )
def Get_Base_Name( file_name ):
""" This function gets a base name from the host file name """
end_index = file_name.rfind( "." )
base_name = file_name[ 0 : end_index ]
return( base_name )
def Start_Log( base_name , level ):
    """ This function configures logging to a dated log file and emits one
    test message at each logging level. """
log_file_name = "Log_" + base_name + "_" + time.strftime( "%d_%m_%Y" ) \
+ ".log"
LogLevel = level
logging.basicConfig( filename = log_file_name , format = \
"[%(levelname)8s] %(message)s" , filemode = 'w' , level = LogLevel )
logging.debug( "This is the debug level reporting in" )
logging.info( "This is the info level reporting in " )
logging.warning( "This is the warning level reporting in" )
logging.error( "This is the error level reporting in" )
logging.critical( "This is the critical level reporting in" )
return
def Cep():
    ''' A wrapper to place file separators in a log file at the
    info level '''
logging.info( "*****************************************************" )
return
#Function, refer to docstring for purpose
def Sep():
    '''A wrapper to visually separate functions in log files'''
logging.info( '//////////////////////////////////////////////////////' )
return
def Read_Input( file_name , form ):
""" This function reads in a file whose name is given in file_name to the
function. It's contents are saved in a list and stripped of new lines.
They are also converted to floats. """
logging.debug( '//////////////////////////////////////////////////////' )
logging.debug( "Read_Input" )
logging.debug( "Reading in file: " + file_name )
input_file = open( file_name , "r" )
file_contents = input_file.readlines()
if form == 'string':
file_contents = [ x.rstrip( "\n" ) for x in file_contents ]
elif form == 'float':
file_contents = [ float( x ) for x in file_contents ]
elif form == 'raw':
pass
else:
print( "ERROR!!!: Choice of conversion for input from file " + \
file_name + " either not given or not 'string','raw', or 'float'" )
logging.debug( "ERROR!!!: Choice of conversion for input from file " + \
file_name + " either not given or not 'string', 'raw', or 'float'" )
exit()
logging.debug( "Closing file: " + file_name )
input_file.close()
return( file_contents )
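# A minimal, self-contained usage sketch for Read_Input ( illustrative only;
# the "demo_numbers.txt" file below is hypothetical ).
def Demo_Read_Input():
    """ Writes two numbers to a file and reads them back as floats. """
    demo_file = open( "demo_numbers.txt" , "w" )
    demo_file.write( "1.0\n2.5\n" )
    demo_file.close()
    values = Read_Input( "demo_numbers.txt" , 'float' )
    assert values == [ 1.0 , 2.5 ]
    os.remove( "demo_numbers.txt" )
    return( values )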
def Decode_List( data , fun ):
""" These two functions ( Decode_List and Decode_Dict )
can be used to un-nest nested dicts and lists ( combined )
while applying "fun" to the items in these dicts and lists (
that are themselves not dicts or lists ). Keys in dicts
are always converted to ascii strings. """
output = []
for item in data:
if isinstance( item , list ):
item = Decode_List( item , fun )
elif isinstance( item , dict ):
item = Decode_Dict( item , fun )
else:
item = fun( item )
output.append( item )
return( output )
def Decode_Dict( data , fun ):
""" These two functions ( Decode_List and Decode_Dict )
can be used to un-nest nested dicts and lists ( combined )
while applying "fun" to the items in these dicts and lists (
that are themselves not dicts or lists ). Keys in dicts
are always converted to ascii strings."""
output = {}
for key , value in data.iteritems():
key = key.encode( 'ascii' )
if isinstance( value , list ):
value = Decode_List( value , fun )
elif isinstance( value , dict ):
value = Decode_Dict( value, fun )
else:
value = fun( value )
output[ key ] = value
return( output )
def Decode_Json_List( data ):
"""These two functions can be used with the json module for python as
an object hook to prevent unicode encoding of strings. Simply pass
the Decode_Dict function like so
<< a = json.load( file , object_hook = Decode_Dict ) >>
This will preserve all values but convert strings to ascii strings,
not unicode. If unicode is desired simply do not pass anything to
ojbect_hook. This function decodes lists for the json module """
output = []
for item in data:
if isinstance( item , unicode ):
item = item.encode( 'ascii' )
elif isinstance( item , list ):
item = Decode_Json_List( item )
elif isinstance( item , dict ):
item = Decode_Json_Dict( item )
output.append( item )
return( output )
def Decode_Json_Dict( data ):
"""These two functions can be used with the json module for python as
an object hook to prevent unicode encoding of strings. Simply pass
the Decode_Dict function like so
<< a = json.load( file , object_hook = Decode_Dict ) >>
This will preserve all values but convert strings to ascii strings,
not unicode. If unicode is desired simply do not pass anything to
ojbect_hook. This function decodes dicts for the json module """
output = {}
for key , value in data.iteritems():
if isinstance( key , unicode ):
key = key.encode( 'ascii' )
# Try to convert key to integer in case integer keys were input as
# strings
try:
key = int(key)
except ValueError:
pass
if isinstance( value , unicode ):
value = value.encode( 'ascii' )
elif isinstance( value , list ):
value = Decode_Json_List( value )
elif isinstance( value , dict ):
value = Decode_Json_Dict( value )
output[ key ] = value
return( output )
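# A minimal usage sketch showing Decode_Json_Dict as an object_hook
# ( illustrative only ). Integer-looking keys become ints and unicode
# strings become ascii strings.
def Demo_Decode_Json_Dict():
    text = '{ "1" : "one" , "name" : [ "a" , "b" ] }'
    data = json.loads( text , object_hook = Decode_Json_Dict )
    assert data[ 1 ] == 'one'
    assert data[ 'name' ] == [ 'a' , 'b' ]
    return( data )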
def File_Name_Conditioner( string ):
""" This function removes any undesirable characters in a file string
and replaces them with a desired substitute. """
string = string.replace( " " , "_" )
string = string.replace( "-" , "_" )
string = string.replace( "\\" , "_backslash" )
string = string.replace( "&" , "_and_" )
string = string.replace( ":" , "_" )
string = string.replace( "," , "_" )
string = string.replace( "?" , "_question_mark_" )
string = string.replace( "!" , "_exclamation_mark_" )
string = string.replace( "~" , "_tilde_" )
string = string.replace( "*" , "_asterisk_" )
string = string.replace( "<" , "_left_arrow_" )
string = string.replace( ">" , "_right_arrow_" )
string = string.replace( "^" , "_carrot_" )
string = string.replace( "$" , "_dollar_sign_" )
string = string.replace( "/" , "_forward_slash_" )
return( string )
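# A minimal usage sketch for File_Name_Conditioner ( illustrative only; the
# input string below is hypothetical ).
def Demo_File_Name_Conditioner():
    name = File_Name_Conditioner( "run 3: alpha/beta?" )
    assert name == "run_3__alpha_forward_slash_beta_question_mark_"
    return( name )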
def Save_Output_As_Json( options , data ):
""" This function saves given data as a json formated file in the
current directory
"""
Sep()
if "output_save_name" in options:
output_save_name = options[ "output_save_name" ]
else:
output_save_name = Get_Base_Name( options[ "host_file" ] ) + \
".txt_dep.json"
save_file = open( output_save_name , "w" )
    json.dump( data , save_file )
    save_file.close()
    return
def Check_Import():
""" This function simply prints a statment to make sure the import worked"""
print( "<< wooten_common.py >> imported sucessfully" )
return
|
|
"""
Dependency injection utilities
:copyright: (c) 2013-15 by Telefonica I+D.
:license: see LICENSE.txt for more details.
Python is a highly dynamic language with an "open class" implementation for
user types, so a full blown dependency injection framework is not especially
needed. For medium to large applications though there is still the
issue of how to actually implement dependency injection in the code using only
Python's standard syntax/library.
The following tools are designed to be very lightweight and flexible as to allow
their use in a variety of scenarios, including their use to aid with unit testing.
It doesn't form a *framework* but just a set of utilities to keep the dependency
injection needs in a project under control by applying it only where it makes
sense, with minimum overhead and a lean learning curve.
"""
import sys
import logging
import warnings
import types
import inspect
import functools
import copy  # used by InjectorProxy.__copy__ / __deepcopy__ below
from contextlib import contextmanager
import threading
try:
import thread
except ImportError:
# Python 3.3 exposes .get_ident on the threading module
thread = threading
PY2 = sys.version_info[0] == 2
logger = logging.getLogger(__name__)
class Key(object):
""" Wraps a value to be used as key with the injector decorator.
In some cases it may be needed to map a dependency injection to something
other than a class. For instance, we might want to make some value
injectable based on a string identifier.
    For those cases this class can be used to indicate to the decorator that it
should look in the mapping for the wrapped value.
inject = injector({ 'foo': 'FOO' })
@inject
def foo(msg=Key('foo')):
print msg
"""
def __init__(self, value, *values):
if len(values):
self.value = (value,) + values
else:
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if isinstance(other, Key):
return self.value == other.value
return self.value == other
def __ne__(self, other):
if isinstance(other, Key):
return self.value != other.value
return self.value != other
def get_callable_defaults(fn, follow_wrapped=False):
""" Helper function to extracts a map of name:default from the signature
of a function.
"""
try: # PY35
sign = inspect.signature(fn, follow_wrapped=follow_wrapped)
defaults = dict(
(p.name, p.annotation if p.default is Key else p.default)
for p in sign.parameters.values()
if p.default is not p.empty
)
except (TypeError, ValueError, AttributeError) as ex:
if follow_wrapped and not isinstance(ex, ValueError):
raise RuntimeError(
'injector is configured to follow wrapped methods but your Python '
'version does not support this feature')
try: # PY3
args, _, _, defaults, _, kwonlydefaults, _ = inspect.getfullargspec(fn)
except AttributeError: # PY2
args, _, _, defaults = inspect.getargspec(fn)
kwonlydefaults = None
defaults = dict(zip(reversed(args), reversed(defaults))) if defaults else {}
if kwonlydefaults:
defaults.update(kwonlydefaults)
return defaults
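# A minimal sketch of what get_callable_defaults extracts (assumed example;
# the `sample` function below is hypothetical):
def _get_callable_defaults_sketch():
    def sample(queue, config=dict, retries=3):
        pass
    # Only parameters with defaults are reported, mapped name -> default
    defaults = get_callable_defaults(sample)
    assert defaults == {'config': dict, 'retries': 3}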
def injector(dependencies, warn=True, follow_wrapped=False):
""" Factory for the dependency injection decorator. It's meant to be
initialized with the map of dependencies to use on decorated functions.
inject = injector({
ConfigManager: ConfigManager('settings.cfg'),
Redis: Redis('127.0.0.1')
})
@inject
def process(queue, config=ConfigManager, redis=Redis):
return redis.hmget(config['info_key'])
Dependency resolution is very straightforward, no inheritance is taken
into account, the dependency map must be initialized with the actual
classes used to annotate the decorated functions.
When a decorated method defines a dependency not correctly configured
in the map it will raise a LookupError to indicate so.
Note that the dependency map can be updated at any time, affecting
following calls to decorated methods.
A common pattern is to apply dependency injection only when instantiating
a class. This can be easily accomplished by decorating the class' __init__
method, storing injected values as object attributes.
@inject
def __init__(self, config=ConfigManager):
self._config = config
If you see a TypeError with the message 'got multiple values for keyword
argument', make sure that all calls to the decorated method always use
keyword arguments for injected values. Use of positional injected arguments
is not supported.
"""
if isinstance(dependencies, (types.FunctionType, types.BuiltinFunctionType, functools.partial)):
raise RuntimeError('It seems injector is being used as a decorator instead of a decorator factory. Usage: inject = injector(deps)')
# Prepare the dependencies storage stack
deps_stack = [dependencies]
def wrapper(fn, __warn__=warn, follow_wrapped=follow_wrapped):
# Mapping for injectable values (classes used as default value)
mapping = {}
defaults = get_callable_defaults(fn, follow_wrapped=follow_wrapped)
for name, default in defaults.items():
if isinstance(default, Key):
mapping[name] = default.value
elif inspect.isclass(default):
mapping[name] = default
if __warn__ and not mapping:
warnings.warn('{0}: No injectable params found. You can safely remove the decorator.'.format(fn.__name__), stacklevel=2)
return fn
# Micro optimization: prepare mapping as a list of pairs
pairs = tuple(mapping.items())
# Wrapper executed on each invocation of the decorated method
@functools.wraps(fn)
def inner(*args, **kwargs):
# Micro optimization: cache logger level
debug = logger.isEnabledFor(logging.DEBUG)
# Alias the latest dependencies
deps = deps_stack[-1]
# Adapt for deprecated property
if __warn__ and deps is not wrapper.dependencies:
warnings.warn('dependencies property is deprecated, please use patch/unpatch', stacklevel=2)
patch(wrapper.dependencies)
deps = wrapper.dependencies
# Iterate over the set of 'injectable' parameters
for name, dependency in pairs:
# If the argument was not explicitly given inject it
if name not in kwargs:
debug and logger.debug('%s: Injecting %s with %s', fn.__name__, name, dependency)
                    # Avoid using the `in` operator to check, so we can work
                    # with maps not supporting __contains__
try:
kwargs[name] = deps[dependency]
except KeyError:
raise LookupError('Unable to find an instance for {0} when calling {1}'.format(
dependency, fn.__name__))
return fn(*args, **kwargs)
return inner
def patch(deps):
deps_stack.append(deps)
wrapper.dependencies = deps
def unpatch():
if len(deps_stack) < 2:
raise RuntimeError('Unable to unpatch. Did you call patch?')
deps_stack.pop()
wrapper.dependencies = deps_stack[-1]
# Allow calling sites to change the dependency map
wrapper.patch = patch
wrapper.unpatch = unpatch
# Deprecated: Expose the dependency map publicly in the decorator
wrapper.dependencies = deps_stack[-1]
return wrapper
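# A minimal, self-contained usage sketch of the injector factory (assumed
# example; the `Greeter` class below is hypothetical, not part of this module):
def _injector_usage_sketch():
    class Greeter(object):
        def greet(self):
            return 'hello'
    inject = injector({Greeter: Greeter()}, warn=False)
    @inject
    def salute(greeter=Greeter):
        return greeter.greet()
    # `greeter` is resolved from the dependency map at call time
    assert salute() == 'hello'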
def MetaInject(inject_fn):
"""
    Builds a metaclass that uses the *inject_fn* parameter as the dependency injector.
"""
def is_user_function(name, fn):
""" Checks that a function isn't named as an operator overload (__name__) """
return callable(fn) and name[:2] != '__' and name[-2:] != '__'
class ActualMetaInject(type):
"""
Metaclass to define the dependency injection in a class level instead
of requiring the decorator definition in every instance method.
        This might be used in classes that inject dependencies for most of
their methods.
class Foo(object):
__metaclass__ = MetaInject(inject)
# this method will be automatically decorated with `inject`
def foo(self, redis=Redis):
pass
"""
def __new__(cls, name, bases, dct):
"""
Generates a new instance including the injector factory for every
method except for *operator overloads*.
"""
# Filter methods to be decorated
methods = ((k, v) for (k, v) in dct.items() if is_user_function(k, v))
for m, fn in methods:
dct[m] = inject_fn(fn, __warn__=False)
return type.__new__(cls, name, bases, dct)
return ActualMetaInject
class DependencyMap(object):
"""
Implements the "dict" protocol for the dependencies but applies
custom logic on how to obtain them based on the configured flags:
FACTORY: obtain the value by executing a function
SINGLETON: only execute the factory once
THREAD: only execute the factory once for each unique thread
"""
NONE = 0
FACTORY = 1
SINGLETON = 2
THREAD = 4
def __init__(self, *args, **kwargs):
self._values = dict(*args, **kwargs)
self._flags = {}
self._singletons = {}
self._threadlocals = threading.local()
def __call__(self, key):
""" descriptor factory method.
>>> dm = DependencyMap()
>>> class Bar(object):
pass
>>> class Foo(object):
my_injected_dep = dm(Spam)
"""
return InjectorDescriptor(key, self)
def __getitem__(self, key):
# Unwrap Key instances
if isinstance(key, Key):
key = key.value
value = self._values[key]
flags = self._flags.get(key, DependencyMap.NONE)
# HACK: Somewhat complex code but we strive for performance here
try:
if flags & DependencyMap.FACTORY:
if flags & DependencyMap.SINGLETON:
if key not in self._singletons:
logger.debug('Running singleton factory for dependency %s', key)
self._singletons[key] = value(self)
value = self._singletons[key]
elif flags & DependencyMap.THREAD:
if not hasattr(self._threadlocals, key):
logger.debug('Running thread factory for dependency %s in thread (%d)',
key, thread.get_ident())
setattr(self._threadlocals, key, value(self))
value = getattr(self._threadlocals, key)
else:
logger.debug('Running factory for dependency %s', key)
value = value(self)
except Exception as e:
            # factory method's exceptions might occur at devel time,
            # better to log them prominently so they can be fixed quickly
logger.exception('Unexpected problem when creating an instance')
raise e
return value
def __setitem__(self, key, value):
# Make sure we remove any flags associated with the key
if key in self._flags:
del self._flags[key]
self._values[key] = value
def __contains__(self, key):
# Unwrap Key instances
if isinstance(key, Key):
key = key.value
return key in self._values
def __enter__(self):
""" ContextManager interface to temporally modify dependencies.
>>> deps[MyClass] = True
>>> with deps:
>>> deps[MyClass] = False
>>> assert deps[MyClass] is True
"""
self._saved = (self._values, self._flags)
self._values = dict((k, v) for k, v in self._values.items())
return self._values
def __exit__(self, type, value, traceback):
self._values, self._flags = self._saved
def proxy(self, key):
""" Proxy factory method.
>>> dm = DependencyMap()
>>> my_injected_dep = dm.proxy(Spam)
"""
return InjectorProxy(self, key)
def register(self, key, value, flags=NONE):
""" Register a new dependency optionally giving it a set of flags
"""
logger.debug('Registered %s with flags=%d', key, flags)
# Unwrap Key instances
if isinstance(key, Key):
key = key.value
self._values[key] = value
self._flags[key] = flags
def factory(self, key, flags=NONE):
""" Factory decorator to register functions as dependency factories
"""
        def decorator(fn):
            self.register(key, fn, flags | DependencyMap.FACTORY)
            # Return fn so the decorated name is not rebound to None
            return fn
        return decorator
def singleton(self, key):
return self.factory(key, flags=DependencyMap.SINGLETON)
def thread(self, key):
return self.factory(key, flags=DependencyMap.THREAD)
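# A minimal usage sketch of DependencyMap flags (assumed example; the keys
# below are hypothetical):
def _dependency_map_sketch():
    dm = DependencyMap()
    dm['answer'] = 42                   # plain value, returned as-is
    @dm.singleton('expensive')          # factory executed at most once
    def _build(deps):
        return object()
    first = dm['expensive']
    assert dm['expensive'] is first     # singleton caches the instance
    assert dm['answer'] == 42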
class ContextualDependencyMap(DependencyMap):
""" Specialized dependency map to support scenarios where different
dependency instances should be used based on some context.
    Provisioning of dependencies is only done once, but singleton/thread
    factory functions can be executed once for every different
    context. For instance, when a language setting is used this can
help organize the dependencies with factories depending on it.
"""
def __init__(self, *args, **kwargs):
super(ContextualDependencyMap, self).__init__(*args, **kwargs)
self._maps = {}
self.map = self
@contextmanager
def activate(self, context):
""" Context manager to temporary activate a given DependencyMap
for the duration of the with block.
with deps.activate('es'):
...
"""
saved = self.map
try:
yield self.context(context)
finally:
self.map = saved
def context(self, context=None):
""" Switches the active set of the dependencies. New context values
will automatically create a DependencyMap associated with it.
Returns the dependency map instance switched to.
"""
# If no context is given the context-less map is activated
if context is None:
self.map = self
return self.map
# Every new context is associated with an isolated dependency
# map, which is initialized with the current state for the root map.
if context not in self._maps:
logger.debug('Initializing dependency map for context: %s', context)
self._maps[context] = DependencyMap()
for k, v in self._values.items():
self._maps[context].register(k, v, self._flags.get(k, DependencyMap.NONE))
logger.debug('Switched dependency map context to: %s', context)
self.map = self._maps[context]
return self.map
def reset(self):
""" Destroys any reference to specific contexts. This method is specially
suited for unit testing.
"""
self._maps = {}
self.context(None)
def __getitem__(self, key):
if self.map is self:
return super(ContextualDependencyMap, self).__getitem__(key)
# Forward the query to the current context's map
return self.map[key]
def __setitem__(self, key, value):
""" When setting a value it's assigned to the current map
"""
if self.map is self:
super(ContextualDependencyMap, self).__setitem__(key, value)
else:
self.map[key] = value
def __contains__(self, key):
if self.map is self:
return super(ContextualDependencyMap, self).__contains__(key)
return key in self.map
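# A minimal usage sketch of ContextualDependencyMap (assumed example; the
# 'es' context and 'greeting' key are hypothetical):
def _contextual_map_sketch():
    deps = ContextualDependencyMap()
    deps['greeting'] = 'hello'          # value in the context-less root map
    with deps.activate('es'):
        deps['greeting'] = 'hola'       # only visible in the 'es' context
        assert deps['greeting'] == 'hola'
    assert deps['greeting'] == 'hello'  # root map restored after the block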
class PatchedDependencyMap(object):
""" Serves the purpose of overriding values from a dependency map. Specially useful for
modifying dependencies while testing.
def setUp(self):
# Replace the map in the inject decorator with a patched one
deps = PatchedDependencyMap(inject.dependencies)
inject.dependencies = deps
deps[ConfigManager] = mock()
def tearDown(self):
# Restore original dependency map
inject.dependencies = inject.dependencies.target
"""
def __init__(self, depsmap):
self.target = depsmap
self._patched = {}
def __getitem__(self, key):
""" This is hacky and easy to break so tread lightly. The purpose is to hijack the getter
in the target dependency map so that dependency hierarchies can also look up into
patched ones.
"""
# HACK: Note that we have to override the getter in the class and not the instance
# Python will ignore an overridden __getitem__ set on the instance object, calling
# always the unbound class method.
target_cls = self.target.__class__
target_getter = target_cls.__getitem__
def getter(inst, key):
if key in self._patched:
return self._patched[key]
return target_getter(inst, key)
try:
target_cls.__getitem__ = getter
return getter(self.target, key)
finally:
target_cls.__getitem__ = target_getter
def __setitem__(self, key, value):
# Unwrap Key instances
if isinstance(key, Key):
key = key.value
self._patched[key] = value
def __contains__(self, key):
return (key in self._patched) or (key in self.target)
def __getattr__(self, key):
""" Forward attribute access to the target map
"""
return getattr(self.target, key)
def copy(self):
""" expose dict method to help with mocking frameworks """
return self._patched.copy()
def update(self, *args, **kwargs):
""" expose dict method to help with mocking frameworks """
self._patched.update(*args, **kwargs)
def clear(self):
""" expose dict method to help with mocking frameworks """
self._patched.clear()
class InjectorDescriptor(object):
"""alternate way of using the injector with a descriptor
>>> dm = DependencyMap()
>>> class MyClass(object):
myfoo = dm(FOO)
>>> 'when unit testing just clear the singletons dict'
>>> class FooTestCase(unittest.TestCase):
def setUp():
dm._singletons.clear()
"""
def __init__(self, class_obj, dependencies):
self.class_obj = class_obj
self.dependencies = dependencies
def __get__(self, inst, cls):
# Dependency map already introduces a caching mechanism, no need
# to insert the resolved dependency into the instance.
# If wanted, just iterate the cls.__dict__ looking for the key to
# the descriptor with same id as self
try:
return self.dependencies[self.class_obj]
except KeyError:
raise LookupError('Unable to find an instance for {0}'.format(self.class_obj))
class InjectorProxy(object):
"""
Alternate way of using the injector with a Proxy
>>> dm = DependencyMap()
>>> myfoo = dm.proxy(FOO)
This code is based on the LocalProxy implemented by Werkzeug
https://github.com/pallets/werkzeug/blob/master/werkzeug/local.py#L254
"""
__slots__ = ('__dependencies', '__class_obj', '__dict__')
def __init__(self, dependencies, class_obj):
object.__setattr__(self, '_InjectorProxy__dependencies', dependencies)
object.__setattr__(self, '_InjectorProxy__class_obj', class_obj)
def _get_current_object(self):
try:
return self.__dependencies[self.__class_obj]
except KeyError:
raise LookupError('Unable to find an instance for {0}'.format(self.__class_obj))
@property
def __dict__(self):
return self._get_current_object().__dict__
def __repr__(self):
return repr(self._get_current_object())
def __bool__(self):
return bool(self._get_current_object())
def __unicode__(self):
return unicode(self._get_current_object())
def __dir__(self):
return dir(self._get_current_object())
def __getattr__(self, name):
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
if PY2:
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object()) # noqa
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
__coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
__enter__ = lambda x: x._get_current_object().__enter__()
__exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
__radd__ = lambda x, o: o + x._get_current_object()
__rsub__ = lambda x, o: o - x._get_current_object()
__rmul__ = lambda x, o: o * x._get_current_object()
__rdiv__ = lambda x, o: o / x._get_current_object()
if PY2:
__rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
else:
__rtruediv__ = __rdiv__
__rfloordiv__ = lambda x, o: o // x._get_current_object()
__rmod__ = lambda x, o: o % x._get_current_object()
__rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
__copy__ = lambda x: copy.copy(x._get_current_object())
__deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
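# A minimal usage sketch of InjectorProxy obtained via DependencyMap.proxy
# (assumed example; the 'config' key is hypothetical):
def _proxy_usage_sketch():
    dm = DependencyMap()
    dm['config'] = {'debug': True}
    config = dm.proxy('config')  # lazy proxy, resolved on every access
    assert config['debug'] is True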
|
|
# Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
import mock
from oslo_serialization import jsonutils
import six
from openstack_dashboard.api.rest import keystone
from openstack_dashboard.test import helpers as test
class KeystoneRestTestCase(test.TestCase):
#
# Version
#
@mock.patch.object(keystone.api, 'keystone')
def test_version_get(self, kc):
request = self.mock_rest_request()
kc.get_version.return_value = '2.0'
response = keystone.Version().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"version": "2.0"})
kc.get_version.assert_called_once_with()
#
# Users
#
@mock.patch.object(keystone.api, 'keystone')
def test_user_get(self, kc):
request = self.mock_rest_request()
kc.user_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.User().get(request, 'the_id')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.user_get.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_user_get_current(self, kc):
request = self.mock_rest_request(**{'user.id': 'current_id'})
kc.user_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.User().get(request, 'current')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.user_get.assert_called_once_with(request, 'current_id')
@mock.patch.object(keystone.api, 'keystone')
def test_user_get_list(self, kc):
request = self.mock_rest_request(**{
'session.get': mock.Mock(return_value='the_domain'),
'GET': {},
})
kc.user_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Users().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
kc.user_list.assert_called_once_with(request, project=None,
domain='the_domain', group=None,
filters=None)
@mock.patch.object(keystone.api, 'keystone')
def test_user_get_list_with_filters(self, kc):
filters = {'enabled': True}
request = self.mock_rest_request(**{
'session.get': mock.Mock(return_value='the_domain'),
'GET': dict(**filters),
})
kc.user_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Users().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
kc.user_list.assert_called_once_with(request, project=None,
domain='the_domain', group=None,
filters=filters)
def test_user_create_full(self):
self._test_user_create(
'{"name": "bob", '
'"password": "sekrit", "project_id": "project123", '
'"email": "spam@company.example"}',
{
'name': 'bob',
'password': 'sekrit',
'email': 'spam@company.example',
'project': 'project123',
'domain': 'the_domain',
'enabled': True
}
)
def test_user_create_existing_role(self):
self._test_user_create(
'{"name": "bob", '
'"password": "sekrit", "project_id": "project123", '
'"email": "spam@company.example"}',
{
'name': 'bob',
'password': 'sekrit',
'email': 'spam@company.example',
'project': 'project123',
'domain': 'the_domain',
'enabled': True
}
)
def test_user_create_no_project(self):
self._test_user_create(
'{"name": "bob", '
'"password": "sekrit", "project_id": "", '
'"email": "spam@company.example"}',
{
'name': 'bob',
'password': 'sekrit',
'email': 'spam@company.example',
'project': None,
'domain': 'the_domain',
'enabled': True
}
)
def test_user_create_partial(self):
self._test_user_create(
'{"name": "bob"}',
{
'name': 'bob',
'password': None,
'email': None,
'project': None,
'domain': 'the_domain',
'enabled': True
}
)
@mock.patch.object(keystone.api, 'keystone')
def _test_user_create(self, supplied_body, add_user_call, kc):
request = self.mock_rest_request(body=supplied_body)
kc.get_default_domain.return_value = mock.Mock(**{'id': 'the_domain'})
kc.user_create.return_value.id = 'user123'
kc.user_create.return_value = mock.Mock(**{
'id': 'user123',
'to_dict.return_value': {'id': 'user123', 'name': 'bob'}
})
response = keystone.Users().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/keystone/users/user123')
self.assertEqual(response.json,
{"id": "user123", "name": "bob"})
kc.user_create.assert_called_once_with(request, **add_user_call)
@mock.patch.object(keystone.api, 'keystone')
def test_user_delete_many(self, kc):
request = self.mock_rest_request(body='''
["id1", "id2", "id3"]
''')
response = keystone.Users().delete(request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.user_delete.assert_has_calls([
mock.call(request, 'id1'),
mock.call(request, 'id2'),
mock.call(request, 'id3'),
])
@mock.patch.object(keystone.api, 'keystone')
def test_user_delete(self, kc):
request = self.mock_rest_request()
response = keystone.User().delete(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.user_delete.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_user_patch_password(self, kc):
request = self.mock_rest_request(body='''
{"password": "sekrit"}
''')
user = keystone.User()
kc.user_get = mock.MagicMock(return_value=user)
response = user.patch(request, 'user123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.user_update_password.assert_called_once_with(request,
user,
'sekrit')
@mock.patch.object(keystone.api, 'keystone')
def test_user_patch_enabled(self, kc):
request = self.mock_rest_request(body='''
{"enabled": false}
''')
user = keystone.User()
kc.user_get = mock.MagicMock(return_value=user)
response = user.patch(request, 'user123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.user_get.assert_called_once_with(request, 'user123')
kc.user_update_enabled.assert_called_once_with(request,
user,
False)
@mock.patch.object(keystone.api, 'keystone')
def test_user_patch_project(self, kc):
request = self.mock_rest_request(body='''
{"project": "other123"}
''')
user = keystone.User()
kc.user_get = mock.MagicMock(return_value=user)
response = user.patch(request, 'user123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.user_update.assert_called_once_with(request,
user,
project='other123')
@mock.patch.object(keystone.api, 'keystone')
def test_user_patch_multiple(self, kc):
request = self.mock_rest_request(body='''
{"project": "other123", "name": "something"}
''')
user = keystone.User()
kc.user_get = mock.MagicMock(return_value=user)
response = user.patch(request, 'user123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.user_update.assert_called_once_with(request,
user,
project='other123',
name='something')
#
# Roles
#
@mock.patch.object(keystone.api, 'keystone')
def test_role_get(self, kc):
request = self.mock_rest_request()
kc.role_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.Role().get(request, 'the_id')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.role_get.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_role_get_default(self, kc):
request = self.mock_rest_request()
kc.get_default_role.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.Role().get(request, 'default')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.get_default_role.assert_called_once_with(request)
kc.role_get.assert_not_called()
@mock.patch.object(keystone.api, 'keystone')
def test_role_get_list(self, kc):
request = self.mock_rest_request(**{'GET': {}})
kc.role_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Roles().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
kc.role_list.assert_called_once_with(request)
@mock.patch.object(keystone.api, 'keystone')
def test_role_get_for_user(self, kc):
request = self.mock_rest_request(**{'GET': {'user_id': 'user123',
'project_id': 'project123'}})
kc.roles_for_user.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Roles().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
kc.roles_for_user.assert_called_once_with(request, 'user123',
'project123')
@mock.patch.object(keystone.api, 'keystone')
def test_role_create(self, kc):
request = self.mock_rest_request(body='''
{"name": "bob"}
''')
kc.role_create.return_value.id = 'role123'
kc.role_create.return_value.to_dict.return_value = {
'id': 'role123', 'name': 'bob'
}
response = keystone.Roles().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/keystone/roles/role123')
self.assertEqual(response.json, {"id": "role123", "name": "bob"})
kc.role_create.assert_called_once_with(request, 'bob')
@mock.patch.object(keystone.api, 'keystone')
def test_role_grant(self, kc):
request = self.mock_rest_request(body='''
{"action": "grant", "data": {"user_id": "user123",
"role_id": "role123", "project_id": "project123"}}
''')
response = keystone.ProjectRole().put(request, "project1", "role2",
"user3")
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.add_tenant_user_role.assert_called_once_with(request, 'project1',
'user3', 'role2')
@mock.patch.object(keystone.api, 'keystone')
def test_role_delete_many(self, kc):
request = self.mock_rest_request(body='''
["id1", "id2", "id3"]
''')
response = keystone.Roles().delete(request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.role_delete.assert_has_calls([
mock.call(request, 'id1'),
mock.call(request, 'id2'),
mock.call(request, 'id3'),
])
@mock.patch.object(keystone.api, 'keystone')
def test_role_delete(self, kc):
request = self.mock_rest_request()
response = keystone.Role().delete(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.role_delete.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_role_patch(self, kc):
request = self.mock_rest_request(body='{"name": "spam"}')
response = keystone.Role().patch(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.role_update.assert_called_once_with(request,
'the_id',
'spam')
#
# Domains
#
@mock.patch.object(keystone.api, 'keystone')
def test_domain_get(self, kc):
request = self.mock_rest_request()
kc.domain_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.Domain().get(request, 'the_id')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.domain_get.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_domain_get_default(self, kc):
request = self.mock_rest_request()
kc.get_default_domain.return_value.to_dict.return_value = {
'name': 'Ni!'
}
response = keystone.Domain().get(request, 'default')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.get_default_domain.assert_called_once_with(request)
kc.domain_get.assert_not_called()
@mock.patch.object(keystone.api, 'keystone')
def test_domain_get_list(self, kc):
request = self.mock_rest_request()
kc.domain_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
]
response = keystone.Domains().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
kc.domain_list.assert_called_once_with(request)
def test_domain_create_full(self):
self._test_domain_create(
'{"name": "bob", '
'"description": "sekrit", "enabled": false}',
{
'description': 'sekrit',
'enabled': False
}
)
def test_domain_create_partial(self):
self._test_domain_create(
'{"name": "bob"}',
{
'description': None,
'enabled': True
}
)
@mock.patch.object(keystone.api, 'keystone')
def _test_domain_create(self, supplied_body, expected_call, kc):
request = self.mock_rest_request(body=supplied_body)
kc.domain_create.return_value.id = 'domain123'
kc.domain_create.return_value.to_dict.return_value = {
'id': 'domain123', 'name': 'bob'
}
response = keystone.Domains().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/keystone/domains/domain123')
self.assertEqual(response.json, {"id": "domain123", "name": "bob"})
kc.domain_create.assert_called_once_with(request, 'bob',
**expected_call)
@mock.patch.object(keystone.api, 'keystone')
def test_domain_delete_many(self, kc):
request = self.mock_rest_request(body='''
["id1", "id2", "id3"]
''')
response = keystone.Domains().delete(request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.domain_delete.assert_has_calls([
mock.call(request, 'id1'),
mock.call(request, 'id2'),
mock.call(request, 'id3'),
])
@mock.patch.object(keystone.api, 'keystone')
def test_domain_delete(self, kc):
request = self.mock_rest_request()
response = keystone.Domain().delete(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.domain_delete.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_domain_patch(self, kc):
request = self.mock_rest_request(body='{"name": "spam"}')
response = keystone.Domain().patch(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.domain_update.assert_called_once_with(request,
'the_id',
name='spam',
description=None,
enabled=None)
#
# Projects
#
@mock.patch.object(keystone.api, 'keystone')
def test_project_get(self, kc):
request = self.mock_rest_request()
kc.tenant_get.return_value.to_dict.return_value = {'name': 'Ni!'}
response = keystone.Project().get(request, 'the_id')
self.assertStatusCode(response, 200)
self.assertEqual(response.json, {"name": "Ni!"})
kc.tenant_get.assert_called_once_with(request, 'the_id')
def test_project_get_list(self):
self._test_project_get_list(
{},
{
'paginate': False,
'marker': None,
'domain': None,
'user': None,
'admin': True,
'filters': None
}
)
def test_project_get_list_with_params_true(self):
self._test_project_get_list(
{
'paginate': 'true',
'admin': 'true'
},
{
'paginate': True,
'marker': None,
'domain': None,
'user': None,
'admin': True,
'filters': None
}
)
def test_project_get_list_with_params_false(self):
self._test_project_get_list(
{
'paginate': 'false',
'admin': 'false'
},
{
'paginate': False,
'marker': None,
'domain': None,
'user': None,
'admin': False,
'filters': None
}
)
@mock.patch.object(keystone.api, 'keystone')
def _test_project_get_list(self, params, expected_call, kc):
request = self.mock_rest_request(**{'GET': dict(**params)})
kc.tenant_list.return_value = ([
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ptang!'}})
], False)
with mock.patch.object(settings, 'DEBUG', True):
response = keystone.Projects().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"has_more": False,
"items": [{"name": "Ni!"}, {"name": "Ptang!"}]})
kc.tenant_list.assert_called_once_with(request, **expected_call)
@mock.patch.object(keystone.api, 'keystone')
def test_project_get_list_with_filters(self, kc):
filters = {'name': 'Ni!'}
request = self.mock_rest_request(**{'GET': dict(**filters)})
kc.tenant_list.return_value = ([
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'Ni!'}})
], False)
with mock.patch.object(settings, 'DEBUG', True):
response = keystone.Projects().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"has_more": False,
"items": [{"name": "Ni!"}, {"name": "Ni!"}]})
kc.tenant_list.assert_called_once_with(request, paginate=False,
marker=None, domain=None,
user=None, admin=True,
filters=filters)
def test_project_create_full(self):
self._test_project_create(
'{"name": "bob", '
'"domain_id": "domain123", "description": "sekrit", '
'"enabled": false}',
{
'description': 'sekrit',
'domain': 'domain123',
'enabled': False
}
)
def test_project_create_partial(self):
self._test_project_create(
'{"name": "bob"}',
{
'description': None,
'domain': None,
'enabled': True
}
)
@mock.patch.object(keystone.api, 'keystone')
def _test_project_create(self, supplied_body, expected_call, kc):
request = self.mock_rest_request(body=supplied_body)
kc.tenant_create.return_value.id = 'project123'
kc.tenant_create.return_value.to_dict.return_value = {
'id': 'project123', 'name': 'bob'
}
response = keystone.Projects().post(request)
self.assertStatusCode(response, 201)
self.assertEqual(response['location'],
'/api/keystone/projects/project123')
self.assertEqual(response.json,
{"id": "project123", "name": "bob"})
kc.tenant_create.assert_called_once_with(request, 'bob',
**expected_call)
@mock.patch.object(keystone.api, 'keystone')
def test_project_delete_many(self, kc):
request = self.mock_rest_request(body='''
["id1", "id2", "id3"]
''')
response = keystone.Projects().delete(request)
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.tenant_delete.assert_has_calls([
mock.call(request, 'id1'),
mock.call(request, 'id2'),
mock.call(request, 'id3'),
])
@mock.patch.object(keystone.api, 'keystone')
def test_project_delete(self, kc):
request = self.mock_rest_request()
response = keystone.Project().delete(request, 'the_id')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.tenant_delete.assert_called_once_with(request, 'the_id')
@mock.patch.object(keystone.api, 'keystone')
def test_project_patch(self, kc):
# nothing in the Horizon code documents what additional parameters are
# allowed, so we'll just assume GIGO
request = self.mock_rest_request(body='''
{"name": "spam", "domain_id": "domain123", "foo": "bar"}
''')
response = keystone.Project().patch(request, 'spam123')
self.assertStatusCode(response, 204)
self.assertEqual(response.content, b'')
kc.tenant_update.assert_called_once_with(request,
'spam123',
name='spam', foo='bar',
description=None,
domain='domain123',
enabled=None)
#
# Service Catalog
#
@mock.patch.object(keystone.api, 'keystone')
def test_service_catalog_get(self, kc):
request = self.mock_rest_request()
response = keystone.ServiceCatalog().get(request)
self.assertStatusCode(response, 200)
content = jsonutils.dumps(request.user.service_catalog,
sort_keys=settings.DEBUG)
if six.PY3:
content = content.encode('utf-8')
self.assertEqual(content, response.content)
#
# User Session
#
@mock.patch.object(keystone.api, 'keystone')
def test_user_session_get(self, kc):
request = self.mock_rest_request()
request.user = mock.Mock(
services_region='some region',
super_secret_thing='not here',
token=type('', (object,), {'id': 'token here'}),
is_authenticated=lambda: True,
spec=['services_region', 'super_secret_thing']
)
response = keystone.UserSession().get(request)
self.assertStatusCode(response, 200)
content = jsonutils.loads(response.content)
self.assertEqual(content['services_region'], 'some region')
self.assertEqual(content['token'], 'token here')
self.assertNotIn('super_secret_thing', content)
#
# Groups
#
@mock.patch.object(keystone.api, 'keystone')
def test_group_get_list(self, kc):
request = self.mock_rest_request(**{
'session.get': mock.Mock(return_value='the_domain'),
'GET': {},
})
kc.group_list.return_value = [
mock.Mock(**{'to_dict.return_value': {'name': 'uno!'}}),
mock.Mock(**{'to_dict.return_value': {'name': 'dos!'}})
]
response = keystone.Groups().get(request)
self.assertStatusCode(response, 200)
self.assertEqual(response.json,
{"items": [{"name": "uno!"}, {"name": "dos!"}]})
kc.group_list.assert_called_once_with(request, domain='the_domain')
#
# Services
#
@mock.patch.object(keystone.api, 'keystone')
def test_services_get(self, kc):
request = self.mock_rest_request()
mock_service = {
"name": "srv_name",
"type": "srv_type",
"host": "srv_host"
}
request.user = mock.Mock(
service_catalog=[mock_service],
services_region='some region'
)
response = keystone.Services().get(request)
self.assertStatusCode(response, 200)
kc.Service.assert_called_once_with(mock_service, "some region")
|
|
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, checking it
for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and the options therein, determines the next step taken: if it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
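# A hedged example of the corresponding ``[ah_bootstrap]`` section in a
# project's setup.cfg (values shown are illustrative, not requirements):
#
#     [ah_bootstrap]
#     auto_use = True
#     path = astropy_helpers
#     use_git = True
#     offline = False
#     auto_upgrade = True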
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However, there's a catch-22
# here: we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
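# For example, invoking ``python setup.py build --no-git --offline``
# would leave {'use_git': False, 'offline': True} in the returned config
# and strip both flags from argv before distutils sees them (a sketch of
# the behavior implemented above).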
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST"; in other cases it gets a precedence of "EGG_DIST".
# However, when activating the distribution it will only be placed
# early on sys.path if it is treated as an EGG_DIST, so always do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# At this point we found a version of astropy-helpers, so we're done.
# Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
# But first, remove any previously imported versions of
# astropy_helpers; this is necessary for nested installs where one
# package's installer is installing another package via
# setuptools.sandbox.run_setup, as in the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
Returns the resulting Distribution object if the import succeeded, or
`None` otherwise.
"""
path = os.path.abspath(self.path)
# Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but lower than
# the next minor release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
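# For example, with an installed 1.2.3 this builds the requirement
# 'astropy-helpers>1.2.3,<1.3.0' (see _next_version below), so only
# bugfix releases within the same minor series are considered.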
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` method uses pure Python to check if the
given path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# Running `git submodule` invokes perl, which emits this warning when
# the locale setting is malformed; this can happen sometimes on OSX.
# See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1. Status indicator: '-' if the submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
r'^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
# Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
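# Usage sketch (hypothetical command): for example
#     returncode, stdout, stderr = run_cmd(['git', '--version'])
# returns the decoded output, or raises _CommandNotFound if git is not
# installed.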
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
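# Usage sketch: _silence() is used above to suppress setuptools output,
# e.g.
#     with _silence():
#         run_setup('setup.py', ['egg_info'])
# Note that stdout/stderr are restored even if the body raises.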
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
if sys.version_info[:2] < (2, 7):
# In Python 2.6 the distutils log does not log warnings, errors, etc. to
# stderr so we have to wrap it to ensure consistency at least in this
# module
import distutils
class log(object):
def __getattr__(self, attr):
return getattr(distutils.log, attr)
def warn(self, msg, *args):
self._log_to_stderr(distutils.log.WARN, msg, *args)
def error(self, msg, *args):
self._log_to_stderr(distutils.log.ERROR, msg, *args)
def fatal(self, msg, *args):
self._log_to_stderr(distutils.log.FATAL, msg, *args)
def log(self, level, msg, *args):
if level in (distutils.log.WARN, distutils.log.ERROR,
distutils.log.FATAL):
self._log_to_stderr(level, msg, *args)
else:
distutils.log.log(level, msg, *args)
def _log_to_stderr(self, level, msg, *args):
# This is the only truly 'public' way to get the current threshold
# of the log
current_threshold = distutils.log.set_threshold(distutils.log.WARN)
distutils.log.set_threshold(current_threshold)
if level >= current_threshold:
if args:
msg = msg % args
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
log = log()
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
distribution, ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
If `True`, disable all actions that require an internet connection,
including downloading packages from the package index and fetching
updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
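# Typical usage (a minimal sketch, assuming this module is importable as
# ``ah_bootstrap``): a project's setup.py imports the module, which runs
# the bootstrapper at import time via _Bootstrapper.main() above, e.g.
#
#     import ah_bootstrap  # noqa: bootstraps astropy_helpers
#     from setuptools import setup
#     setup(name='example-package', version='0.1')
#
# Older setup.py files instead call use_astropy_helpers() explicitly
# after import; 'example-package' here is a placeholder name.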
|
|
from __future__ import division
import sys
from icarus.models.cache import insert_after_k_hits_cache
if sys.version_info[:2] >= (2, 7):
import unittest
else:
try:
import unittest2 as unittest
except ImportError:
raise ImportError("The unittest2 package is needed to run the tests.")
del sys
import collections
import numpy as np
import icarus.models as cache
class TestLinkedSet(unittest.TestCase):
def link_consistency(self, linked_set):
"""Checks that links of a linked set are consistent iterating from top
or from bottom.
This method depends on the internal implementation of the LinkedSet
class
"""
topdown = collections.deque()
bottomup = collections.deque()
cur = linked_set._top
while cur:
topdown.append(cur.val)
cur = cur.down
cur = linked_set._bottom
while cur:
bottomup.append(cur.val)
cur = cur.up
bottomup.reverse()
if topdown != bottomup:
return False
return list(reversed(list(linked_set))) == list(reversed(linked_set))
def test_append_top(self):
c = cache.LinkedSet()
c.append_top(1)
self.assertEqual(len(c), 1)
self.assertEqual(list(c), [1])
c.append_top(2)
self.assertEqual(len(c), 2)
self.assertEqual(list(c), [2, 1])
c.append_top(3)
self.assertEqual(len(c), 3)
self.assertEqual(list(c), [3, 2, 1])
self.assertTrue(self.link_consistency(c))
self.assertRaises(KeyError, c.append_top, 2)
def test_append_bottom(self):
c = cache.LinkedSet()
c.append_bottom(1)
self.assertEqual(len(c), 1)
self.assertEqual(list(c), [1])
c.append_bottom(2)
self.assertEqual(len(c), 2)
self.assertEqual(list(c), [1, 2])
c.append_bottom(3)
self.assertEqual(len(c), 3)
self.assertEqual(list(c), [1, 2, 3])
self.assertTrue(self.link_consistency(c))
self.assertRaises(KeyError, c.append_top, 2)
def test_move_to_top(self):
c = cache.LinkedSet()
c.append_top(1)
c.move_to_top(1)
self.assertEqual(list(c), [1])
c.append_bottom(2)
c.move_to_top(1)
self.assertEqual(list(c), [1, 2])
c.move_to_top(2)
self.assertEqual(list(c), [2, 1])
c.append_bottom(3)
c.move_to_top(1)
self.assertEqual(list(c), [1, 2, 3])
self.assertTrue(self.link_consistency(c))
def test_move_to_bottom(self):
c = cache.LinkedSet()
c.append_top(1)
c.move_to_bottom(1)
self.assertEqual(list(c), [1])
c.append_bottom(2)
c.move_to_bottom(2)
self.assertEqual(list(c), [1, 2])
c.move_to_bottom(1)
self.assertEqual(list(c), [2, 1])
c.append_top(3)
c.move_to_bottom(1)
self.assertEqual(list(c), [3, 2, 1])
self.assertTrue(self.link_consistency(c))
def test_move_up(self):
c = cache.LinkedSet()
c.append_bottom(1)
c.move_up(1)
self.assertEqual(list(c), [1])
c.append_bottom(2)
c.move_up(1)
self.assertEqual(list(c), [1, 2])
c.move_up(2)
self.assertEqual(list(c), [2, 1])
c.append_bottom(3)
c.move_up(3)
self.assertEqual(list(c), [2, 3, 1])
c.move_up(3)
self.assertEqual(list(c), [3, 2, 1])
self.assertTrue(self.link_consistency(c))
self.assertRaises(KeyError, c.move_up, 4)
def test_move_down(self):
c = cache.LinkedSet()
c.append_top(1)
c.move_down(1)
self.assertEqual(list(c), [1])
c.append_top(2)
c.move_down(1)
self.assertEqual(list(c), [2, 1])
c.move_down(2)
self.assertEqual(list(c), [1, 2])
c.move_down(2)
self.assertEqual(list(c), [1, 2])
c.append_top(3)
self.assertEqual(list(c), [3, 1, 2])
c.move_down(3)
self.assertEqual(list(c), [1, 3, 2])
c.move_down(3)
self.assertEqual(list(c), [1, 2, 3])
self.assertTrue(self.link_consistency(c))
self.assertRaises(KeyError, c.move_down, 4)
def test_pop_top(self):
c = cache.LinkedSet([1, 2, 3])
evicted = c.pop_top()
self.assertEqual(evicted, 1)
self.assertEqual(list(c), [2, 3])
self.assertTrue(self.link_consistency(c))
evicted = c.pop_top()
self.assertEqual(evicted, 2)
self.assertEqual(list(c), [3])
self.assertTrue(self.link_consistency(c))
evicted = c.pop_top()
self.assertEqual(evicted, 3)
self.assertEqual(list(c), [])
evicted = c.pop_top()
self.assertEqual(evicted, None)
self.assertEqual(list(c), [])
def test_pop_bottom(self):
c = cache.LinkedSet([1, 2, 3])
evicted = c.pop_bottom()
self.assertEqual(evicted, 3)
self.assertEqual(list(c), [1, 2])
self.assertTrue(self.link_consistency(c))
evicted = c.pop_bottom()
self.assertEqual(evicted, 2)
self.assertEqual(list(c), [1])
self.assertTrue(self.link_consistency(c))
evicted = c.pop_bottom()
self.assertEqual(evicted, 1)
self.assertEqual(list(c), [])
evicted = c.pop_bottom()
self.assertEqual(evicted, None)
self.assertEqual(list(c), [])
def test_insert_above(self):
c = cache.LinkedSet([3])
c.insert_above(3, 2)
self.assertEqual(list(c), [2, 3])
self.assertTrue(self.link_consistency(c))
c.insert_above(2, 1)
self.assertEqual(list(c), [1, 2, 3])
self.assertTrue(self.link_consistency(c))
c.insert_above(1, 'a')
self.assertEqual(list(c), ['a', 1, 2, 3])
self.assertTrue(self.link_consistency(c))
c.insert_above(2, 'b')
self.assertEqual(list(c), ['a', 1, 'b', 2, 3])
self.assertTrue(self.link_consistency(c))
c.insert_above(3, 'c')
self.assertEqual(list(c), ['a', 1, 'b', 2, 'c', 3])
self.assertTrue(self.link_consistency(c))
def test_insert_below(self):
c = cache.LinkedSet([1])
c.insert_below(1, 2)
self.assertEqual(list(c), [1, 2])
self.assertTrue(self.link_consistency(c))
c.insert_below(2, 3)
self.assertEqual(list(c), [1, 2, 3])
self.assertTrue(self.link_consistency(c))
c.insert_below(1, 'a')
self.assertEqual(list(c), [1, 'a', 2, 3])
self.assertTrue(self.link_consistency(c))
c.insert_below(2, 'b')
self.assertEqual(list(c), [1, 'a', 2, 'b', 3])
self.assertTrue(self.link_consistency(c))
c.insert_below(3, 'c')
self.assertEqual(list(c), [1, 'a', 2, 'b', 3, 'c'])
self.assertTrue(self.link_consistency(c))
def test_clear(self):
c = cache.LinkedSet()
c.append_top(1)
c.append_top(2)
self.assertEqual(len(c), 2)
c.clear()
self.assertEqual(len(c), 0)
self.assertEqual(list(c), [])
c.clear()
def test_duplicated_elements(self):
self.assertRaises(ValueError, cache.LinkedSet, iterable=[1, 1, 2])
self.assertRaises(ValueError, cache.LinkedSet, iterable=[1, None, None])
self.assertIsNotNone(cache.LinkedSet(iterable=[1, 0, None]))
class TestCache(unittest.TestCase):
def test_do(self):
c = cache.FifoCache(2)
self.assertEquals(len(c), 0)
c.do('PUT', 1)
self.assertEquals(len(c), 1)
c.do('UPDATE', 1)
self.assertEquals(len(c), 1)
self.assertTrue(c.do('GET', 1))
c.do('PUT', 2)
self.assertTrue(c.do('GET', 2))
self.assertEquals(len(c), 2)
self.assertEquals(c.dump(), [2, 1])
c.do('PUT', 3)
self.assertEquals(len(c), 2)
self.assertEquals(c.dump(), [3, 2])
self.assertTrue(c.do('GET', 2))
self.assertTrue(c.do('GET', 3))
self.assertFalse(c.do('GET', 1))
c.do('DELETE', 3)
self.assertFalse(c.do('GET', 3))
self.assertEquals(c.dump(), [2])
c.do('DELETE', 2)
self.assertFalse(c.do('GET', 2))
self.assertEquals(c.dump(), [])
class TestLruCache(unittest.TestCase):
def test_lru(self):
c = cache.LruCache(4)
c.put(0)
self.assertEquals(len(c), 1)
c.put(2)
self.assertEquals(len(c), 2)
c.put(3)
self.assertEquals(len(c), 3)
c.put(4)
self.assertEquals(len(c), 4)
self.assertEquals(c.dump(), [4, 3, 2, 0])
self.assertEquals(c.put(5), 0)
self.assertEquals(c.put(5), None)
self.assertEquals(len(c), 4)
self.assertEquals(c.dump(), [5, 4, 3, 2])
c.get(2)
self.assertEquals(c.dump(), [2, 5, 4, 3])
c.get(4)
self.assertEquals(c.dump(), [4, 2, 5, 3])
c.clear()
self.assertEquals(len(c), 0)
self.assertEquals(c.dump(), [])
def test_remove(self):
c = cache.LruCache(4)
c.put(1)
c.put(2)
c.put(3)
c.remove(2)
self.assertEqual(len(c), 2)
self.assertEqual(c.dump(), [3, 1])
c.put(4)
c.put(5)
self.assertEqual(c.dump(), [5, 4, 3, 1])
c.remove(5)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(), [4, 3, 1])
c.remove(1)
self.assertEqual(len(c), 2)
self.assertEqual(c.dump(), [4, 3])
def test_position(self):
c = cache.LruCache(4)
c.put(4)
c.put(3)
c.put(2)
c.put(1)
self.assertEqual(c.dump(), [1, 2, 3, 4])
self.assertEqual(c.position(1), 0)
self.assertEqual(c.position(2), 1)
self.assertEqual(c.position(3), 2)
self.assertEqual(c.position(4), 3)
class TestSlruCache(unittest.TestCase):
def test_put_get(self):
c = cache.SegmentedLruCache(9, 3)
self.assertEqual(c.maxlen, 9)
c.put(1)
self.assertEqual(c.dump(serialized=False), [[], [], [1]])
c.put(2)
self.assertEqual(c.dump(serialized=False), [[], [], [2, 1]])
c.put(3)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(serialized=False), [[], [], [3, 2, 1]])
c.get(2)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(serialized=False), [[], [2], [3, 1]])
c.get(2)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(serialized=False), [[2], [], [3, 1]])
c.put(4)
self.assertEqual(len(c), 4)
self.assertEqual(c.dump(serialized=False), [[2], [], [4, 3, 1]])
evicted = c.put(5)
self.assertEqual(evicted, 1)
self.assertEqual(len(c), 4)
self.assertEqual(c.dump(serialized=False), [[2], [], [5, 4, 3]])
c.get(5)
self.assertEqual(len(c), 4)
self.assertEqual(c.dump(serialized=False), [[2], [5], [4, 3]])
c.put(6)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[2], [5], [6, 4, 3]])
c.get(6)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[2], [6, 5], [4, 3]])
c.get(3)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[2], [3, 6, 5], [4]])
c.get(4)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[2], [4, 3, 6], [5]])
c.get(4)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[4, 2], [3, 6], [5]])
c.get(2)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[2, 4], [3, 6], [5]])
c.get(3)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[3, 2, 4], [6], [5]])
c.get(3)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[3, 2, 4], [6], [5]])
c.get(2)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[2, 3, 4], [6], [5]])
c.get(6)
self.assertEqual(len(c), 5)
self.assertEqual(c.dump(serialized=False), [[6, 2, 3], [4], [5]])
def test_remove(self):
c = cache.SegmentedLruCache(4, 2)
c.put(2)
c.put(2)
c.put(1)
c.put(1)
c.put(4)
c.put(3)
self.assertEqual(c.dump(serialized=False), [[1, 2], [3, 4]])
c.remove(2)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(serialized=False), [[1], [3, 4]])
c.remove(1)
self.assertEqual(len(c), 2)
self.assertEqual(c.dump(serialized=False), [[], [3, 4]])
c.remove(4)
self.assertEqual(len(c), 1)
self.assertEqual(c.dump(serialized=False), [[], [3]])
c.remove(3)
self.assertEqual(len(c), 0)
self.assertEqual(c.dump(serialized=False), [[], []])
def test_position(self):
c = cache.SegmentedLruCache(4, 2)
c.put(2)
c.put(2)
c.put(1)
c.put(1)
c.put(4)
c.put(3)
self.assertEqual(c.dump(serialized=False), [[1, 2], [3, 4]])
self.assertEqual(c.position(1), 0)
self.assertEqual(c.position(2), 1)
self.assertEqual(c.position(3), 2)
self.assertEqual(c.position(4), 3)
def test_has(self):
c = cache.SegmentedLruCache(4, 2)
c.put(2)
c.put(2)
c.put(1)
c.put(1)
c.put(4)
c.put(3)
self.assertEqual(c.dump(serialized=False), [[1, 2], [3, 4]])
self.assertTrue(c.has(1))
self.assertTrue(c.has(2))
self.assertTrue(c.has(3))
self.assertTrue(c.has(4))
self.assertFalse(c.has(5))
def test_dump(self):
c = cache.SegmentedLruCache(4, 2)
c.put(2)
c.put(2)
c.put(1)
c.put(1)
c.put(4)
c.put(3)
self.assertEqual(c.dump(serialized=False), [[1, 2], [3, 4]])
self.assertEqual(c.dump(serialized=True), [1, 2, 3, 4])
self.assertEqual(c.dump(), [1, 2, 3, 4])
class TestFifoCache(unittest.TestCase):
def test_fifo(self):
c = cache.FifoCache(4)
self.assertEquals(len(c), 0)
c.put(1)
self.assertEquals(len(c), 1)
c.put(2)
self.assertEquals(len(c), 2)
c.put(3)
self.assertEquals(len(c), 3)
c.put(4)
self.assertEquals(len(c), 4)
self.assertEquals(c.dump(), [4, 3, 2, 1])
c.put(5)
self.assertEquals(len(c), 4)
self.assertEquals(c.dump(), [5, 4, 3, 2])
c.get(2)
self.assertEquals(c.dump(), [5, 4, 3, 2])
c.get(4)
self.assertEquals(c.dump(), [5, 4, 3, 2])
c.put(6)
self.assertEquals(c.dump(), [6, 5, 4, 3])
c.clear()
self.assertEquals(len(c), 0)
self.assertEquals(c.dump(), [])
def test_remove(self):
c = cache.FifoCache(4)
c.put(1)
c.put(2)
c.put(3)
c.remove(2)
self.assertEqual(len(c), 2)
self.assertEqual(c.dump(), [3, 1])
c.put(4)
c.put(5)
self.assertEqual(c.dump(), [5, 4, 3, 1])
c.remove(5)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(), [4, 3, 1])
class TestClimbCache(unittest.TestCase):
def test_climb(self):
c = cache.ClimbCache(4)
c.put(1)
self.assertEquals(len(c), 1)
c.put(2)
self.assertEquals(len(c), 2)
c.put(3)
self.assertEquals(len(c), 3)
c.put(5)
self.assertEquals(len(c), 4)
self.assertEquals(c.dump(), [1, 2, 3, 5])
self.assertEquals(c.put(4), 5)
self.assertEquals(c.dump(), [1, 2, 3, 4])
self.assertEquals(c.put(4), None)
self.assertEquals(c.dump(), [1, 2, 4, 3])
self.assertEquals(c.put(4), None)
self.assertEquals(c.dump(), [1, 4, 2, 3])
self.assertEquals(c.put(4), None)
self.assertEquals(c.dump(), [4, 1, 2, 3])
self.assertEquals(c.put(4), None)
self.assertEquals(c.dump(), [4, 1, 2, 3])
self.assertEquals(c.put(5), 3)
self.assertEquals(c.dump(), [4, 1, 2, 5])
def test_remove(self):
c = cache.ClimbCache(4)
c.put(1)
c.put(2)
c.put(3)
c.remove(2)
self.assertEqual(len(c), 2)
self.assertEqual(c.dump(), [1, 3])
c.put(4)
c.put(5)
self.assertEqual(c.dump(), [1, 3, 4, 5])
c.remove(5)
self.assertEqual(len(c), 3)
self.assertEqual(c.dump(), [1, 3, 4])
c.remove(1)
self.assertEqual(len(c), 2)
self.assertEqual(c.dump(), [3, 4])
def test_position(self):
c = cache.ClimbCache(4)
c.put(1)
c.put(2)
c.put(3)
c.put(4)
self.assertEqual(c.dump(), [1, 2, 3, 4])
self.assertEqual(c.position(1), 0)
self.assertEqual(c.position(2), 1)
self.assertEqual(c.position(3), 2)
self.assertEqual(c.position(4), 3)
class TestRandCache(unittest.TestCase):
def test_rand(self):
c = cache.RandEvictionCache(4)
self.assertEquals(len(c), 0)
c.put(1)
self.assertEquals(len(c), 1)
c.put(2)
self.assertEquals(len(c), 2)
c.put(3)
self.assertEquals(len(c), 3)
c.put(4)
self.assertEquals(len(c), 4)
self.assertEquals(len(c.dump()), 4)
for v in (1, 2, 3, 4):
self.assertTrue(c.has(v))
c.get(3)
for v in (1, 2, 3, 4):
self.assertTrue(c.has(v))
c.put(5)
self.assertEquals(len(c), 4)
self.assertTrue(c.has(5))
c.clear()
self.assertEquals(len(c), 0)
self.assertEquals(c.dump(), [])
def test_remove(self):
c = cache.RandEvictionCache(4)
c.put(1)
c.put(2)
c.put(3)
c.remove(2)
self.assertEqual(len(c), 2)
for v in (3, 1):
self.assertTrue(c.has(v))
c.put(4)
c.put(5)
for v in (5, 4, 3, 1):
self.assertTrue(c.has(v))
c.remove(5)
self.assertEqual(len(c), 3)
for v in (4, 3, 1):
self.assertTrue(c.has(v))
class TestInCacheLfuCache(unittest.TestCase):
def test_lfu(self):
c = cache.InCacheLfuCache(4)
self.assertEquals(len(c), 0)
c.put(1)
self.assertEquals(len(c), 1)
c.put(2)
self.assertEquals(len(c), 2)
c.put(3)
self.assertEquals(len(c), 3)
c.put(4)
self.assertEquals(len(c), 4)
self.assertEquals(len(c.dump()), 4)
for v in (1, 2, 3, 4):
self.assertTrue(c.has(v))
c.get(1)
c.get(1)
c.get(1)
c.get(2)
c.get(2)
c.get(3)
c.put(5)
self.assertEquals(c.dump(), [1, 2, 3, 5])
self.assertEquals(len(c), 4)
self.assertTrue(c.has(5))
c.clear()
self.assertEquals(len(c), 0)
self.assertEquals(c.dump(), [])
class TestPerfectLfuCache(unittest.TestCase):
def test_lfu(self):
c = cache.PerfectLfuCache(3)
self.assertEquals(len(c), 0)
c.put(1)
self.assertEquals(len(c), 1)
c.put(2)
self.assertEquals(len(c), 2)
c.put(3)
self.assertEquals(len(c), 3)
self.assertEquals(len(c.dump()), 3)
for v in (1, 2, 3):
self.assertTrue(c.has(v))
c.get(1)
c.get(1)
c.get(1)
c.get(1)
c.get(1)
c.get(2)
c.get(2)
c.get(2)
c.get(2)
c.get(3)
c.get(3)
c.get(3)
c.get(5)
c.put(5)  # This does not remove 3
self.assertEquals(c.dump(), [1, 2, 3])
c.get(5)
c.get(5)
c.get(5)
c.get(5)
c.get(5)
# Now 5 has been requested frequently enough to be included in cache
# and replace 3
c.put(5)
self.assertEquals(c.dump(), [5, 1, 2])
# 3 was evicted, but perfect LFU keeps counting its requests. Request 3
# again enough times to overtake the least frequently requested item
c.get(3)
c.get(3)
c.get(3)
c.get(3)
c.get(3)
# Now 3 has been requested enough times to be inserted and evict 2
c.put(3)
self.assertEquals(c.dump(), [3, 5, 1])
c.clear()
self.assertEquals(len(c), 0)
self.assertEquals(c.dump(), [])
class TestInsertAfterKHits(unittest.TestCase):
def test_put_get_no_memory(self):
c = cache.LruCache(2)
c = cache.insert_after_k_hits_cache(c, k=3, memory=None)
self.assertFalse(c.get(1))
c.put(1)
self.assertFalse(c.get(1))
c.put(1)
self.assertFalse(c.get(1))
c.put(1)
self.assertTrue(c.get(1))
def test_put_get_mixed_no_memory(self):
c = cache.LruCache(2)
c = cache.insert_after_k_hits_cache(c, k=3, memory=None)
self.assertFalse(c.get(1))
c.put(1)
self.assertFalse(c.get(2))
c.put(2)
self.assertFalse(c.get(1))
c.put(1)
self.assertFalse(c.get(2))
c.put(2)
self.assertFalse(c.get(2))
c.put(2)
self.assertTrue(c.get(2))
self.assertFalse(c.get(1))
c.put(1)
self.assertTrue(c.get(1))
self.assertTrue(c.get(2))
def test_put_get_mixed_memory(self):
c = cache.LruCache(2)
c = cache.insert_after_k_hits_cache(c, k=2, memory=2)
self.assertEqual(0, len(c._metacache_queue))
self.assertEqual(0, len(c._metacache_hits))
self.assertFalse(c.get(1))
c.put(1)
self.assertEqual(1, len(c._metacache_queue))
self.assertEqual(1, len(c._metacache_hits))
self.assertFalse(c.get(2))
c.put(2)
self.assertEqual(2, len(c._metacache_queue))
self.assertEqual(2, len(c._metacache_hits))
self.assertFalse(c.get(3))
c.put(3)
self.assertEqual(2, len(c._metacache_queue))
self.assertEqual(2, len(c._metacache_hits))
self.assertFalse(c.get(1))
c.put(1)
self.assertEqual(2, len(c._metacache_queue))
self.assertEqual(2, len(c._metacache_hits))
# This fails because memory wiped out record for 1
self.assertFalse(c.get(1))
self.assertFalse(c.get(3))
c.put(3)
self.assertEqual(1, len(c._metacache_queue))
self.assertEqual(1, len(c._metacache_hits))
self.assertTrue(c.get(3))
self.assertFalse(c.get(1))
c.put(1)
self.assertTrue(c.get(1))
self.assertEqual(0, len(c._metacache_queue))
self.assertEqual(0, len(c._metacache_hits))
def test_deepcopy(self):
c = cache.LruCache(10)
rc = cache.insert_after_k_hits_cache(c, k=3)
rc.put(1)
self.assertFalse(c.has(1))
c.put(3)
self.assertFalse(rc.has(3))
def test_naming(self):
c = cache.insert_after_k_hits_cache(cache.FifoCache(3), k=3)
self.assertEqual(c.get.__name__, 'get')
self.assertEqual(c.put.__name__, 'put')
self.assertEqual(c.dump.__name__, 'dump')
self.assertEqual(c.clear.__name__, 'clear')
self.assertGreater(len(c.get.__doc__), 0)
self.assertGreater(len(c.put.__doc__), 0)
self.assertGreater(len(c.dump.__doc__), 0)
self.assertGreater(len(c.clear.__doc__), 0)
class TestRandInsert(unittest.TestCase):
def test_rand_insert(self):
n = 10000
r = 10
p1 = 0.01
p2 = 0.1
rc1 = cache.rand_insert_cache(cache.LruCache(n), p1)
rc2 = cache.rand_insert_cache(cache.LruCache(n), p2)
len_rc1 = 0
len_rc2 = 0
for _ in range(r):
for i in range(n):
rc1.put(i)
rc2.put(i)
len_rc1 += len(rc1)
len_rc2 += len(rc2)
rc1.clear()
rc2.clear()
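# Each put is admitted independently with probability p, so over r
# rounds of n distinct insertions the mean cache length should be about
# n*p (~100 for p1, ~1000 for p2); 50 is the slack allowed below (a
# statistical check, so rare spurious failures are possible).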
self.assertLess(abs(len_rc1/r - n*p1), 50)
self.assertLess(abs(len_rc2/r - n*p2), 50)
def test_constant_seed(self):
n = 10000
p = 0.1
rc1 = cache.rand_insert_cache(cache.LruCache(n), p, seed=0)
for i in range(n):
rc1.put(i)
rc2 = cache.rand_insert_cache(cache.LruCache(n), p, seed=0)
for i in range(n):
rc2.put(i)
self.assertEqual(rc1.dump(), rc2.dump())
def test_different_seed(self):
n = 10000
p = 0.1
rc1 = cache.rand_insert_cache(cache.LruCache(n), p, seed=1)
for i in range(n):
rc1.put(i)
rc2 = cache.rand_insert_cache(cache.LruCache(n), p, seed=2)
for i in range(n):
rc2.put(i)
self.assertNotEqual(rc1.dump(), rc2.dump())
def test_deepcopy(self):
c = cache.LruCache(10)
rc = cache.rand_insert_cache(c, p=1.0)
rc.put(1)
self.assertFalse(c.has(1))
c.put(3)
self.assertFalse(rc.has(3))
def test_naming(self):
c = cache.rand_insert_cache(cache.FifoCache(3), 0.2)
self.assertEqual(c.get.__name__, 'get')
self.assertEqual(c.put.__name__, 'put')
self.assertEqual(c.dump.__name__, 'dump')
self.assertEqual(c.clear.__name__, 'clear')
self.assertGreater(len(c.get.__doc__), 0)
self.assertGreater(len(c.put.__doc__), 0)
self.assertGreater(len(c.dump.__doc__), 0)
self.assertGreater(len(c.clear.__doc__), 0)
class TestKeyValCache(unittest.TestCase):
def test_key_val_cache(self):
c = cache.keyval_cache(cache.FifoCache(3))
c.put(1, 11)
self.assertEqual(c.get(1), 11)
c.put(1, 12)
self.assertEqual(c.get(1), 12)
self.assertEqual(c.dump(), [(1, 12)])
c.put(2, 21)
self.assertTrue(c.has(1))
self.assertTrue(c.has(2))
c.put(3, 31)
k, v = c.put(4, 41)
self.assertEqual(c.remove(2), 21)
self.assertEqual(len(c), 2)
self.assertEqual((k, v), (1, 12))
c.clear()
self.assertEqual(len(c), 0)
def test_naming(self):
c = cache.keyval_cache(cache.FifoCache(3))
self.assertEqual(c.get.__name__, 'get')
self.assertEqual(c.put.__name__, 'put')
self.assertEqual(c.dump.__name__, 'dump')
self.assertEqual(c.clear.__name__, 'clear')
self.assertGreater(len(c.get.__doc__), 0)
self.assertGreater(len(c.put.__doc__), 0)
self.assertGreater(len(c.dump.__doc__), 0)
self.assertGreater(len(c.clear.__doc__), 0)
def test_deepcopy(self):
kc = cache.LruCache(10)
kvc = cache.keyval_cache(kc)
kvc.put(1, 2)
self.assertFalse(kc.has(1))
kc.put(3)
self.assertFalse(kvc.has(3))
def test_zero_val_lru(self):
c = cache.keyval_cache(cache.LruCache(10))
reqs = [(10, 0), (10, 1)]
for k, v in reqs:
c.put(k, v)
class TestTtlCache(unittest.TestCase):
def test_put_dump(self):
curr_time = 1
f_time = lambda: curr_time
c = cache.ttl_cache(cache.FifoCache(4), f_time)
c.put(1, ttl=2)
c.put(2, ttl=5)
c.put(3, ttl=3)
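# dump() reports (item, expiry) pairs, where expiry = insertion time +
# ttl; with the clock at 1, ttl=2 expires at 3, ttl=5 at 6, ttl=3 at 4.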
self.assertEqual(c.dump(), [(3, 4), (2, 6), (1, 3)])
self.assertTrue(c.has(1))
self.assertTrue(c.has(2))
self.assertTrue(c.has(3))
curr_time = 4
self.assertFalse(c.has(1))
self.assertTrue(c.has(2))
self.assertTrue(c.has(3))
self.assertEqual(c.dump(), [(3, 4), (2, 6)])
c.put(3, ttl=6)
self.assertEqual(c.dump(), [(3, 10), (2, 6)])
curr_time = 11
self.assertEqual(c.dump(), [])
def test_get(self):
curr_time = 1
f_time = lambda: curr_time
c = cache.ttl_cache(cache.FifoCache(3), f_time)
c.put(1, ttl=2)
self.assertTrue(c.get(1))
self.assertFalse(c.get(2))
self.assertTrue(c.get(1))
c.put(2, ttl=7)
self.assertTrue(c.get(1))
self.assertTrue(c.get(2))
curr_time = 4
self.assertFalse(c.get(1))
self.assertTrue(c.get(2))
curr_time = 15
self.assertFalse(c.get(1))
self.assertFalse(c.get(2))
def test_eviction(self):
curr_time = 0
f_time = lambda: curr_time
c = cache.ttl_cache(cache.FifoCache(3), f_time)
self.assertIsNone(c.put(1, ttl=4))
self.assertIsNone(c.put(2, ttl=6))
self.assertIsNone(c.put(3, ttl=8))
self.assertEqual(c.put(4, ttl=10), 1)
curr_time = 7
self.assertIsNone(c.put(5, ttl=12))
def test_incorrect_params(self):
self.assertRaises(TypeError, cache.ttl_cache, 'cache', lambda: 1)
self.assertRaises(TypeError, cache.ttl_cache, cache.FifoCache(4), 'function')
c = cache.ttl_cache(cache.FifoCache(10), lambda: 5)
self.assertRaises(ValueError, c.put, 1, ttl=2, expires=8)
def test_put_stale_content(self):
c = cache.ttl_cache(cache.FifoCache(2), lambda: 5)
c.put(1, ttl=-2)
self.assertFalse(c.has(1))
c.put(2, expires=3)
self.assertFalse(c.has(2))
def test_inf_ttl(self):
curr_time = 1
f_time = lambda: curr_time
c = cache.ttl_cache(cache.FifoCache(5), f_time)
c.put(1)
c.put(2)
c.put(3)
curr_time = 1000
dump = c.dump()
self.assertIn((1, np.infty), dump)
self.assertIn((2, np.infty), dump)
self.assertIn((3, np.infty), dump)
c.put(1, ttl=100)
curr_time = 2000
dump = c.dump()
self.assertEqual(len(dump), 3)
self.assertIn((1, np.infty), dump)
self.assertIn((2, np.infty), dump)
self.assertIn((3, np.infty), dump)
c.put(4, ttl=200)
dump = c.dump()
self.assertEqual(len(dump), 4)
self.assertEqual(dump[0], (4, 2200))
self.assertIn((1, np.infty), dump)
self.assertIn((2, np.infty), dump)
self.assertIn((3, np.infty), dump)
curr_time = 3000
dump = c.dump()
self.assertEqual(len(dump), 3)
self.assertIn((1, np.infty), dump)
self.assertIn((2, np.infty), dump)
self.assertIn((3, np.infty), dump)
def test_clear(self):
curr_time = 1
f_time = lambda: curr_time
c = cache.ttl_cache(cache.FifoCache(3), f_time)
c.put(1, ttl=4)
c.put(2, ttl=5)
c.put(1, ttl=8)
c.put(3, ttl=3)
c.put(4, ttl=1)
c.clear()
self.assertEqual(len(c), 0)
self.assertEqual(c.dump(), [])
def test_naming(self):
c = cache.ttl_cache(cache.FifoCache(4), lambda: 0)
self.assertEqual(c.get.__name__, 'get')
self.assertEqual(c.put.__name__, 'put')
self.assertEqual(c.dump.__name__, 'dump')
self.assertEqual(c.clear.__name__, 'clear')
self.assertEqual(c.has.__name__, 'has')
def test_deepcopy(self):
c = cache.LruCache(10)
ttl_c = cache.ttl_cache(c, lambda: 0)
ttl_c.put(1, 2)
self.assertFalse(c.has(1))
c.put(3)
self.assertFalse(ttl_c.has(3))
|
|
# Python test set -- part 6, built-in types
from test.test_support import *
print '6. Built-in types'
print '6.1 Truth value testing'
if None: raise TestFailed, 'None is true instead of false'
if 0: raise TestFailed, '0 is true instead of false'
if 0L: raise TestFailed, '0L is true instead of false'
if 0.0: raise TestFailed, '0.0 is true instead of false'
if '': raise TestFailed, '\'\' is true instead of false'
if not 1: raise TestFailed, '1 is false instead of true'
if not 1L: raise TestFailed, '1L is false instead of true'
if not 1.0: raise TestFailed, '1.0 is false instead of true'
if not 'x': raise TestFailed, '\'x\' is false instead of true'
if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
def f(): pass
class C: pass
import sys
x = C()
if not f: raise TestFailed, 'f is false instead of true'
if not C: raise TestFailed, 'C is false instead of true'
if not sys: raise TestFailed, 'sys is false instead of true'
if not x: raise TestFailed, 'x is false instead of true'
print '6.2 Boolean operations'
if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
if 1 and 1: pass
else: raise TestFailed, '1 and 1 is false instead of true'
if not 1: raise TestFailed, 'not 1 is true instead of false'
print '6.3 Comparisons'
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: raise TestFailed, 'int comparisons failed'
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: raise TestFailed, 'long int comparisons failed'
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: raise TestFailed, 'float comparisons failed'
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: raise TestFailed, 'string comparisons failed'
if None is None: pass
else: raise TestFailed, 'identity test failed'
try: float('')
except ValueError: pass
else: raise TestFailed, "float('') didn't raise ValueError"
try: float('5\0')
except ValueError: pass
else: raise TestFailed, "float('5\0') didn't raise ValueError"
try: 5.0 / 0.0
except ZeroDivisionError: pass
else: raise TestFailed, "5.0 / 0.0 didn't raise ZeroDivisionError"
try: 5.0 // 0.0
except ZeroDivisionError: pass
else: raise TestFailed, "5.0 // 0.0 didn't raise ZeroDivisionError"
try: 5.0 % 0.0
except ZeroDivisionError: pass
else: raise TestFailed, "5.0 % 0.0 didn't raise ZeroDivisionError"
try: 5 / 0L
except ZeroDivisionError: pass
else: raise TestFailed, "5 / 0L didn't raise ZeroDivisionError"
try: 5 // 0L
except ZeroDivisionError: pass
else: raise TestFailed, "5 // 0L didn't raise ZeroDivisionError"
try: 5 % 0L
except ZeroDivisionError: pass
else: raise TestFailed, "5 % 0L didn't raise ZeroDivisionError"
print '6.4 Numeric types (mostly conversions)'
if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons'
if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons'
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
raise TestFailed, 'int/long/float value not equal'
# calling built-in types without argument must return 0
if int() != 0: raise TestFailed, 'int() does not return 0'
if long() != 0L: raise TestFailed, 'long() does not return 0L'
if float() != 0.0: raise TestFailed, 'float() does not return 0.0'
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: raise TestFailed, 'int() does not round properly'
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: raise TestFailed, 'long() does not round properly'
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: raise TestFailed, 'float() does not work properly'
print '6.4.1 32-bit integers'
if 12 + 24 != 36: raise TestFailed, 'int op'
if 12 + (-24) != -12: raise TestFailed, 'int op'
if (-12) + 24 != 12: raise TestFailed, 'int op'
if (-12) + (-24) != -36: raise TestFailed, 'int op'
if not 12 < 24: raise TestFailed, 'int op'
if not -24 < -12: raise TestFailed, 'int op'
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
raise TestFailed, 'int mul commutativity'
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m)
if type(prod) is not int:
raise TestFailed, ("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
print '6.4.2 Long integers'
if 12L + 24L != 36L: raise TestFailed, 'long op'
if 12L + (-24L) != -12L: raise TestFailed, 'long op'
if (-12L) + 24L != 12L: raise TestFailed, 'long op'
if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
if not 12L < 24L: raise TestFailed, 'long op'
if not -24L < -12L: raise TestFailed, 'long op'
x = sys.maxint
if int(long(x)) != x: raise TestFailed, 'long op'
try: y = int(long(x)+1L)
except OverflowError: raise TestFailed, 'long op'
if not isinstance(y, long): raise TestFailed, 'long op'
x = -x
if int(long(x)) != x: raise TestFailed, 'long op'
x = x-1
if int(long(x)) != x: raise TestFailed, 'long op'
try: y = int(long(x)-1L)
except OverflowError: raise TestFailed, 'long op'
if not isinstance(y, long): raise TestFailed, 'long op'
try: 5 << -5
except ValueError: pass
else: raise TestFailed, 'int negative shift <<'
try: 5L << -5L
except ValueError: pass
else: raise TestFailed, 'long negative shift <<'
try: 5 >> -5
except ValueError: pass
else: raise TestFailed, 'int negative shift >>'
try: 5L >> -5L
except ValueError: pass
else: raise TestFailed, 'long negative shift >>'
print '6.4.3 Floating point numbers'
if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
if not 12.0 < 24.0: raise TestFailed, 'float op'
if not -24.0 < -12.0: raise TestFailed, 'float op'
print '6.5 Sequence types'
print '6.5.1 Strings'
if len('') != 0: raise TestFailed, 'len(\'\')'
if len('a') != 1: raise TestFailed, 'len(\'a\')'
if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: raise TestFailed, 'in/not in string'
x = 'x'*103
if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
# Extended slices for strings
a = '0123456789'
vereq(a[::], a)
vereq(a[::2], '02468')
vereq(a[1::2], '13579')
vereq(a[::-1],'9876543210')
vereq(a[::-2], '97531')
vereq(a[3::-2], '31')
vereq(a[-100:100:], a)
vereq(a[100:-100:-1], a[::-1])
vereq(a[-100L:100L:2L], '02468')
if have_unicode:
a = unicode('0123456789', 'ascii')
vereq(a[::], a)
vereq(a[::2], unicode('02468', 'ascii'))
vereq(a[1::2], unicode('13579', 'ascii'))
vereq(a[::-1], unicode('9876543210', 'ascii'))
vereq(a[::-2], unicode('97531', 'ascii'))
vereq(a[3::-2], unicode('31', 'ascii'))
vereq(a[-100:100:], a)
vereq(a[100:-100:-1], a[::-1])
vereq(a[-100L:100L:2L], unicode('02468', 'ascii'))
print '6.5.2 Tuples [see test_tuple.py]'
print '6.5.3 Lists [see test_list.py]'
print '6.6 Mappings == Dictionaries [see test_dict.py]'
try: type(1, 2)
except TypeError: pass
else: raise TestFailed, 'type(), w/2 args expected TypeError'
try: type(1, 2, 3, 4)
except TypeError: pass
else: raise TestFailed, 'type(), w/4 args expected TypeError'
print 'Buffers'
try: buffer('asdf', -1)
except ValueError: pass
else: raise TestFailed, "buffer('asdf', -1) should raise ValueError"
try: buffer(None)
except TypeError: pass
else: raise TestFailed, "buffer(None) should raise TypeError"
a = buffer('asdf')
hash(a)
b = a * 5
if a == b:
raise TestFailed, 'buffers should not be equal'
if str(b) != ('asdf' * 5):
raise TestFailed, 'repeated buffer has wrong content'
if str(a * 0) != '':
raise TestFailed, 'repeated buffer zero times has wrong content'
if str(a + buffer('def')) != 'asdfdef':
raise TestFailed, 'concatenation of buffers yields wrong content'
if str(buffer(a)) != 'asdf':
raise TestFailed, 'composing buffers failed'
if str(buffer(a, 2)) != 'df':
raise TestFailed, 'specifying buffer offset failed'
if str(buffer(a, 0, 2)) != 'as':
raise TestFailed, 'specifying buffer size failed'
if str(buffer(a, 1, 2)) != 'sd':
raise TestFailed, 'specifying buffer offset and size failed'
try: buffer(buffer('asdf', 1), -1)
except ValueError: pass
else: raise TestFailed, "buffer(buffer('asdf', 1), -1) should raise ValueError"
if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
raise TestFailed, 'composing length-specified buffer failed'
if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
raise TestFailed, 'composing length-specified buffer failed'
if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
raise TestFailed, 'composing length-specified buffer failed'
if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
raise TestFailed, 'composing length-specified buffer failed'
try: a[1] = 'g'
except TypeError: pass
else: raise TestFailed, "buffer assignment should raise TypeError"
try: a[0:1] = 'g'
except TypeError: pass
else: raise TestFailed, "buffer slice assignment should raise TypeError"
|
|
import hashlib
import re
import os
from six import binary_type
from six.moves.urllib.parse import urljoin
from fnmatch import fnmatch
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree
import html5lib
from . import XMLParser
from .item import Stub, ManualTest, WebdriverSpecTest, RefTestNode, RefTest, TestharnessTest, SupportFile, ConformanceCheckerTest, VisualTest
from .utils import rel_path_to_url, ContextManagerBytesIO, cached_property
wd_pattern = "*.py"
meta_re = re.compile(b"//\s*META:\s*(\w*)=(.*)$")
reference_file_re = re.compile(r'(^|[\-_])(not)?ref[0-9]*([\-_]|$)')
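# Illustrative only: a never-called sketch of which (hypothetical) file stems
# reference_file_re treats as reference files; the "(not)ref" token must be
# delimited by "-", "_", or a string boundary.
def _example_reference_names():
    assert reference_file_re.search("green-box-ref")
    assert reference_file_re.search("box-notref-001")
    assert not reference_file_re.search("reformat")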
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
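# Illustrative only: a minimal sketch of replace_end, which is later used to
# map .any.js and .worker.js URLs to their generated HTML variants (the file
# name below is hypothetical).
def _example_replace_end():
    assert replace_end("dom/test.any.js", ".any.js", ".any.html") == "dom/test.any.html"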
def read_script_metadata(f):
"""
Yields any metadata (pairs of bytestrings) from the file-like object `f`,
    as matched by the `meta_re` regex.
"""
for line in f:
assert isinstance(line, binary_type), line
m = meta_re.match(line)
if not m:
break
yield (m.groups()[0], m.groups()[1])
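# Illustrative only: a minimal sketch of the "// META:" syntax that
# read_script_metadata consumes; the script contents below are hypothetical.
def _example_read_script_metadata():
    from io import BytesIO
    script = BytesIO(b"// META: timeout=long\n// META: global=window\nvar x;\n")
    assert list(read_script_metadata(script)) == [(b"timeout", b"long"),
                                                  (b"global", b"window")]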
class SourceFile(object):
parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"),
"xhtml":lambda x:ElementTree.parse(x, XMLParser.XMLParser()),
"svg":lambda x:ElementTree.parse(x, XMLParser.XMLParser())}
root_dir_non_test = set(["common",
"work-in-progress"])
dir_non_test = set(["resources",
"support",
"tools"])
dir_path_non_test = {("css21", "archive")}
def __init__(self, tests_root, rel_path, url_base, contents=None):
"""Object representing a file in a source tree.
:param tests_root: Path to the root of the source tree
:param rel_path: File path relative to tests_root
:param url_base: Base URL used when converting file paths to urls
:param contents: Byte array of the contents of the file or ``None``.
"""
self.tests_root = tests_root
if os.name == "nt":
# do slash normalization on Windows
if isinstance(rel_path, binary_type):
self.rel_path = rel_path.replace(b"/", b"\\")
else:
self.rel_path = rel_path.replace(u"/", u"\\")
else:
self.rel_path = rel_path
self.url_base = url_base
self.contents = contents
self.dir_path, self.filename = os.path.split(self.rel_path)
self.name, self.ext = os.path.splitext(self.filename)
self.type_flag = None
if "-" in self.name:
self.type_flag = self.name.rsplit("-", 1)[1].split(".")[0]
self.meta_flags = self.name.split(".")[1:]
self.items_cache = None
def __getstate__(self):
# Remove computed properties if we pickle this class
rv = self.__dict__.copy()
if "__cached_properties__" in rv:
cached_properties = rv["__cached_properties__"]
for key in rv.keys():
if key in cached_properties:
del rv[key]
del rv["__cached_properties__"]
return rv
def name_prefix(self, prefix):
"""Check if the filename starts with a given prefix
:param prefix: The prefix to check"""
return self.name.startswith(prefix)
def is_dir(self):
"""Return whether this file represents a directory."""
if self.contents is not None:
return False
return os.path.isdir(self.rel_path)
def open(self):
"""
Return either
* the contents specified in the constructor, if any;
* a File object opened for reading the file contents.
"""
if self.contents is not None:
file_obj = ContextManagerBytesIO(self.contents)
else:
file_obj = open(self.path, 'rb')
return file_obj
@cached_property
def path(self):
return os.path.join(self.tests_root, self.rel_path)
@cached_property
def url(self):
return rel_path_to_url(self.rel_path, self.url_base)
@cached_property
def hash(self):
with self.open() as f:
return hashlib.sha1(f.read()).hexdigest()
def in_non_test_dir(self):
if self.dir_path == "":
return True
parts = self.dir_path.split(os.path.sep)
if parts[0] in self.root_dir_non_test:
return True
elif any(item in self.dir_non_test for item in parts):
return True
else:
for path in self.dir_path_non_test:
if parts[:len(path)] == list(path):
return True
return False
def in_conformance_checker_dir(self):
return (self.dir_path == "conformance-checkers" or
self.dir_path.startswith("conformance-checkers" + os.path.sep))
@property
def name_is_non_test(self):
"""Check if the file name matches the conditions for the file to
be a non-test file"""
return (self.is_dir() or
self.name_prefix("MANIFEST") or
self.filename.startswith(".") or
self.in_non_test_dir())
@property
def name_is_conformance(self):
return (self.in_conformance_checker_dir() and
self.type_flag in ("is-valid", "no-valid"))
@property
def name_is_conformance_support(self):
return self.in_conformance_checker_dir()
@property
def name_is_stub(self):
"""Check if the file name matches the conditions for the file to
be a stub file"""
return self.name_prefix("stub-")
@property
def name_is_manual(self):
"""Check if the file name matches the conditions for the file to
be a manual test file"""
return self.type_flag == "manual"
@property
def name_is_visual(self):
"""Check if the file name matches the conditions for the file to
be a visual test file"""
return self.type_flag == "visual"
@property
def name_is_multi_global(self):
"""Check if the file name matches the conditions for the file to
be a multi-global js test file"""
return "any" in self.meta_flags and self.ext == ".js"
@property
def name_is_worker(self):
"""Check if the file name matches the conditions for the file to
be a worker js test file"""
return "worker" in self.meta_flags and self.ext == ".js"
@property
def name_is_webdriver(self):
"""Check if the file name matches the conditions for the file to
be a webdriver spec test file"""
# wdspec tests are in subdirectories of /webdriver excluding __init__.py
# files.
rel_dir_tree = self.rel_path.split(os.path.sep)
return (rel_dir_tree[0] == "webdriver" and
len(rel_dir_tree) > 1 and
self.filename != "__init__.py" and
fnmatch(self.filename, wd_pattern))
@property
def name_is_reference(self):
"""Check if the file name matches the conditions for the file to
be a reference file (not a reftest)"""
return "/reference/" in self.url or "/reftest/" in self.url or bool(reference_file_re.search(self.name))
@property
def markup_type(self):
"""Return the type of markup contained in a file, based on its extension,
or None if it doesn't contain markup"""
ext = self.ext
if not ext:
return None
if ext[0] == ".":
ext = ext[1:]
if ext in ["html", "htm"]:
return "html"
if ext in ["xhtml", "xht", "xml"]:
return "xhtml"
if ext == "svg":
return "svg"
return None
@cached_property
def root(self):
"""Return an ElementTree Element for the root node of the file if it contains
markup, or None if it does not"""
if not self.markup_type:
return None
parser = self.parsers[self.markup_type]
with self.open() as f:
try:
tree = parser(f)
except Exception:
return None
if hasattr(tree, "getroot"):
root = tree.getroot()
else:
root = tree
return root
@cached_property
def timeout_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify timeouts"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']")
@cached_property
def script_metadata(self):
if not self.name_is_worker and not self.name_is_multi_global:
return None
with self.open() as f:
return list(read_script_metadata(f))
@cached_property
def timeout(self):
"""The timeout of a test or reference file. "long" if the file has an extended timeout
or None otherwise"""
if self.script_metadata:
if any(m == (b"timeout", b"long") for m in self.script_metadata):
return "long"
if self.root is None:
return None
if self.timeout_nodes:
timeout_str = self.timeout_nodes[0].attrib.get("content", None)
if timeout_str and timeout_str.lower() == "long":
return "long"
return None
@cached_property
def viewport_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify viewport sizes"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='viewport-size']")
@cached_property
def viewport_size(self):
"""The viewport size of a test or reference file"""
if self.root is None:
return None
if not self.viewport_nodes:
return None
return self.viewport_nodes[0].attrib.get("content", None)
@cached_property
def dpi_nodes(self):
"""List of ElementTree Elements corresponding to nodes in a test that
specify device pixel ratios"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='device-pixel-ratio']")
@cached_property
def dpi(self):
"""The device pixel ratio of a test or reference file"""
if self.root is None:
return None
if not self.dpi_nodes:
return None
return self.dpi_nodes[0].attrib.get("content", None)
@cached_property
def testharness_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
testharness.js script"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']")
@cached_property
def content_is_testharness(self):
"""Boolean indicating whether the file content represents a
testharness.js test"""
if self.root is None:
return None
return bool(self.testharness_nodes)
@cached_property
def variant_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
test variant"""
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']")
@cached_property
def test_variants(self):
rv = []
for element in self.variant_nodes:
if "content" in element.attrib:
variant = element.attrib["content"]
assert variant == "" or variant[0] in ["#", "?"]
rv.append(variant)
if not rv:
rv = [""]
return rv
@cached_property
def reftest_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
to a reftest <link>"""
if self.root is None:
return []
match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']")
mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']")
return match_links + mismatch_links
@cached_property
def references(self):
"""List of (ref_url, relation) tuples for any reftest references specified in
the file"""
rv = []
rel_map = {"match": "==", "mismatch": "!="}
for item in self.reftest_nodes:
if "href" in item.attrib:
ref_url = urljoin(self.url, item.attrib["href"])
ref_type = rel_map[item.attrib["rel"]]
rv.append((ref_url, ref_type))
return rv
@cached_property
def content_is_ref_node(self):
"""Boolean indicating whether the file is a non-leaf node in a reftest
        graph (i.e. if it contains any <link rel=[mis]match>)"""
return bool(self.references)
@cached_property
def css_flag_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
flag <meta>"""
if self.root is None:
return []
return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='flags']")
@cached_property
def css_flags(self):
"""Set of flags specified in the file"""
rv = set()
for item in self.css_flag_nodes:
if "content" in item.attrib:
for flag in item.attrib["content"].split():
rv.add(flag)
return rv
@cached_property
def content_is_css_manual(self):
"""Boolean indicating whether the file content represents a
CSS WG-style manual test"""
if self.root is None:
return None
# return True if the intersection between the two sets is non-empty
return bool(self.css_flags & {"animated", "font", "history", "interact", "paged", "speech", "userstyle"})
@cached_property
def spec_link_nodes(self):
"""List of ElementTree Elements corresponding to nodes representing a
<link rel=help>, used to point to specs"""
if self.root is None:
return []
return self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='help']")
@cached_property
def spec_links(self):
"""Set of spec links specified in the file"""
rv = set()
for item in self.spec_link_nodes:
if "href" in item.attrib:
rv.add(item.attrib["href"])
return rv
@cached_property
def content_is_css_visual(self):
"""Boolean indicating whether the file content represents a
CSS WG-style manual test"""
if self.root is None:
return None
return bool(self.ext in {'.xht', '.html', '.xhtml', '.htm', '.xml', '.svg'} and
self.spec_links)
@property
def type(self):
rv, _ = self.manifest_items()
return rv
def manifest_items(self):
"""List of manifest items corresponding to the file. There is typically one
per test, but in the case of reftests a node may have corresponding manifest
items without being a test itself."""
if self.items_cache:
return self.items_cache
if self.name_is_non_test:
rv = "support", [SupportFile(self)]
elif self.name_is_stub:
rv = Stub.item_type, [Stub(self, self.url)]
elif self.name_is_manual:
rv = ManualTest.item_type, [ManualTest(self, self.url)]
elif self.name_is_conformance:
rv = ConformanceCheckerTest.item_type, [ConformanceCheckerTest(self, self.url)]
elif self.name_is_conformance_support:
rv = "support", [SupportFile(self)]
elif self.name_is_visual:
rv = VisualTest.item_type, [VisualTest(self, self.url)]
elif self.name_is_multi_global:
rv = TestharnessTest.item_type, [
TestharnessTest(self, replace_end(self.url, ".any.js", ".any.html"), timeout=self.timeout),
TestharnessTest(self, replace_end(self.url, ".any.js", ".any.worker.html"), timeout=self.timeout),
]
elif self.name_is_worker:
rv = (TestharnessTest.item_type,
[TestharnessTest(self, replace_end(self.url, ".worker.js", ".worker.html"),
timeout=self.timeout)])
elif self.name_is_webdriver:
rv = WebdriverSpecTest.item_type, [WebdriverSpecTest(self, self.url)]
elif self.content_is_css_manual and not self.name_is_reference:
rv = ManualTest.item_type, [ManualTest(self, self.url)]
elif self.content_is_testharness:
rv = TestharnessTest.item_type, []
for variant in self.test_variants:
url = self.url + variant
rv[1].append(TestharnessTest(self, url, timeout=self.timeout))
elif self.content_is_ref_node:
rv = (RefTestNode.item_type,
[RefTestNode(self, self.url, self.references, timeout=self.timeout,
viewport_size=self.viewport_size, dpi=self.dpi)])
elif self.content_is_css_visual and not self.name_is_reference:
rv = VisualTest.item_type, [VisualTest(self, self.url)]
else:
rv = "support", [SupportFile(self)]
self.items_cache = rv
return rv
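# Illustrative only: a sketch of driving SourceFile with in-memory contents
# (the paths and markup below are hypothetical; the exact items returned
# depend on the helper classes imported above).
def _example_manifest_items():
    contents = b"<script src='/resources/testharness.js'></script>"
    s = SourceFile("/wpt", "dom/example.html", "/", contents=contents)
    item_type, items = s.manifest_items()
    assert item_type == TestharnessTest.item_type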
|
|
#! /usr/local/bin/python
# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
# intentionally NOT "/usr/bin/env python". On many systems
# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
# scripts, and /usr/local/bin is the default directory where Python is
# installed, so /usr/bin/env would be unable to find python. Granted,
# binary installations by Linux vendors often install Python in
# /usr/bin. So let those vendors patch cgi.py to match their choice
# of installation.
"""Support module for CGI (Common Gateway Interface) scripts.
This module defines a number of utilities for use by CGI scripts
written in Python.
"""
# XXX Perhaps there should be a slimmed version that doesn't contain
# all those backwards compatible and debugging classes and functions?
# History
# -------
#
# Michael McLay started this module. Steve Majewski changed the
# interface to SvFormContentDict and FormContentDict. The multipart
# parsing was inspired by code submitted by Andreas Paepcke. Guido van
# Rossum rewrote, reformatted and documented the module and is currently
# responsible for its maintenance.
#
__version__ = "2.6"
# Imports
# =======
from operator import attrgetter
import sys
import os
import UserDict
import urlparse
from warnings import filterwarnings, catch_warnings, warn
with catch_warnings():
if sys.py3kwarning:
filterwarnings("ignore", ".*mimetools has been removed",
DeprecationWarning)
filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import mimetools
import rfc822
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
"SvFormContentDict", "InterpFormContentDict", "FormContent",
"parse", "parse_qs", "parse_qsl", "parse_multipart",
"parse_header", "print_exception", "print_environ",
"print_form", "print_directory", "print_arguments",
"print_environ_usage", "escape"]
# Logging support
# ===============
logfile = "" # Filename to log to, if not empty
logfp = None # File object to log to, if not None
def initlog(*allargs):
"""Write a log message, if there is a log file.
Even though this function is called initlog(), you should always
use log(); log is a variable that is set either to initlog
(initially), to dolog (once the log file has been opened), or to
nolog (when logging is disabled).
The first argument is a format string; the remaining arguments (if
any) are arguments to the % operator, so e.g.
log("%s: %s", "a", "b")
will write "a: b" to the log file, followed by a newline.
If the global logfp is not None, it should be a file object to
which log data is written.
If the global logfp is None, the global logfile may be a string
giving a filename to open, in append mode. This file should be
world writable!!! If the file can't be opened, logging is
silently disabled (since there is no safe place where we could
send an error message).
"""
global logfp, log
if logfile and not logfp:
try:
logfp = open(logfile, "a")
except IOError:
pass
if not logfp:
log = nolog
else:
log = dolog
log(*allargs)
def dolog(fmt, *args):
"""Write a log message to the log file. See initlog() for docs."""
logfp.write(fmt%args + "\n")
def nolog(*allargs):
"""Dummy function, assigned to log when logging is disabled."""
pass
log = initlog # The current logging function
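# Illustrative only: a never-called sketch of the log() indirection described
# in initlog()'s docstring; the log path is hypothetical and must be writable.
def _example_logging():
    global logfile
    logfile = "/tmp/cgi.log"
    log("%s: %s", "a", "b")  # first call runs initlog, which rebinds log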
# Parsing functions
# =================
# Maximum input we will accept when REQUEST_METHOD is POST
# 0 ==> unlimited input
maxlen = 0
def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
"""Parse a query in the environment or from a file (default stdin)
Arguments, all optional:
fp : file pointer; default: sys.stdin
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
"""
if fp is None:
fp = sys.stdin
    if 'REQUEST_METHOD' not in environ:
environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
if environ['REQUEST_METHOD'] == 'POST':
ctype, pdict = parse_header(environ['CONTENT_TYPE'])
if ctype == 'multipart/form-data':
return parse_multipart(fp, pdict)
elif ctype == 'application/x-www-form-urlencoded':
clength = int(environ['CONTENT_LENGTH'])
if maxlen and clength > maxlen:
raise ValueError, 'Maximum content length exceeded'
qs = fp.read(clength)
else:
qs = '' # Unknown content-type
if 'QUERY_STRING' in environ:
if qs: qs = qs + '&'
qs = qs + environ['QUERY_STRING']
elif sys.argv[1:]:
if qs: qs = qs + '&'
qs = qs + sys.argv[1]
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
elif 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
else:
if sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
environ['QUERY_STRING'] = qs # XXX Shouldn't, really
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
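# Illustrative only: parsing a GET-style query from a hypothetical environment
# dictionary (stdin is not consulted for GET requests).
def _example_parse():
    env = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'a=1&a=2&b=3'}
    assert parse(environ=env) == {'a': ['1', '2'], 'b': ['3']}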
# parse query string function called from urlparse,
# this is done in order to maintain backward compatibility.
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
"""Parse a query given as a string argument."""
warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qs(qs, keep_blank_values, strict_parsing)
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0, max_num_fields=None):
"""Parse a query given as a string argument."""
warn("cgi.parse_qsl is deprecated, use urlparse.parse_qsl instead",
PendingDeprecationWarning, 2)
return urlparse.parse_qsl(qs, keep_blank_values, strict_parsing,
max_num_fields)
def parse_multipart(fp, pdict):
"""Parse multipart input.
Arguments:
fp : input file
pdict: dictionary containing other parameters of content-type header
Returns a dictionary just like parse_qs(): keys are the field names, each
value is a list of values for that field. This is easy to use but not
much good if you are expecting megabytes to be uploaded -- in that case,
use the FieldStorage class instead which is much more flexible. Note
that content-type is the raw, unparsed contents of the content-type
header.
XXX This does not parse nested multipart parts -- use FieldStorage for
that.
XXX This should really be subsumed by FieldStorage altogether -- no
point in having two implementations of the same parsing algorithm.
Also, FieldStorage protects itself better against certain DoS attacks
by limiting the size of the data read in one chunk. The API here
does not support that kind of protection. This also affects parse()
since it can call parse_multipart().
"""
boundary = ""
if 'boundary' in pdict:
boundary = pdict['boundary']
if not valid_boundary(boundary):
raise ValueError, ('Invalid boundary in multipart form: %r'
% (boundary,))
nextpart = "--" + boundary
lastpart = "--" + boundary + "--"
partdict = {}
terminator = ""
while terminator != lastpart:
bytes = -1
data = None
if terminator:
# At start of next part. Read headers first.
headers = mimetools.Message(fp)
clength = headers.getheader('content-length')
if clength:
try:
bytes = int(clength)
except ValueError:
pass
if bytes > 0:
if maxlen and bytes > maxlen:
raise ValueError, 'Maximum content length exceeded'
data = fp.read(bytes)
else:
data = ""
# Read lines until end of part.
lines = []
while 1:
line = fp.readline()
if not line:
terminator = lastpart # End outer loop
break
if line[:2] == "--":
terminator = line.strip()
if terminator in (nextpart, lastpart):
break
lines.append(line)
# Done with part.
if data is None:
continue
if bytes < 0:
if lines:
# Strip final line terminator
line = lines[-1]
if line[-2:] == "\r\n":
line = line[:-2]
elif line[-1:] == "\n":
line = line[:-1]
lines[-1] = line
data = "".join(lines)
line = headers['content-disposition']
if not line:
continue
key, params = parse_header(line)
if key != 'form-data':
continue
if 'name' in params:
name = params['name']
else:
continue
if name in partdict:
partdict[name].append(data)
else:
partdict[name] = [data]
return partdict
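# Illustrative only: a minimal single-field body using the hypothetical
# boundary "BOUND", with the CRLF line endings a browser would send.
def _example_parse_multipart():
    body = ("--BOUND\r\n"
            'Content-Disposition: form-data; name="title"\r\n'
            "\r\n"
            "hello\r\n"
            "--BOUND--\r\n")
    assert parse_multipart(StringIO(body), {'boundary': 'BOUND'}) == {'title': ['hello']}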
def _parseparam(s):
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header(line):
"""Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(';' + line)
key = parts.next()
pdict = {}
for p in parts:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
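# Illustrative only: splitting a Content-Type value into the main type and a
# dictionary of options.
def _example_parse_header():
    assert parse_header('text/html; charset="utf-8"') == \
        ('text/html', {'charset': 'utf-8'})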
# Classes for field storage
# =========================
class MiniFieldStorage:
"""Like FieldStorage, for use when no file uploads are possible."""
# Dummy attributes
filename = None
list = None
type = None
file = None
type_options = {}
disposition = None
disposition_options = {}
headers = {}
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
# self.file = StringIO(value)
def __repr__(self):
"""Return printable representation."""
return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
class FieldStorage:
"""Store a sequence of fields, reading multipart/form-data.
This class provides naming, typing, files stored on disk, and
more. At the top level, it is accessible like a dictionary, whose
keys are the field names. (Note: None can occur as a field name.)
    The items are either a Python list (if there are multiple values) or
another FieldStorage or MiniFieldStorage object. If it's a single
object, it has the following attributes:
name: the field name, if specified; otherwise None
filename: the filename, if specified; otherwise None; this is the
client side filename, *not* the file name on which it is
stored (that's a temporary file you don't deal with)
value: the value as a *string*; for file uploads, this
transparently reads the file every time you request the value
file: the file(-like) object from which you can read the data;
        None if the data is stored as a simple string
type: the content-type, or None if not specified
type_options: dictionary of options specified on the content-type
line
disposition: content-disposition, or None if not specified
disposition_options: dictionary of corresponding options
headers: a dictionary(-like) object (sometimes rfc822.Message or a
subclass thereof) containing *all* headers
The class is subclassable, mostly for the purpose of overriding
the make_file() method, which is called internally to come up with
a file open for reading and writing. This makes it possible to
override the default choice of storing all files in a temporary
directory and unlinking them as soon as they have been opened.
"""
def __init__(self, fp=None, headers=None, outerboundary="",
environ=os.environ, keep_blank_values=0, strict_parsing=0,
max_num_fields=None):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
fp : file pointer; default: sys.stdin
(not used when the request method is GET)
headers : header dictionary-like object; default:
taken from environ as per CGI spec
outerboundary : terminating multipart boundary
(for internal use only)
environ : environment dictionary; default: os.environ
keep_blank_values: flag indicating whether blank values in
percent-encoded forms should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
max_num_fields: int. If set, then __init__ throws a ValueError
            if parse_qsl() reads more than max_num_fields fields.
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
self.max_num_fields = max_num_fields
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
if method == 'GET' or method == 'HEAD':
if 'QUERY_STRING' in environ:
qs = environ['QUERY_STRING']
elif sys.argv[1:]:
qs = sys.argv[1]
else:
qs = ""
fp = StringIO(qs)
if headers is None:
headers = {'content-type':
"application/x-www-form-urlencoded"}
if headers is None:
headers = {}
if method == 'POST':
# Set default content-type for POST to what's traditional
headers['content-type'] = "application/x-www-form-urlencoded"
if 'CONTENT_TYPE' in environ:
headers['content-type'] = environ['CONTENT_TYPE']
if 'QUERY_STRING' in environ:
self.qs_on_post = environ['QUERY_STRING']
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
self.fp = fp or sys.stdin
self.headers = headers
self.outerboundary = outerboundary
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in self.headers:
cdisp, pdict = parse_header(self.headers['content-disposition'])
self.disposition = cdisp
self.disposition_options = pdict
self.name = None
if 'name' in pdict:
self.name = pdict['name']
self.filename = None
if 'filename' in pdict:
self.filename = pdict['filename']
# Process content-type header
#
# Honor any existing content-type header. But if there is no
# content-type header, use some sensible defaults. Assume
# outerboundary is "" at the outer level, but something non-false
# inside a multi-part. The default for an inner part is text/plain,
# but for an outer part it should be urlencoded. This should catch
# bogus clients which erroneously forget to include a content-type
# header.
#
# See below for what we do if there does exist a content-type header,
# but it happens to be something we don't understand.
if 'content-type' in self.headers:
ctype, pdict = parse_header(self.headers['content-type'])
elif self.outerboundary or method != 'POST':
ctype, pdict = "text/plain", {}
else:
ctype, pdict = 'application/x-www-form-urlencoded', {}
self.type = ctype
self.type_options = pdict
self.innerboundary = ""
if 'boundary' in pdict:
self.innerboundary = pdict['boundary']
clen = -1
if 'content-length' in self.headers:
try:
clen = int(self.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
raise ValueError, 'Maximum content length exceeded'
self.length = clen
self.list = self.file = None
self.done = 0
if ctype == 'application/x-www-form-urlencoded':
self.read_urlencoded()
elif ctype[:10] == 'multipart/':
self.read_multi(environ, keep_blank_values, strict_parsing)
else:
self.read_single()
def __repr__(self):
"""Return a printable representation."""
return "FieldStorage(%r, %r, %r)" % (
self.name, self.filename, self.value)
def __iter__(self):
return iter(self.keys())
def __getattr__(self, name):
if name != 'value':
raise AttributeError, name
if self.file:
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
elif self.list is not None:
value = self.list
else:
value = None
return value
def __getitem__(self, key):
"""Dictionary style indexing."""
if self.list is None:
raise TypeError, "not indexable"
found = []
for item in self.list:
if item.name == key: found.append(item)
if not found:
raise KeyError, key
if len(found) == 1:
return found[0]
else:
return found
def getvalue(self, key, default=None):
"""Dictionary style get() method, including 'value' lookup."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return value.value
else:
return default
def getfirst(self, key, default=None):
""" Return the first value received."""
if key in self:
value = self[key]
if type(value) is type([]):
return value[0].value
else:
return value.value
else:
return default
def getlist(self, key):
""" Return list of received values."""
if key in self:
value = self[key]
if type(value) is type([]):
return map(attrgetter('value'), value)
else:
return [value.value]
else:
return []
def keys(self):
"""Dictionary style keys() method."""
if self.list is None:
raise TypeError, "not indexable"
return list(set(item.name for item in self.list))
def has_key(self, key):
"""Dictionary style has_key() method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __contains__(self, key):
"""Dictionary style __contains__ method."""
if self.list is None:
raise TypeError, "not indexable"
return any(item.name == key for item in self.list)
def __len__(self):
"""Dictionary style len(x) support."""
return len(self.keys())
def __nonzero__(self):
return bool(self.list)
def read_urlencoded(self):
"""Internal: read data in query string format."""
qs = self.fp.read(self.length)
if self.qs_on_post:
qs += '&' + self.qs_on_post
query = urlparse.parse_qsl(qs, self.keep_blank_values,
self.strict_parsing, self.max_num_fields)
self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
FieldStorageClass = None
def read_multi(self, environ, keep_blank_values, strict_parsing):
"""Internal: read a part that is itself multipart."""
ib = self.innerboundary
if not valid_boundary(ib):
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
if self.qs_on_post:
query = urlparse.parse_qsl(self.qs_on_post,
self.keep_blank_values,
self.strict_parsing,
self.max_num_fields)
self.list.extend(MiniFieldStorage(key, value)
for key, value in query)
FieldStorageClass = None
# Propagate max_num_fields into the sub class appropriately
max_num_fields = self.max_num_fields
if max_num_fields is not None:
max_num_fields -= len(self.list)
klass = self.FieldStorageClass or self.__class__
part = klass(self.fp, {}, ib,
environ, keep_blank_values, strict_parsing,
max_num_fields)
# Throw first part away
while not part.done:
headers = rfc822.Message(self.fp)
part = klass(self.fp, headers, ib,
environ, keep_blank_values, strict_parsing,
max_num_fields)
if max_num_fields is not None:
max_num_fields -= 1
if part.list:
max_num_fields -= len(part.list)
if max_num_fields < 0:
raise ValueError('Max number of fields exceeded')
self.list.append(part)
self.skip_lines()
def read_single(self):
"""Internal: read an atomic part."""
if self.length >= 0:
self.read_binary()
self.skip_lines()
else:
self.read_lines()
self.file.seek(0)
bufsize = 8*1024 # I/O buffering size for copy to file
def read_binary(self):
"""Internal: read binary data."""
self.file = self.make_file('b')
todo = self.length
if todo >= 0:
while todo > 0:
data = self.fp.read(min(todo, self.bufsize))
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def read_lines(self):
"""Internal: read lines until EOF or outerboundary."""
self.file = self.__file = StringIO()
if self.outerboundary:
self.read_lines_to_outerboundary()
else:
self.read_lines_to_eof()
def __write(self, line):
if self.__file is not None:
if self.__file.tell() + len(line) > 1000:
self.file = self.make_file('')
self.file.write(self.__file.getvalue())
self.__file = None
self.file.write(line)
def read_lines_to_eof(self):
"""Internal: read lines until EOF."""
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
self.__write(line)
def read_lines_to_outerboundary(self):
"""Internal: read lines until outerboundary."""
next = "--" + self.outerboundary
last = next + "--"
delim = ""
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if delim == "\r":
line = delim + line
delim = ""
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
odelim = delim
if line[-2:] == "\r\n":
delim = "\r\n"
line = line[:-2]
last_line_lfend = True
elif line[-1] == "\n":
delim = "\n"
line = line[:-1]
last_line_lfend = True
elif line[-1] == "\r":
# We may interrupt \r\n sequences if they span the 2**16
# byte boundary
delim = "\r"
line = line[:-1]
last_line_lfend = False
else:
delim = ""
last_line_lfend = False
self.__write(odelim + line)
def skip_lines(self):
"""Internal: skip lines until outer boundary if defined."""
if not self.outerboundary or self.done:
return
next = "--" + self.outerboundary
last = next + "--"
last_line_lfend = True
while 1:
line = self.fp.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
last_line_lfend = line.endswith('\n')
def make_file(self, binary=None):
"""Overridable: return a readable & writable file.
The file will be used as follows:
- data is written to it
- seek(0)
- data is read from it
The 'binary' argument is unused -- the file is always opened
in binary mode.
This version opens a temporary file for reading and writing,
and immediately deletes (unlinks) it. The trick (on Unix!) is
that the file can still be used, but it can't be opened by
another process, and it will automatically be deleted when it
is closed or when the current process terminates.
If you want a more permanent file, you derive a class which
overrides this method. If you want a visible temporary file
that is nevertheless automatically deleted when the script
terminates, try defining a __del__ method in a derived class
which unlinks the temporary files you have created.
"""
import tempfile
return tempfile.TemporaryFile("w+b")
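# Illustrative only: the kind of subclass the make_file() docstring describes,
# keeping uploads as named files on disk (cleanup is left to the caller; this
# class is not part of the module's API).
class _ExamplePersistentFieldStorage(FieldStorage):
    def make_file(self, binary=None):
        import tempfile
        return tempfile.NamedTemporaryFile("w+b", delete=False)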
# Backwards Compatibility Classes
# ===============================
class FormContentDict(UserDict.UserDict):
"""Form content as dictionary with a list of values per field.
form = FormContentDict()
form[key] -> [value, value, ...]
key in form -> Boolean
form.keys() -> [key, key, ...]
form.values() -> [[val, val, ...], [val, val, ...], ...]
form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
form.dict == {key: [val, val, ...], ...}
"""
def __init__(self, environ=os.environ, keep_blank_values=0, strict_parsing=0):
self.dict = self.data = parse(environ=environ,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing)
self.query_string = environ['QUERY_STRING']
class SvFormContentDict(FormContentDict):
"""Form content as dictionary expecting a single value per field.
If you only expect a single value for each field, then form[key]
will return that single value. It will raise an IndexError if
that expectation is not true. If you expect a field to have
    possibly multiple values, then you can use form.getlist(key) to
get all of the values. values() and items() are a compromise:
they return single strings where there is a single value, and
lists of strings otherwise.
"""
def __getitem__(self, key):
if len(self.dict[key]) > 1:
raise IndexError, 'expecting a single value'
return self.dict[key][0]
def getlist(self, key):
return self.dict[key]
def values(self):
result = []
for value in self.dict.values():
if len(value) == 1:
result.append(value[0])
else: result.append(value)
return result
def items(self):
result = []
for key, value in self.dict.items():
if len(value) == 1:
result.append((key, value[0]))
else: result.append((key, value))
return result
class InterpFormContentDict(SvFormContentDict):
"""This class is present for backwards compatibility only."""
def __getitem__(self, key):
v = SvFormContentDict.__getitem__(self, key)
if v[0] in '0123456789+-.':
try: return int(v)
except ValueError:
try: return float(v)
except ValueError: pass
return v.strip()
def values(self):
result = []
for key in self.keys():
try:
result.append(self[key])
except IndexError:
result.append(self.dict[key])
return result
def items(self):
result = []
for key in self.keys():
try:
result.append((key, self[key]))
except IndexError:
result.append((key, self.dict[key]))
return result
class FormContent(FormContentDict):
"""This class is present for backwards compatibility only."""
def values(self, key):
if key in self.dict :return self.dict[key]
else: return None
def indexed_value(self, key, location):
if key in self.dict:
if len(self.dict[key]) > location:
return self.dict[key][location]
else: return None
else: return None
def value(self, key):
if key in self.dict: return self.dict[key][0]
else: return None
def length(self, key):
return len(self.dict[key])
def stripped(self, key):
if key in self.dict: return self.dict[key][0].strip()
else: return None
def pars(self):
return self.dict
# Test/debug code
# ===============
def test(environ=os.environ):
"""Robust test CGI script, usable as main program.
Write minimal HTTP headers and dump all information provided to
the script in HTML form.
"""
print "Content-type: text/html"
print
sys.stderr = sys.stdout
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
print_environ_usage()
def f():
exec "testing print_exception() -- <I>italics?</I>"
def g(f=f):
f()
print "<H3>What follows is a test, not an actual exception:</H3>"
g()
except:
print_exception()
print "<H1>Second try with a small maxlen...</H1>"
global maxlen
maxlen = 50
try:
form = FieldStorage() # Replace with other classes to test those
print_directory()
print_arguments()
print_form(form)
print_environ(environ)
except:
print_exception()
def print_exception(type=None, value=None, tb=None, limit=None):
if type is None:
type, value, tb = sys.exc_info()
import traceback
print
print "<H3>Traceback (most recent call last):</H3>"
list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(type, value)
print "<PRE>%s<B>%s</B></PRE>" % (
escape("".join(list[:-1])),
escape(list[-1]),
)
del tb
def print_environ(environ=os.environ):
"""Dump the shell environment as HTML."""
keys = environ.keys()
keys.sort()
print
print "<H3>Shell Environment:</H3>"
print "<DL>"
for key in keys:
print "<DT>", escape(key), "<DD>", escape(environ[key])
print "</DL>"
print
def print_form(form):
"""Dump the contents of a form as HTML."""
keys = form.keys()
keys.sort()
print
print "<H3>Form Contents:</H3>"
if not keys:
print "<P>No form fields."
print "<DL>"
for key in keys:
print "<DT>" + escape(key) + ":",
value = form[key]
print "<i>" + escape(repr(type(value))) + "</i>"
print "<DD>" + escape(repr(value))
print "</DL>"
print
def print_directory():
"""Dump the current directory as HTML."""
print
print "<H3>Current Working Directory:</H3>"
try:
pwd = os.getcwd()
except os.error, msg:
print "os.error:", escape(str(msg))
else:
print escape(pwd)
print
def print_arguments():
print
print "<H3>Command Line Arguments:</H3>"
print
print sys.argv
print
def print_environ_usage():
"""Dump a list of environment variables used by CGI as HTML."""
print """
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well. Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
"""
# Utilities
# =========
def escape(s, quote=None):
'''Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true, the quotation mark character (")
is also translated.'''
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
return s
def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
import re
return re.match(_vb_pattern, s)
# Invoke mainline
# ===============
# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
test()
|
|
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import sys, tempfile, os
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import unittest
from test.support import requires, import_module
requires('curses')
# If either of these doesn't exist, the tests are skipped.
curses = import_module('curses')
curses.panel = import_module('curses.panel')
# XXX: if newterm was supported we could use it instead of initscr and not exit
term = os.environ.get('TERM')
if not term or term == 'unknown':
raise unittest.SkipTest("$TERM=%r, calling initscr() may cause exit" % term)
if sys.platform == "cygwin":
raise unittest.SkipTest("cygwin's curses mostly just hangs")
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
    for args in [('a',), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
try:
win.border(65, 66, 67, 68,
69, [], 71, 72)
except TypeError:
pass
else:
raise RuntimeError("Expected win.border() to raise TypeError")
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 3, 3, 2, 1)
win2.overwrite(win, 1, 2, 3, 3, 2, 1)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def module_funcs(stdscr):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp(b'abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm(b'cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
        # An availmask of 0 indicates that mouse support is not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def unit_tests():
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
if ascii.unctrl(ch) != expected:
print('curses.unctrl fails on character', repr(ch))
def test_userptr_without_set(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
try:
p.userptr()
raise RuntimeError('userptr should fail since not set')
except curses.panel.error:
pass
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
curses.resizeterm(lines - 1, cols + 1)
if curses.LINES != lines - 1 or curses.COLS != cols + 1:
raise RuntimeError("Expected resizeterm to update LINES and COLS")
def test_issue6243(stdscr):
curses.ungetch(1025)
stdscr.getkey()
def test_unget_wch(stdscr):
if not hasattr(curses, 'unget_wch'):
return
encoding = stdscr.encoding
for ch in ('a', '\xe9', '\u20ac', '\U0010FFFF'):
try:
ch.encode(encoding)
except UnicodeEncodeError:
continue
try:
curses.unget_wch(ch)
except Exception as err:
raise Exception("unget_wch(%a) failed with encoding %s: %s"
% (ch, stdscr.encoding, err))
read = stdscr.get_wch()
if read != ch:
raise AssertionError("%r != %r" % (read, ch))
code = ord(ch)
curses.unget_wch(code)
read = stdscr.get_wch()
if read != ch:
raise AssertionError("%r != %r" % (read, ch))
def test_issue10570():
b = curses.tparm(curses.tigetstr("cup"), 5, 3)
assert type(b) is bytes
curses.putp(b)
def test_encoding(stdscr):
import codecs
encoding = stdscr.encoding
codecs.lookup(encoding)
try:
stdscr.encoding = 10
except TypeError:
pass
else:
raise AssertionError("TypeError not raised")
stdscr.encoding = encoding
try:
del stdscr.encoding
except TypeError:
pass
else:
raise AssertionError("TypeError not raised")
def main(stdscr):
curses.savetty()
try:
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
test_resize_term(stdscr)
test_issue6243(stdscr)
test_unget_wch(stdscr)
test_issue10570()
test_encoding(stdscr)
finally:
curses.resetty()
def test_main():
if not sys.__stdout__.isatty():
raise unittest.SkipTest("sys.__stdout__ is not a tty")
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=sys.__stdout__.fileno())
try:
stdscr = curses.initscr()
main(stdscr)
finally:
curses.endwin()
unit_tests()
if __name__ == '__main__':
curses.wrapper(main)
unit_tests()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Module storing functions to calculate statistical metrics from numpy arrays
'''
import subprocess
import os, sys
import datetime
import numpy as np
import numpy.ma as ma
import scipy.stats as stats
import matplotlib.pyplot as plt
from toolkit import plots, process
from ocw import plotter
from utils import misc
from storage import files
def calcAnnualCycleMeans(dataset1):
'''
Purpose::
Calculate annual cycle in terms of monthly means at every grid point.
Input::
dataset1 - 3d numpy array of data in (t,lat,lon) or 1d numpy array timeseries
Output::
        data - the input reshaped into (# of years, 12, ...) form
        means - if 3d numpy was entered, 3d (12, lat, lon); if 1d data entered
                it is a time series of the data of length 12
'''
data = misc.reshapeMonthlyData(dataset1)
means = data.mean(axis = 0)
return data, means
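# Illustrative only: a minimal sketch, assuming misc.reshapeMonthlyData turns a
# (12 * nYr, lat, lon) array into (nYr, 12, lat, lon).
def _exampleAnnualCycleMeans():
    dataset = np.random.rand(24, 2, 3)            # 2 years on a 2x3 grid
    data, means = calcAnnualCycleMeans(dataset)
    assert means.shape == (12, 2, 3)              # one mean field per month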
def calcAnnualCycleMeansSubRegion(dataset1):
'''
Purpose::
Calculate annual cycle in terms of monthly means at every sub-region.
Input::
dataset1 - 2d numpy array of data in (region, t)
Output::
means - (region, # of months)
'''
nregion, nT = dataset1.shape
data = dataset1.reshape([nregion, nT/12, 12])
means = data.mean(axis = 1)
return data, means
def calcClimYear(dataset1):
'''
Purpose::
Calculate annual mean timeseries and climatology for both 2-D and point time series.
Input::
dataset1 - 3d numpy array of data in (t,lat,lon) or 1d numpy array timeseries
Output::
tSeries - if 3d numpy was entered, 3d (nYr,lat,lon), if 1d data entered
it is a timeseries of the data of length nYr
means - if 3d numpy was entered, 2d (lat,lon), if 1d data entered
it is a floating point number representing the overall mean
'''
data = misc.reshapeMonthlyData(dataset1)
tSeries = data.mean(axis = 1)
means = tSeries.mean(axis = 0)
return tSeries, means
def calcClimSeason(monthBegin, monthEnd, dataset1):
'''
Purpose ::
        Calculate seasonal mean time series and climatology for both 3-D and point time series.
For example, to calculate DJF mean time series, monthBegin = 12, monthEnd =2
This can handle monthBegin=monthEnd i.e. for climatology of a specific month
Input::
monthBegin - an integer for the beginning month (Jan =1)
monthEnd - an integer for the ending month (Jan = 1)
        dataset1 - 3d numpy array of data in (t,lat,lon) or 1d numpy array time series
Output::
        tSeries - if 3d numpy was entered, 3d (nYr, lat, lon) where nYr is the number of
                  years (one fewer if monthBegin > monthEnd); if 1d data entered it is a
                  time series of the data of length nYr
means - if 3d numpy was entered, 2d (lat,lon), if 1d data entered
it is a floating point number representing the overall mean
'''
if monthBegin > monthEnd:
        # Offset the original array so that the first month
# becomes monthBegin, note that this cuts off the first year of data
offset = slice(monthBegin - 1, monthBegin - 13)
data = misc.reshapeMonthlyData(dataset1[offset])
monthIndex = slice(0, 13 - monthBegin + monthEnd)
else:
# Since monthBegin <= monthEnd, just take a slice containing those months
data = misc.reshapeMonthlyData(dataset1)
monthIndex = slice(monthBegin - 1, monthEnd)
tSeries = data[:, monthIndex].mean(axis = 1)
means = tSeries.mean(axis = 0)
return tSeries, means
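# Illustrative only: a DJF (Dec-Jan-Feb) climatology from 10 years of monthly
# data; wrapping past December drops the first year, leaving 9 seasonal means
# (this assumes misc.reshapeMonthlyData returns a (years, 12, ...) array).
def _exampleClimSeasonDJF():
    dataset = np.random.rand(120, 2, 3)
    tSeries, means = calcClimSeason(12, 2, dataset)
    assert tSeries.shape == (9, 2, 3)
    assert means.shape == (2, 3)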
def calcClimSeasonSubRegion(monthBegin, monthEnd, dataset1):
'''
Purpose ::
        Calculate seasonal mean time series and climatology for sub-region time series.
For example, to calculate DJF mean time series, monthBegin = 12, monthEnd =2
This can handle monthBegin=monthEnd i.e. for climatology of a specific month
Input::
monthBegin - an integer for the beginning month (Jan =1)
monthEnd - an integer for the ending month (Jan = 1)
        dataset1 - 2d numpy array of data in (region, t)
Output::
        tSeries - (region, number of years; one fewer if monthBegin > monthEnd)
means - (region)
'''
nregion, nT = dataset1.shape
nYR = nT/12
if monthBegin > monthEnd:
        # Offset the original array so that the first month
# becomes monthBegin, note that this cuts off the first year of data
offset = slice(monthBegin - 1, monthBegin - 13)
data = dataset1[:,offset].reshape([nregion,nYR-1, 12])
monthIndex = slice(0, 13 - monthBegin + monthEnd)
else:
# Since monthBegin <= monthEnd, just take a slice containing those months
data = dataset1.reshape([nregion,nYR,12])
monthIndex = slice(monthBegin - 1, monthEnd)
tSeries = data[:, :, monthIndex].mean(axis = 2)
means = tSeries.mean(axis = 1)
return tSeries, means
def calcAnnualCycleStdev(dataset1):
'''
Purpose::
Calculate monthly standard deviations for every grid point
Input::
dataset1 - 3d numpy array of data in (12* number of years,lat,lon)
Output::
stds - if 3d numpy was entered, 3d (12,lat,lon)
'''
data = misc.reshapeMonthlyData(dataset1)
stds = data.std(axis = 0, ddof = 1)
return stds
def calcAnnualCycleStdevSubRegion(dataset1):
'''
Purpose::
Calculate monthly standard deviations for every sub-region
Input::
dataset1 - 2d numpy array of data in (nregion, 12* number of years)
Output::
stds - (nregion, 12)
'''
nregion, nT = dataset1.shape
data = dataset1.reshape([nregion, nT/12, 12])
stds = data.std(axis = 1, ddof = 1)
return stds
def calcAnnualCycleDomainMeans(dataset1):
'''
Purpose::
Calculate spatially averaged monthly climatology and standard deviation
Input::
dataset1 - 3d numpy array of data in (12* number of years,lat,lon)
Output::
means - time series (12)
'''
data = misc.reshapeMonthlyData(dataset1)
# Calculate the means, month by month
means = np.zeros(12)
for i in np.arange(12):
means[i] = data[:, i, :, :].mean()
return means
def calcSpatialStdevRatio(evaluationData, referenceData):
'''
Purpose ::
Calculate the ratio of spatial standard deviation (model standard deviation)/(observed standard deviation)
Input ::
evaluationData - model data array (lat, lon)
referenceData- observation data array (lat,lon)
Output::
        ratio of standard deviations (a scalar)
'''
stdevRatio = evaluationData[(evaluationData.mask==False) & (referenceData.mask==False)].std()/ \
referenceData[(evaluationData.mask==False) & (referenceData.mask==False)].std()
return stdevRatio
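# Illustrative only: both inputs are expected to be masked arrays on a common
# grid; a ratio above 1 means the model field is spatially more variable than
# the observations.
def _exampleSpatialStdevRatio():
    model = ma.array(np.random.rand(4, 5), mask=np.zeros((4, 5), dtype=bool))
    obs = ma.array(np.random.rand(4, 5), mask=np.zeros((4, 5), dtype=bool))
    return calcSpatialStdevRatio(model, obs)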
def calcTemporalStdev(dataset1):
'''
Purpose::
Calculate sample standard deviations over the time
Input::
dataset1 - 3d numpy array of data in (time,lat,lon)
Output::
stds - time series (lat, lon)
'''
stds = dataset1.std(axis = 0, ddof = 1)
return stds
def calcAnnualCycleDomainStdev(dataset1):
'''
Purpose::
Calculate sample standard deviations representing the domain in each month
Input::
dataset1 - 3d numpy array of data in (12* number of years,lat,lon)
Output::
stds - time series (12)
'''
data = misc.reshapeMonthlyData(dataset1)
    # Calculate the standard deviation, month by month
stds = np.zeros(12)
for i in np.arange(12):
stds[i] = data[:, i, :, :].std(ddof = 1)
return stds
def calcBiasAveragedOverTime(evaluationData, referenceData, option): # Mean Bias
'''
Purpose::
Calculate the mean difference between two fields over time for each grid point.
Input::
referenceData - array of data
evaluationData - array of data with same dimension of referenceData
option - string indicating absolute values or not
Output::
bias - difference between referenceData and evaluationData averaged over the first dimension
'''
# Calculate mean difference between two fields over time for each grid point
    # Preprocessing of both obs and model data ensures the absence of missing values
diff = evaluationData - referenceData
if(option == 'abs'):
diff = abs(diff)
bias = diff.mean(axis = 0)
return bias
def calcBiasAveragedOverTimeAndSigLev(evaluationData, referenceData):
'''
Purpose::
Calculate mean difference between two fields over time for each grid point
        Classify grid points as missing when too many time steps are missing
        (using a threshold data requirement), i.e. if the working time unit is
        monthly and we average over several months, we must decide what
        fraction of missing data we tolerate before classifying a grid
        point as missing.
Input::
referenceData - array of data
evaluationData - array of data with same dimension of referenceData
Output::
bias - difference between referenceData and evaluationData averaged over the first dimension
sigLev - significance of the difference (masked array)
For example: sig[iy,ix] = 0.95 means that the observation and model is different at 95% confidence level
at X=ix and Y=iy
'''
# If either gridcell in each data set is missing, set that cell to
# missing for the output significance level
evaluationDataMask = process.create_mask_using_threshold(evaluationData, threshold = 0.75)
referenceDataMask = process.create_mask_using_threshold(referenceData, threshold = 0.75)
# The overall mask associated with missing data
overallMask = np.logical_or(evaluationDataMask, referenceDataMask)
diff = evaluationData - referenceData
bias = diff.mean(axis = 0)
sigLev = 1 - stats.ttest_rel(evaluationData, referenceData, axis = 0)[1]
sigLev[overallMask] = -100.
sigLev = ma.masked_equal(sigLev, -100.)
# Set mask for bias metric using missing data in obs or model data series
# i.e. if obs contains more than threshold (e.g.50%) missing data
# then classify time average bias as missing data for that location.
bias = ma.masked_array(bias.data, overallMask)
return bias, sigLev
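# A hedged usage sketch for calcBiasAveragedOverTimeAndSigLev with synthetic
# masked arrays; it assumes the inputs behave like the masked (time, lat, lon)
# arrays used elsewhere in this module, and the offsets are illustrative only.
def _exampleCalcBiasAveragedOverTimeAndSigLev():
    obs = ma.masked_array(np.random.rand(24, 5, 5))
    mdl = obs + 0.5 + 0.1 * np.random.rand(24, 5, 5)
    bias, sigLev = calcBiasAveragedOverTimeAndSigLev(mdl, obs)
    print 'domain-mean bias:', bias.mean()  # close to 0.55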
def calcBiasAveragedOverTimeAndDomain(evaluationData, referenceData):
'''
Purpose::
Calculate the mean difference between two fields over time and domain
Input::
referenceData - array of data
evaluationData - array of data with same dimension of referenceData
Output::
bias - difference between referenceData and evaluationData averaged over time and space
'''
diff = evaluationData - referenceData
bias = diff.mean()
return bias
def calcBias(evaluationData, referenceData):
'''
Purpose::
Calculate the difference between two fields at each grid point
Input::
referenceData - array of data
evaluationData - array of data with same dimension of referenceData
Output::
diff - difference between referenceData and evaluationData
'''
diff = evaluationData - referenceData
return diff
def calcRootMeanSquaredDifferenceAveragedOverTime(evaluationData, referenceData):
'''
Purpose::
Calculate root mean squared difference (RMS errors) averaged over time between two fields for each grid point
Input::
referenceData - array of data
evaluationData - array of data with same dimension of referenceData
Output::
rms - root mean squared difference, if the input is 1-d data, the output becomes a single floating number.
'''
sqdiff = (evaluationData - referenceData)** 2
rms = np.sqrt(sqdiff.mean(axis = 0))
return rms
def calcRootMeanSquaredDifferenceAveragedOverTimeAndDomain(evaluationData, referenceData):
'''
Purpose::
Calculate root mean squared difference (RMS errors) averaged over time and space between two fields
Input::
referenceData - array of data (should be 3-d array)
evaluationData - array of data with same dimension of referenceData
Output::
rms - root mean squared difference averaged over time and space
'''
sqdiff = (evaluationData - referenceData)** 2
rms = np.sqrt(sqdiff.mean())
return rms
def calcTemporalCorrelation(evaluationData, referenceData):
'''
Purpose ::
Calculate the temporal correlation.
    Assumption(s) ::
        The second dimension of the two datasets is the time axis.
    Input ::
        evaluationData - model data array (region, time)
        referenceData - observation data array (region, time)
    Output::
        temporalCorrelation - a 1-D array of temporal correlation coefficients, one per sub-region
        sigLev - a 1-D array of confidence levels related to temporalCorrelation
REF: 277-281 in Stat methods in atmos sci by Wilks, 1995, Academic Press, 467pp.
sigLev: the correlation between model and observation is significant at sigLev * 100 %
'''
evaluationDataMask = process.create_mask_using_threshold(evaluationData, threshold = 0.75)
referenceDataMask = process.create_mask_using_threshold(referenceData, threshold = 0.75)
nregion = evaluationData.shape[0]
temporalCorrelation = ma.zeros([nregion])-100.
sigLev = ma.zeros([nregion])-100.
for iregion in np.arange(nregion):
temporalCorrelation[iregion], sigLev[iregion] = stats.pearsonr(evaluationData[iregion,:], referenceData[iregion,:])
sigLev[iregion] = 1 - sigLev[iregion]
temporalCorrelation=ma.masked_equal(temporalCorrelation.data, -100.)
sigLev=ma.masked_equal(sigLev.data, -100.)
return temporalCorrelation, sigLev
def calcTemporalCorrelationSubRegion(evaluationData, referenceData):
'''
Purpose ::
Calculate the temporal correlation.
Assumption(s) ::
both evaluation and reference data are subregion averaged time series
    Input ::
        evaluationData - model data time series for one sub-region [t]
        referenceData - observation data time series for one sub-region [t]
    Output::
        temporalCorrelation - a single temporal correlation coefficient
        (the associated confidence level is computed internally but not returned)
    REF: 277-281 in Stat methods in atmos sci by Wilks, 1995, Academic Press, 467pp.
'''
temporalCorrelation = 0.
sigLev = 0.
t1=evaluationData[:]
t2=referenceData[:]
if t1.min()!=t1.max() and t2.min()!=t2.max():
temporalCorrelation, sigLev=stats.pearsonr(t1,t2)
sigLev=1.-sigLev # p-value => confidence level
return temporalCorrelation
def calcPatternCorrelation(evaluationData, referenceData):
'''
Purpose ::
Calculate the spatial correlation.
Input ::
evaluationData - model data array (lat, lon)
referenceData- observation data array (lat,lon)
Output::
patternCorrelation - a single floating point
sigLev - a single floating point representing the confidence level
'''
patternCorrelation, sigLev = stats.pearsonr(evaluationData[(evaluationData.mask==False) & (referenceData.mask==False)],
referenceData[(evaluationData.mask==False) & (referenceData.mask==False)])
return patternCorrelation, sigLev
def calcPatternCorrelationEachTime(evaluationData, referenceData):
'''
Purpose ::
Calculate the spatial correlation for each time
Assumption(s) ::
The first dimension of two datasets is the time axis.
Input ::
evaluationData - model data array (time,lat, lon)
referenceData- observation data array (time,lat,lon)
Output::
patternCorrelation - a timeseries (time)
sigLev - a time series (time)
'''
nT = evaluationData.shape[0]
patternCorrelation = ma.zeros(nT)-100.
sigLev = ma.zeros(nT)-100.
for it in np.arange(nT):
patternCorrelation[it], sigLev[it] = calcPatternCorrelation(evaluationData[it,:,:], referenceData[it,:,:])
return patternCorrelation,sigLev
def calcNashSutcliff(evaluationData, referenceData):
'''
Assumption(s)::
Both evaluationData and referenceData are the same shape.
* lat, lon must match up
* time steps must align (i.e. months vs. months)
Input::
evaluationData - 3d (time, lat, lon) array of data
referenceData - 3d (time, lat, lon) array of data
Output:
nashcor - 1d array aligned along the time dimension of the input
datasets. Time Series of Nash-Sutcliff Coefficient of efficiency
'''
# Flatten the spatial dimensions
data1 = evaluationData[:]
data2 = referenceData[:]
nT = data1.shape[0]
    data1.shape = nT, data1.size // nT
    data2.shape = nT, data2.size // nT
meanData2 = data2.mean(axis = 1)
# meanData2 must be reshaped to 2D as to obey
# numpy broadcasting rules
meanData2.shape = nT, 1
nashcor = 1 - ((((data2 - data1) ** 2).sum(axis = 1)) /
(((data2 - meanData2) ** 2).sum(axis = 1)))
return nashcor
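# A minimal sketch for calcNashSutcliff with synthetic (time, lat, lon)
# arrays: a perfect match yields a coefficient of 1 at every time step.
def _exampleCalcNashSutcliff():
    ref = np.random.rand(24, 10, 10)
    nashcor = calcNashSutcliff(ref.copy(), ref)
    print 'perfect-match Nash-Sutcliffe:', nashcor.min()  # 1.0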
def calcPdf(evaluationData, referenceData):
    '''
    Purpose::
        Calculate a normalized Probability Distribution Function (PDF)
        similarity score with bins set according to the data range.
        Equation from Perkins et al. (2007):
        PS = sum(min(Z_O_i, Z_M_i)), where Z_O and Z_M are the normalized
        histograms of the observation and model data and the sum runs over
        the bins.
        Called in do_rcmes_processing_sub.py.
    Input::
        evaluationData - model data array (time, lat, lon)
        referenceData - observation data array (time, lat, lon), where time
        is the number of time values, e.g. for the period 199001010000 -
        199201010000: if annual means (opt 1) were chosen, then
        referenceData.shape = (2, lat, lon); if monthly means (opt 2), then
        referenceData.shape = (24, lat, lon)
        User inputs: number of bins to use and edges (min and max)
    Output::
        similarityScore - a single float representing the PDF skill score
    '''
# float to store the final PDF similarity score
similarityScore = 0.0
print 'min modelData', evaluationData[:, :, :].min()
print 'max modelData', evaluationData[:, :, :].max()
print 'min obsData', referenceData[:, :, :].min()
print 'max obsData', referenceData[:, :, :].max()
# find a distribution for the entire dataset
    # Prompt the user to enter the min, max and number of bin values.
    # The max/min info above is to help guide the user with these choices
print '****PDF input values from user required **** \n'
    nbins = int(raw_input('Please enter the number of bins to use. \n'))
minEdge = float(raw_input('Please enter the minimum value to use for the edge. \n'))
maxEdge = float(raw_input('Please enter the maximum value to use for the edge. \n'))
mybins = np.linspace(minEdge, maxEdge, nbins)
print 'nbins is', nbins, 'mybins are', mybins
    pdfMod, edges = np.histogram(evaluationData, bins = mybins, density = True)
print 'evaluationData distribution and edges', pdfMod, edges
    pdfObs, edges = np.histogram(referenceData, bins = mybins, density = True)
print 'referenceData distribution and edges', pdfObs, edges
    # find the minimum at each bin between the two distributions
    for i, model_value in enumerate(pdfMod):
        print 'model_value is', model_value, 'pdfObs[', i, '] is', pdfObs[i]
        similarityScore += min(model_value, pdfObs[i])
print 'similarity_score is', similarityScore
return similarityScore
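# A hedged, non-interactive variant of calcPdf: the same Perkins et al. (2007)
# score, but with the bin settings passed as arguments instead of prompted
# from the user, and the per-bin minimum taken with numpy. The name and
# signature are illustrative, not part of the original API.
def _calcPdfNonInteractive(evaluationData, referenceData, nbins, minEdge, maxEdge):
    mybins = np.linspace(minEdge, maxEdge, nbins)
    pdfMod, _ = np.histogram(evaluationData, bins=mybins, density=True)
    pdfObs, _ = np.histogram(referenceData, bins=mybins, density=True)
    return np.minimum(pdfMod, pdfObs).sum()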
def calculate_metrics_and_make_plots(varName, workdir, lons, lats, obsData, mdlData, obsRgn, mdlRgn, obsList, mdlList, subRegions, \
subRgnLon0, subRgnLon1, subRgnLat0, subRgnLat1):
'''
Purpose::
Calculate all the metrics used in Kim et al. [2013] paper and plot them
Input::
        varName - evaluating variable
        workdir - working directory where output figures are saved
        lons - longitude array
        lats - latitude array
        obsData - observation data array (nobs, t, lat, lon)
        mdlData - model data array (nmodel, t, lat, lon)
        obsRgn - sub-region averaged observation time series (nobs, nregion, t)
        mdlRgn - sub-region averaged model time series (nmodel, nregion, t)
        obsList - list of observation data names
        mdlList - list of model names
        subRegions - boolean, True if sub-region analysis is to be performed
        subRgnLon0, subRgnLat0 - southwest boundary of sub-regions [numSubRgn]
        subRgnLon1, subRgnLat1 - northeast boundary of sub-regions [numSubRgn]
Output::
png files
'''
nobs, nt, ny, nx = obsData.shape
nmodel = mdlData.shape[0]
### TODO: unit conversion (K to C)
if varName == 'temp':
obsData[0, :, :, :] = obsData[0, :, :, :] - 273.15
if subRegions:
obsRgn[0, :, :] = obsRgn[0, :, :] - 273.15
if varName == 'prec' and obsData.max() > mdlData.max()*1000.:
mdlData[:, :, :, :] = mdlData[:, :, :, :]*86400.
if subRegions:
mdlRgn[:, :, :] = mdlRgn[:, :, :]*86400.
oTser, oClim = calcClimYear( obsData[0, :, :, :])
bias_of_overall_average = ma.zeros([nmodel, ny, nx])
spatial_stdev_ratio = np.zeros([nmodel])
spatial_corr = np.zeros([nmodel])
mdlList.append('ENS')
for imodel in np.arange(nmodel):
mTser, mClim = calcClimYear( mdlData[imodel,:,:,:])
bias_of_overall_average[imodel,:,:] = calcBias(mClim, oClim)
spatial_corr[imodel], sigLev = calcPatternCorrelation(oClim, mClim)
spatial_stdev_ratio[imodel] = calcSpatialStdevRatio(mClim, oClim)
fig_return = plotter.draw_contour_map(oClim, lats, lons, workdir+'/observed_climatology_'+varName, fmt='png', gridshape=(1, 1),
clabel='', ptitle='', subtitles=obsList, cmap=None,
clevs=None, nlevs=10, parallels=None, meridians=None,
extend='neither')
# TODO:
# Be sure to update "gridshape" argument to be the number of sub plots (rows,columns). This should be improved so that the
# gridshape is optimally determined for a given number of models. For example:
# For 3 models, a gridshape of (2,2) would be sensible:
# X X
# X
#
fig_return = plotter.draw_contour_map(bias_of_overall_average, lats, lons, workdir+'/bias_of_climatology_'+varName, fmt='png', gridshape=(6, 2),
clabel='', ptitle='', subtitles=mdlList, cmap=None,
clevs=None, nlevs=10, parallels=None, meridians=None,
extend='neither')
Taylor_data = np.array([spatial_stdev_ratio, spatial_corr]).transpose()
fig_return = plotter.draw_taylor_diagram(Taylor_data, mdlList, refname='CRU', fname = workdir+'/Taylor_'+varName, fmt='png',frameon=False)
if subRegions:
nseason = 2 # (0: summer and 1: winter)
nregion = len(subRgnLon0)
season_name = ['summer','winter']
rowlabels = ['PNw','PNe','CAn','CAs','SWw','SWe','COL','GPn','GPc','GC','GL','NE','SE','FL']
collabels = ['M1','M2','M3','M4','M5','M6','ENS']
collabels[nmodel-1] = 'ENS'
for iseason in [0,1]:
portrait_subregion = np.zeros([4, nregion, nmodel])
portrait_titles = ['(a) Normalized Bias', '(b) Normalized STDV', '(c) Normalized RMSE', '(d) Correlation']
if iseason == 0:
monthBegin=6
monthEnd=8
if iseason == 1:
monthBegin=12
monthEnd=2
        obsTser, obsClim = calcClimSeasonSubRegion(monthBegin, monthEnd, obsRgn[0,:,:])
for imodel in np.arange(nmodel):
            mTser, mClim = calcClimSeasonSubRegion(monthBegin, monthEnd, mdlRgn[imodel,:,:])
for iregion in np.arange(nregion):
portrait_subregion[0,iregion,imodel] = calcBias(mClim[iregion],obsClim[iregion])/calcTemporalStdev(obsTser[iregion,:])
portrait_subregion[1,iregion,imodel] = calcTemporalStdev(mTser[iregion,:])/ calcTemporalStdev(obsTser[iregion,:])
portrait_subregion[2,iregion,imodel] = calcRootMeanSquaredDifferenceAveragedOverTime(mTser[iregion,:], obsTser[iregion,:])/calcTemporalStdev(obsTser[iregion,:])
portrait_subregion[3,iregion, imodel] = calcTemporalCorrelationSubRegion(mTser[iregion,:],obsTser[iregion,:])
portrait_return = plotter.draw_portrait_diagram(portrait_subregion, rowlabels, collabels[0:nmodel], workdir+'/portrait_diagram_'+season_name[iseason]+'_'+varName, fmt='png',
gridshape=(2, 2), xlabel='', ylabel='', clabel='',
ptitle='', subtitles=portrait_titles, cmap=None, clevs=None,
nlevs=10, extend='neither')
# annual cycle
nmonth = 12
times = np.arange(nmonth)
data_names = [obsList[0]] + list(mdlList)
annual_cycle = np.zeros([nregion, nmonth, nmodel+1])
obsTser, annual_cycle[:, :, 0] = calcAnnualCycleMeansSubRegion(obsRgn[0,:,:])
obsStd = calcAnnualCycleStdevSubRegion(obsRgn[0,:,:])
for imodel in np.arange(nmodel):
mdlTser, annual_cycle[:, :, imodel+1] = calcAnnualCycleMeansSubRegion(mdlRgn[imodel, :, :])
# Make annual_cycle shape compatible with draw_time_series
annual_cycle = annual_cycle.swapaxes(1, 2)
tseries_return = plotter.draw_time_series(annual_cycle, times, data_names, workdir+'/time_series_'+varName, gridshape=(7, 2),
subtitles=rowlabels, label_month=True)
|
|
"""Functions to plot ICA specific data (besides topographies)."""
from __future__ import print_function
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
from functools import partial
from numbers import Integral
import numpy as np
from .utils import (tight_layout, _prepare_trellis, _select_bads,
_layout_figure, _plot_raw_onscroll, _mouse_click,
_helper_raw_resize, _plot_raw_onkey, plt_show)
from .topomap import (_prepare_topo_plot, plot_topomap, _hide_frame,
_plot_ica_topomap)
from .raw import _prepare_mne_browse_raw, _plot_raw_traces, _convert_psds
from .epochs import _prepare_mne_browse_epochs, plot_epochs_image
from .evoked import _butterfly_on_button_press, _butterfly_onpick
from ..utils import warn, _validate_type
from ..defaults import _handle_default
from ..io.meas_info import create_info
from ..io.pick import pick_types
from ..externals.six import string_types
from ..time_frequency.psd import psd_multitaper
def plot_ica_sources(ica, inst, picks=None, exclude=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False):
"""Plot estimated latent sources given the unmixing matrix.
    Typical use cases:
    1. plot evolution of latent sources over time (Raw input)
    2. plot latent sources around event-related time windows (Epochs input)
    3. plot time-locking in ICA space (Evoked input)
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
The object to plot the sources from.
picks : int | array_like of int | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
start : int
X-axis start index. If None, from the beginning.
    stop : int
        X-axis stop index. If None, 20 units past ``start`` are shown
        (seconds for raw, epochs for epochs data); for evoked, plotting
        continues to the end.
title : str | None
The figure title. If None a default is provided.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for interactive selection of components in raw and epoch
plotter. For evoked, this parameter has no effect. Defaults to False.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
Returns
-------
fig : instance of pyplot.Figure
The figure.
Notes
-----
For raw and epoch instances, it is possible to select components for
exclusion by clicking on the line. The selected components are added to
``ica.exclude`` on close.
.. versionadded:: 0.10.0
"""
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
if exclude is None:
exclude = ica.exclude
elif len(ica.exclude) > 0:
exclude = np.union1d(ica.exclude, exclude)
if isinstance(inst, BaseRaw):
fig = _plot_sources_raw(ica, inst, picks, exclude, start=start,
stop=stop, show=show, title=title,
block=block, show_first_samp=show_first_samp)
elif isinstance(inst, BaseEpochs):
fig = _plot_sources_epochs(ica, inst, picks, exclude, start=start,
stop=stop, show=show, title=title,
block=block)
elif isinstance(inst, Evoked):
if start is not None or stop is not None:
inst = inst.copy().crop(start, stop)
sources = ica.get_sources(inst)
fig = _plot_ica_sources_evoked(
evoked=sources, picks=picks, exclude=exclude, title=title,
labels=getattr(ica, 'labels_', None), show=show, ica=ica)
else:
        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
return fig
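def _example_plot_ica_sources():
    """Minimal usage sketch (not part of the public API).

    Assumes the MNE sample dataset is available locally; the filter band,
    component count and time window below are illustrative only.
    """
    import os.path as op
    import mne
    from mne.preprocessing import ICA
    data_path = mne.datasets.sample.data_path()
    raw_fname = op.join(str(data_path), 'MEG', 'sample',
                        'sample_audvis_raw.fif')
    raw = mne.io.read_raw_fif(raw_fname, preload=True)
    raw.filter(1., 40.)  # ICA works best on filtered data
    ica = ICA(n_components=15, random_state=0)
    ica.fit(raw)
    return plot_ica_sources(ica, raw, start=0, stop=10)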
def _create_properties_layout(figsize=None):
"""Create main figure and axes layout used by plot_ica_properties."""
import matplotlib.pyplot as plt
if figsize is None:
figsize = [7., 6.]
fig = plt.figure(figsize=figsize, facecolor=[0.95] * 3)
axes_params = (('topomap', [0.08, 0.5, 0.3, 0.45]),
('image', [0.5, 0.6, 0.45, 0.35]),
('erp', [0.5, 0.5, 0.45, 0.1]),
('spectrum', [0.08, 0.1, 0.32, 0.3]),
('variance', [0.5, 0.1, 0.45, 0.25]))
axes = [fig.add_axes(loc, label=name) for name, loc in axes_params]
return fig, axes
def _plot_ica_properties(pick, ica, inst, psds_mean, freqs, n_trials,
epoch_var, plot_lowpass_edge, epochs_src,
set_title_and_labels, plot_std, psd_ylabel,
spectrum_std, topomap_args, image_args, fig, axes):
"""Plot ICA properties (helper)."""
topo_ax, image_ax, erp_ax, spec_ax, var_ax = axes
# plotting
# --------
# component topomap
_plot_ica_topomap(ica, pick, show=False, axes=topo_ax, **topomap_args)
# image and erp
plot_epochs_image(epochs_src, picks=pick, axes=[image_ax, erp_ax],
combine=None, colorbar=False, show=False, **image_args)
# spectrum
spec_ax.plot(freqs, psds_mean, color='k')
if plot_std:
spec_ax.fill_between(freqs, psds_mean - spectrum_std[0],
psds_mean + spectrum_std[1],
color='k', alpha=.2)
if plot_lowpass_edge:
spec_ax.axvline(inst.info['lowpass'], lw=2, linestyle='--',
color='k', alpha=0.2)
# epoch variance
var_ax.scatter(range(len(epoch_var)), epoch_var, alpha=0.5,
facecolor=[0, 0, 0], lw=0)
# aesthetics
# ----------
topo_ax.set_title(ica._ica_names[pick])
set_title_and_labels(image_ax, 'Epochs image and ERP/ERF', [], 'Epochs')
# erp
set_title_and_labels(erp_ax, [], 'Time (s)', 'AU\n')
erp_ax.spines["right"].set_color('k')
erp_ax.set_xlim(epochs_src.times[[0, -1]])
# remove half of yticks if more than 5
yt = erp_ax.get_yticks()
if len(yt) > 5:
yt = yt[::2]
erp_ax.yaxis.set_ticks(yt)
# remove xticks - erp plot shows xticks for both image and erp plot
image_ax.xaxis.set_ticks([])
yt = image_ax.get_yticks()
image_ax.yaxis.set_ticks(yt[1:])
image_ax.set_ylim([-0.5, n_trials + 0.5])
# spectrum
set_title_and_labels(spec_ax, 'Spectrum', 'Frequency (Hz)', psd_ylabel)
spec_ax.yaxis.labelpad = 0
spec_ax.set_xlim(freqs[[0, -1]])
ylim = spec_ax.get_ylim()
air = np.diff(ylim)[0] * 0.1
spec_ax.set_ylim(ylim[0] - air, ylim[1] + air)
image_ax.axhline(0, color='k', linewidth=.5)
# epoch variance
set_title_and_labels(var_ax, 'Epochs variance', 'Epoch (index)', 'AU')
return fig
def _get_psd_label_and_std(this_psd, dB, ica, num_std):
"""Handle setting up PSD for one component, for plot_ica_properties."""
psd_ylabel = _convert_psds(this_psd, dB, estimate='auto', scaling=1.,
unit='AU', ch_names=ica.ch_names)
psds_mean = this_psd.mean(axis=0)
diffs = this_psd - psds_mean
# the distribution of power for each frequency bin is highly
# skewed so we calculate std for values below and above average
# separately - this is used for fill_between shade
spectrum_std = [
[np.sqrt((d[d < 0] ** 2).mean(axis=0)) for d in diffs.T],
[np.sqrt((d[d > 0] ** 2).mean(axis=0)) for d in diffs.T]]
spectrum_std = np.array(spectrum_std) * num_std
return psd_ylabel, psds_mean, spectrum_std
def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True):
"""Display component properties.
Properties include the topography, epochs image, ERP/ERF, power
spectrum, and epoch variance.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
    inst : instance of Epochs or Raw
        The data to use in plotting properties.
    picks : int | array-like of int | None
        The components to be displayed. If None, the plot will show the first
        five sources. If more than one component is given in picks,
        each one will be plotted in a separate figure. Defaults to None.
    axes : list of matplotlib axes | None
List of five matplotlib axes to use in plotting: [topomap_axis,
image_axis, erp_axis, spectrum_axis, variance_axis]. If None a new
figure with relevant axes is created. Defaults to None.
    dB : bool
Whether to plot spectrum in dB. Defaults to True.
    plot_std : bool | float
        Whether to plot standard deviation in ERP/ERF and spectrum plots.
        Defaults to True, which plots one standard deviation above/below.
        If set to a float, controls how many standard deviations are
        plotted. For example, 2.5 will plot 2.5 standard deviations above/below.
topomap_args : dict | None
Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
additional arguments. Defaults to None.
image_args : dict | None
Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
any additional arguments. Defaults to None.
psd_args : dict | None
Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
any additional arguments. Defaults to None.
    figsize : array-like of size (2,) | None
        Controls the size of the figure. If None, the figure size
        defaults to [7., 6.].
show : bool
Show figure if True.
Returns
-------
fig : list
List of matplotlib figures.
Notes
-----
.. versionadded:: 0.13
"""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from ..preprocessing import ICA
# input checks and defaults
# -------------------------
_validate_type(inst, (BaseRaw, BaseEpochs), "inst", "Raw or Epochs")
_validate_type(ica, ICA, "ica", "ICA")
if isinstance(plot_std, bool):
num_std = 1. if plot_std else 0.
elif isinstance(plot_std, (float, int)):
num_std = plot_std
plot_std = True
else:
raise ValueError('plot_std has to be a bool, int or float, '
'got %s instead' % type(plot_std))
# if no picks given - plot the first 5 components
picks = list(range(min(5, ica.n_components_))) if picks is None else picks
picks = [picks] if isinstance(picks, Integral) else picks
if axes is None:
fig, axes = _create_properties_layout(figsize=figsize)
else:
if len(picks) > 1:
raise ValueError('Only a single pick can be drawn '
'to a set of axes.')
from .utils import _validate_if_list_of_axes
_validate_if_list_of_axes(axes, obligatory_len=5)
fig = axes[0].get_figure()
psd_args = dict() if psd_args is None else psd_args
topomap_args = dict() if topomap_args is None else topomap_args
image_args = dict() if image_args is None else image_args
image_args["ts_args"] = dict(truncate_xaxis=False, show_sensors=False)
for item_name, item in (("psd_args", psd_args),
("topomap_args", topomap_args),
("image_args", image_args)):
_validate_type(item, dict, item_name, "dictionary")
if dB is not None:
_validate_type(dB, bool, "dB", "bool")
# calculations
# ------------
if isinstance(inst, BaseRaw):
# break up continuous signal into segments
from ..epochs import _segment_raw
inst = _segment_raw(inst, segment_length=2., verbose=False,
preload=True)
epochs_src = ica.get_sources(inst)
ica_data = np.swapaxes(epochs_src.get_data()[:, picks, :], 0, 1)
# spectrum
Nyquist = inst.info['sfreq'] / 2.
lp = inst.info['lowpass']
if 'fmax' not in psd_args:
psd_args['fmax'] = min(lp * 1.25, Nyquist)
plot_lowpass_edge = lp < Nyquist and (psd_args['fmax'] > lp)
psds, freqs = psd_multitaper(epochs_src, picks=picks, **psd_args)
def set_title_and_labels(ax, title, xlab, ylab):
if title:
ax.set_title(title)
if xlab:
ax.set_xlabel(xlab)
if ylab:
ax.set_ylabel(ylab)
ax.axis('auto')
ax.tick_params('both', labelsize=8)
ax.axis('tight')
# plot
# ----
all_fig = list()
for idx, pick in enumerate(picks):
# calculate component-specific spectrum stuff
psd_ylabel, psds_mean, spectrum_std = _get_psd_label_and_std(
psds[:, idx, :].copy(), dB, ica, num_std)
# if more than one component, spawn additional figures and axes
if idx > 0:
fig, axes = _create_properties_layout(figsize=figsize)
# the actual plot
fig = _plot_ica_properties(
pick, ica, inst, psds_mean, freqs, ica_data.shape[1],
np.var(ica_data[idx], axis=1), plot_lowpass_edge,
epochs_src, set_title_and_labels, plot_std, psd_ylabel,
spectrum_std, topomap_args, image_args, fig, axes)
all_fig.append(fig)
plt_show(show)
return all_fig
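def _example_plot_ica_properties(ica, raw):
    """Minimal usage sketch (not part of the public API).

    ``ica`` is assumed to be a fitted ICA solution and ``raw`` a preloaded,
    filtered Raw instance; the picks and psd settings are illustrative only.
    """
    return plot_ica_properties(ica, raw, picks=[0, 1], dB=True,
                               psd_args=dict(fmax=35.))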
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica,
labels=None):
"""Plot average over epochs in ICA space.
Parameters
----------
evoked : instance of mne.Evoked
The Evoked to be used.
picks : int | array_like of int | None.
The components to be displayed. If None, plot will show the
sources in the order as fitted.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
title : str
The figure title.
show : bool
Show figure if True.
labels : None | dict
The ICA labels attribute.
"""
import matplotlib.pyplot as plt
if title is None:
title = 'Reconstructed latent sources, time-locked'
fig, axes = plt.subplots(1)
ax = axes
axes = [axes]
times = evoked.times * 1e3
# plot unclassified sources and label excluded ones
lines = list()
texts = list()
if picks is None:
picks = np.arange(evoked.data.shape[0])
picks = np.sort(picks)
idxs = [picks]
if labels is not None:
labels_used = [k for k in labels if '/' not in k]
exclude_labels = list()
for ii in picks:
if ii in exclude:
line_label = ica._ica_names[ii]
if labels is not None:
annot = list()
for this_label in labels_used:
indices = labels[this_label]
if ii in indices:
annot.append(this_label)
line_label += (' - ' + ', '.join(annot))
exclude_labels.append(line_label)
else:
exclude_labels.append(None)
if labels is not None:
# compute colors only based on label categories
unique_labels = set([k.split(' - ')[1] for k in exclude_labels if k])
label_colors = plt.cm.rainbow(np.linspace(0, 1, len(unique_labels)))
label_colors = dict(zip(unique_labels, label_colors))
else:
label_colors = dict((k, 'red') for k in exclude_labels)
for exc_label, ii in zip(exclude_labels, picks):
if exc_label is not None:
# create look up for color ...
if ' - ' in exc_label:
key = exc_label.split(' - ')[1]
else:
key = exc_label
color = label_colors[key]
# ... but display component number too
lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
zorder=2, color=color, label=exc_label))
else:
lines.extend(ax.plot(times, evoked.data[ii].T, picker=3.,
color='k', zorder=1))
ax.set(title=title, xlim=times[[0, -1]], xlabel='Time (ms)', ylabel='(NA)')
if len(exclude) > 0:
plt.legend(loc='best')
tight_layout(fig=fig)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
# this is done to give the structure of a list of lists of a group of lines
# in each subplot
lines = [lines]
ch_names = evoked.ch_names
from matplotlib import patheffects
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
ch_names=ch_names, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
plt_show(show)
return fig
def plot_ica_scores(ica, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=None, show=True):
"""Plot scores related to detected components.
    Use this function to assess how well your scores describe outlier
    sources and how well you can detect them.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
scores : array_like of float, shape (n ica components) | list of arrays
Scores based on arbitrary metric to characterize ICA components.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
labels : str | list | 'ecg' | 'eog' | None
The labels to consider for the axes tests. Defaults to None.
If list, should match the outer shape of `scores`.
If 'ecg' or 'eog', the ``labels_`` attributes will be looked up.
Note that '/' is used internally for sublabels specifying ECG and
EOG channels.
axhline : float
Draw horizontal line to e.g. visualize rejection threshold.
title : str
The figure title.
figsize : tuple of int | None
The figure size. If None it gets set automatically.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object
"""
import matplotlib.pyplot as plt
my_range = np.arange(ica.n_components_)
if exclude is None:
exclude = ica.exclude
exclude = np.unique(exclude)
if not isinstance(scores[0], (list, np.ndarray)):
scores = [scores]
n_rows = len(scores)
if figsize is None:
figsize = (6.4, 2.7 * n_rows)
fig, axes = plt.subplots(n_rows, figsize=figsize, sharex=True, sharey=True)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
axes[0].set_title(title)
if labels == 'ecg':
labels = [l for l in ica.labels_ if l.startswith('ecg/')]
elif labels == 'eog':
labels = [l for l in ica.labels_ if l.startswith('eog/')]
labels.sort(key=lambda l: l.split('/')[1]) # sort by index
elif isinstance(labels, string_types):
if len(axes) > 1:
raise ValueError('Need as many labels as axes (%i)' % len(axes))
labels = [labels]
elif isinstance(labels, (tuple, list)):
if len(labels) != len(axes):
raise ValueError('Need as many labels as axes (%i)' % len(axes))
elif labels is None:
labels = (None,) * n_rows
for label, this_scores, ax in zip(labels, scores, axes):
if len(my_range) != len(this_scores):
raise ValueError('The length of `scores` must equal the '
'number of ICA components.')
ax.bar(my_range, this_scores, color='gray', edgecolor='k')
for excl in exclude:
ax.bar(my_range[excl], this_scores[excl], color='r', edgecolor='k')
if axhline is not None:
if np.isscalar(axhline):
axhline = [axhline]
for axl in axhline:
ax.axhline(axl, color='r', linestyle='--')
ax.set_ylabel('score')
if label is not None:
if 'eog/' in label:
split = label.split('/')
label = ', '.join([split[0], split[2]])
elif '/' in label:
label = ', '.join(label.split('/'))
ax.set_title('(%s)' % label)
ax.set_xlabel('ICA components')
ax.set_xlim(0, len(this_scores))
tight_layout(fig=fig)
plt_show(show)
return fig
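def _example_plot_ica_scores(ica, raw):
    """Minimal usage sketch (not part of the public API).

    ``ica`` is assumed to be a fitted ICA solution and ``raw`` a Raw
    instance with an EOG channel; the threshold line is illustrative only.
    """
    eog_inds, scores = ica.find_bads_eog(raw)
    return plot_ica_scores(ica, scores, exclude=eog_inds, axhline=0.5)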
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True):
"""Overlay of raw and cleaned signals given the unmixing matrix.
This method helps visualizing signal quality and artifact rejection.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
inst : instance of mne.io.Raw or mne.Evoked
The signals to be compared given the ICA solution. If Raw input,
The raw data are displayed before and after cleaning. In a second
panel the cross channel average will be displayed. Since dipolar
sources will be canceled out this display is sensitive to
artifacts. If evoked input, butterfly plots for clean and raw
signals will be superimposed.
exclude : array_like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels
are used that were included on fitting).
start : int
X-axis start index. If None from the beginning.
stop : int
X-axis stop index. If None to the end.
title : str
The figure title.
show : bool
Show figure if True.
Returns
-------
fig : instance of pyplot.Figure
The figure.
"""
# avoid circular imports
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..preprocessing.ica import _check_start_stop
_validate_type(inst, (BaseRaw, Evoked), "inst", "Raw or Evoked")
if title is None:
title = 'Signals before (red) and after (black) cleaning'
if picks is None:
picks = [inst.ch_names.index(k) for k in ica.ch_names]
if exclude is None:
exclude = ica.exclude
if isinstance(inst, BaseRaw):
if start is None:
start = 0.0
if stop is None:
stop = 3.0
ch_types_used = [k for k in ['mag', 'grad', 'eeg'] if k in ica]
start_compare, stop_compare = _check_start_stop(inst, start, stop)
data, times = inst[picks, start_compare:stop_compare]
raw_cln = ica.apply(inst.copy(), exclude=exclude,
start=start, stop=stop)
data_cln, _ = raw_cln[picks, start_compare:stop_compare]
fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
times=times, title=title,
ch_types_used=ch_types_used, show=show)
elif isinstance(inst, Evoked):
inst = inst.copy().crop(start, stop)
if picks is not None:
inst.info['comps'] = [] # can be safely disabled
inst.pick_channels([inst.ch_names[p] for p in picks])
evoked_cln = ica.apply(inst.copy(), exclude=exclude)
fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
title=title, show=show)
return fig
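def _example_plot_ica_overlay(ica, raw):
    """Minimal usage sketch (not part of the public API).

    ``ica`` is assumed to be a fitted ICA solution and ``raw`` a preloaded
    Raw instance; the excluded component and time window are illustrative.
    """
    return plot_ica_overlay(ica, raw, exclude=[0], start=0., stop=3.)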
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
Show figure if True.
Returns
-------
fig : instance of pyplot.Figure
"""
import matplotlib.pyplot as plt
# Restore sensor space data and keep all PCA components
    # let's now compare the data before and after cleaning.
# first the raw data
assert data.shape == data_cln.shape
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
plt.suptitle(title)
ax1.plot(times, data.T, color='r')
ax1.plot(times, data_cln.T, color='k')
ax1.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Raw data')
_ch_types = {'mag': 'Magnetometers',
'grad': 'Gradiometers',
'eeg': 'EEG'}
ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
ax2.set_title('Average across channels ({0})'.format(ch_types))
ax2.plot(times, data.mean(0), color='r')
ax2.plot(times, data_cln.mean(0), color='k')
ax2.set(xlabel='Time (s)', xlim=times[[0, -1]])
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
If True, all open plots will be shown.
Returns
-------
fig : instance of pyplot.Figure
"""
import matplotlib.pyplot as plt
ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
n_rows = len(ch_types_used)
ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
c in evoked_cln]
if len(ch_types_used) != len(ch_types_used_cln):
raise ValueError('Raw and clean evokeds must match. '
'Found different channels.')
fig, axes = plt.subplots(n_rows, 1)
fig.suptitle('Average signal before (red) and after (black) ICA')
axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
evoked.plot(axes=axes, show=show, time_unit='s')
for ax in fig.axes:
for l in ax.get_lines():
l.set_color('r')
fig.canvas.draw()
evoked_cln.plot(axes=axes, show=show, time_unit='s')
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_sources_raw(ica, raw, picks, exclude, start, stop, show, title,
block, show_first_samp):
"""Plot the ICA components as raw array."""
color = _handle_default('color', (0., 0., 0.))
orig_data = ica._transform_raw(raw, 0, len(raw.times)) * 0.2
if picks is None:
picks = range(len(orig_data))
types = ['misc' for _ in picks]
picks = list(sorted(picks))
eog_chs = pick_types(raw.info, meg=False, eog=True, ref_meg=False)
ecg_chs = pick_types(raw.info, meg=False, ecg=True, ref_meg=False)
data = [orig_data[pick] for pick in picks]
c_names = list(ica._ica_names) # new list
for eog_idx in eog_chs:
c_names.append(raw.ch_names[eog_idx])
types.append('eog')
for ecg_idx in ecg_chs:
c_names.append(raw.ch_names[ecg_idx])
types.append('ecg')
extra_picks = np.append(eog_chs, ecg_chs).astype(int)
if len(extra_picks) > 0:
eog_ecg_data, _ = raw[extra_picks, :]
for idx in range(len(eog_ecg_data)):
if idx < len(eog_chs):
eog_ecg_data[idx] /= 150e-6 # scaling for eog
else:
eog_ecg_data[idx] /= 5e-4 # scaling for ecg
data = np.append(data, eog_ecg_data, axis=0)
for idx in range(len(extra_picks)):
picks = np.append(picks, ica.n_components_ + idx)
if title is None:
title = 'ICA components'
info = create_info([c_names[x] for x in picks], raw.info['sfreq'])
info['bads'] = [c_names[x] for x in exclude]
if start is None:
start = 0
if stop is None:
stop = start + 20
stop = min(stop, raw.times[-1])
duration = stop - start
if duration <= 0:
raise RuntimeError('Stop must be larger than start.')
t_end = int(duration * raw.info['sfreq'])
times = raw.times[0:t_end]
bad_color = (1., 0., 0.)
inds = list(range(len(picks)))
data = np.array(data)
n_channels = min([20, len(picks)])
first_time = raw._first_time if show_first_samp else 0
start += first_time
params = dict(raw=raw, orig_data=data, data=data[:, 0:t_end], inds=inds,
ch_start=0, t_start=start, info=info, duration=duration,
ica=ica, n_channels=n_channels, times=times, types=types,
n_times=raw.n_times, bad_color=bad_color, picks=picks,
first_time=first_time, data_picks=[], decim=1,
noise_cov=None, whitened_ch_names=())
_prepare_mne_browse_raw(params, title, 'w', color, bad_color, inds,
n_channels)
params['scale_factor'] = 1.0
params['plot_fun'] = partial(_plot_raw_traces, params=params, color=color,
bad_color=bad_color)
params['update_fun'] = partial(_update_data, params)
params['pick_bads_fun'] = partial(_pick_bads, params=params)
params['label_click_fun'] = partial(_label_clicked, params=params)
_layout_figure(params)
# callbacks
callback_key = partial(_plot_raw_onkey, params=params)
params['fig'].canvas.mpl_connect('key_press_event', callback_key)
callback_scroll = partial(_plot_raw_onscroll, params=params)
params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
callback_pick = partial(_mouse_click, params=params)
params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
callback_resize = partial(_helper_raw_resize, params=params)
params['fig'].canvas.mpl_connect('resize_event', callback_resize)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
params['fig_proj'] = None
params['event_times'] = None
params['butterfly'] = False
params['update_fun']()
params['plot_fun']()
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
def _update_data(params):
"""Prepare the data on horizontal shift of the viewport."""
sfreq = params['info']['sfreq']
start = int((params['t_start'] - params['first_time']) * sfreq)
end = int((params['t_start'] + params['duration']) * sfreq)
params['data'] = params['orig_data'][:, start:end]
params['times'] = params['raw'].times[start:end]
def _pick_bads(event, params):
"""Select components on click."""
bads = params['info']['bads']
params['info']['bads'] = _select_bads(event, params, bads)
params['update_fun']()
params['plot_fun']()
def _close_event(events, params):
"""Exclude the selected components on close."""
info = params['info']
exclude = [params['ica']._ica_names.index(x)
for x in info['bads'] if x.startswith('ICA')]
params['ica'].exclude = exclude
def _plot_sources_epochs(ica, epochs, picks, exclude, start, stop, show,
title, block):
"""Plot the components as epochs."""
data = ica._transform_epochs(epochs, concatenate=True)
eog_chs = pick_types(epochs.info, meg=False, eog=True, ref_meg=False)
ecg_chs = pick_types(epochs.info, meg=False, ecg=True, ref_meg=False)
c_names = list(ica._ica_names)
ch_types = np.repeat('misc', ica.n_components_)
for eog_idx in eog_chs:
c_names.append(epochs.ch_names[eog_idx])
ch_types = np.append(ch_types, 'eog')
for ecg_idx in ecg_chs:
c_names.append(epochs.ch_names[ecg_idx])
ch_types = np.append(ch_types, 'ecg')
extra_picks = np.append(eog_chs, ecg_chs).astype(int)
if len(extra_picks) > 0:
eog_ecg_data = np.concatenate(epochs.get_data()[:, extra_picks],
axis=1)
data = np.append(data, eog_ecg_data, axis=0)
scalings = _handle_default('scalings_plot_raw')
scalings['misc'] = 5.0
info = create_info(ch_names=c_names, sfreq=epochs.info['sfreq'],
ch_types=ch_types)
info['projs'] = list()
info['bads'] = [c_names[x] for x in exclude]
if title is None:
title = 'ICA components'
if picks is None:
picks = list(range(ica.n_components_))
if start is None:
start = 0
if stop is None:
stop = start + 20
stop = min(stop, len(epochs.events))
for idx in range(len(extra_picks)):
picks = np.append(picks, ica.n_components_ + idx)
n_epochs = stop - start
if n_epochs <= 0:
raise RuntimeError('Stop must be larger than start.')
params = dict(ica=ica, epochs=epochs, info=info, orig_data=data,
bads=list(), bad_color=(1., 0., 0.),
t_start=start * len(epochs.times),
data_picks=list(), decim=1, whitened_ch_names=(),
noise_cov=None)
params['label_click_fun'] = partial(_label_clicked, params=params)
_prepare_mne_browse_epochs(params, projs=list(), n_channels=20,
n_epochs=n_epochs, scalings=scalings,
title=title, picks=picks,
order=['misc', 'eog', 'ecg'])
params['plot_update_proj_callback'] = _update_epoch_data
_update_epoch_data(params)
params['hsel_patch'].set_x(params['t_start'])
callback_close = partial(_close_epochs_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
def _update_epoch_data(params):
"""Prepare the data on horizontal shift."""
start = params['t_start']
n_epochs = params['n_epochs']
end = start + n_epochs * len(params['epochs'].times)
data = params['orig_data'][:, start:end]
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _close_epochs_event(events, params):
"""Exclude the selected components on close."""
info = params['info']
exclude = [info['ch_names'].index(x) for x in info['bads']
if x.startswith('IC')]
params['ica'].exclude = exclude
def _label_clicked(pos, params):
"""Plot independent components on click to label."""
import matplotlib.pyplot as plt
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1]) + params['ch_start']
if line_idx >= len(params['picks']):
return
ic_idx = [params['picks'][line_idx]]
if params['types'][line_idx] != 'misc':
warn('Can only plot ICA components.')
return
types = list()
info = params['ica'].info
if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
types.append('eeg')
if len(pick_types(info, meg='mag', ref_meg=False)) > 0:
types.append('mag')
if len(pick_types(info, meg='grad', ref_meg=False)) > 0:
types.append('grad')
ica = params['ica']
data = np.dot(ica.mixing_matrix_[:, ic_idx].T,
ica.pca_components_[:ica.n_components_])
data = np.atleast_2d(data)
fig, axes = _prepare_trellis(len(types), max_col=3)
for ch_idx, ch_type in enumerate(types):
try:
data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica,
ch_type,
None)
except Exception as exc:
warn(exc)
plt.close(fig)
return
this_data = data[:, data_picks]
ax = axes[ch_idx]
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_ in zip(ic_idx, this_data):
ax.set_title('%s %s' % (ica._ica_names[ii], ch_type), fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
plot_topomap(data_.flatten(), pos, axes=ax, show=False)
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.88, bottom=0.)
fig.canvas.draw()
plt_show(True)
|
|
#
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# detect_int.py - This file is part of the PySptools package.
#
"""
MatchedFilter, ACE, CEM, GLRT, OSP classes
"""
import numpy as np
from . import detect
from .inval import *
from .docstring import *
def _plot_target_map(path, tmap, map_type, whiteOnBlack, suffix=None):
""" Plot a target map using matplotlib """
import matplotlib.pyplot as plt
import os.path as osp
    if path is not None:
        plt.ioff()
    img = plt.imshow(tmap)
    if whiteOnBlack == True:
        img.set_cmap('Greys_r')
    elif whiteOnBlack == False:
        img.set_cmap('Greys')
    else:
        # unexpected value: fall back to a neutral colormap
        img.set_cmap('Blues')
    if path is not None:
        if suffix is None:
fout = osp.join(path, 'tmap_{0}.png'.format(map_type))
else:
fout = osp.join(path, 'tmap_{0}_{1}.png'.format(map_type, suffix))
try:
plt.savefig(fout)
except IOError:
raise IOError('in detection._plot_target_map, no such file or directory: {0}'.format(path))
else:
        if suffix is None:
plt.title('{0} Target Map'.format(map_type))
else:
plt.title('{0} Target Map - {1}'.format(map_type, suffix))
plt.show()
plt.clf()
def _document(cls):
import sys
if sys.version_info[0] == 2:
cls.plot.__func__.__doc__ = plot_docstring
cls.display.__func__.__doc__ = display_docstring
if sys.version_info[0] == 3:
cls.plot.__doc__ = plot_docstring
cls.display.__doc__ = display_docstring
class MatchedFilter(object):
"""
Performs the matched filter algorithm for target detection.
"""
def __init__(self):
self.target_map = None
@DetectInputValidation1('MatchedFilter')
def detect(self, M, t, threshold=None):
"""
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
t: `numpy array`
A target pixel (p).
threshold: `float or None [default None]`
              Apply a threshold to the detection result.
              Useful to isolate the result.
            Returns: `numpy array`
              Detector output map (m x n).
            References:
              Qian Du, Hsuan Ren, and Chein-I Chang. A Comparative Study of
              Orthogonal Subspace Projection and Constrained Energy Minimization.
              IEEE TGRS. Volume 41. Number 6. June 2003.
"""
h,w,numBands = M.shape
self.h, self.w, self.numBands = M.shape
Mr = np.reshape(M, (w*h, numBands))
target = detect.MatchedFilter(Mr, t)
self.target_map = np.reshape(target, (h, w))
        if threshold is not None:
self.target_map = self.target_map > threshold
return self.target_map
def __str__(self):
return 'pysptools.detection.detect_int.MatchedFilter object, hcube: {0}x{1}x{2}'.format(self.h, self.w, self.numBands)
@PlotInputValidation('MatchedFilter')
def plot(self, path, whiteOnBlack=True, suffix=None):
_plot_target_map(path, self.target_map, 'MatchedFilter', whiteOnBlack, suffix)
@DisplayInputValidation('MatchedFilter')
def display(self, whiteOnBlack=True, suffix=None):
_plot_target_map(None, self.target_map, 'MatchedFilter', whiteOnBlack, suffix)
_document(MatchedFilter)
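# A minimal usage sketch for MatchedFilter with synthetic data (shapes only,
# not a real HSI cube); the target pixel and threshold are illustrative.
def _example_matched_filter():
    M = np.random.rand(50, 60, 100)  # hypothetical (m x n x p) cube
    t = M[25, 30, :]                 # use one pixel as the target signature
    mf = MatchedFilter()
    tmap = mf.detect(M, t, threshold=0.5)
    mf.display(whiteOnBlack=True)
    return tmap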
class ACE(object):
"""
    Performs the adaptive cosine/coherent estimator algorithm for target
detection.
"""
def __init__(self):
self.target_map = None
@DetectInputValidation1('ACE')
def detect(self, M, t, threshold=None):
"""
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
t: `numpy array`
A target pixel (p).
threshold: `float or None [default None]`
              Apply a threshold to the detection result.
              Useful to isolate the result.
            Returns: `numpy array`
              Detector output map (m x n).
References:
X Jin, S Paswater, H Cline. "A Comparative Study of Target Detection
Algorithms for Hyperspectral Imagery." SPIE Algorithms and Technologies
for Multispectral, Hyperspectral, and Ultraspectral Imagery XV. Vol
7334. 2009.
"""
h,w,numBands = M.shape
self.h, self.w, self.numBands = M.shape
Mr = np.reshape(M, (w*h, numBands))
target = detect.ACE(Mr, t)
self.target_map = np.reshape(target, (h, w))
        if threshold is not None:
self.target_map = self.target_map > threshold
return self.target_map
def __str__(self):
return 'pysptools.detection.detect_int.ACE object, hcube: {0}x{1}x{2}'.format(self.h, self.w, self.numBands)
@PlotInputValidation('ACE')
def plot(self, path, whiteOnBlack=True, suffix=None):
_plot_target_map(path, self.target_map, 'ACE', whiteOnBlack, suffix)
@DisplayInputValidation('ACE')
def display(self, whiteOnBlack=True, suffix=None):
_plot_target_map(None, self.target_map, 'ACE', whiteOnBlack, suffix)
_document(ACE)
class CEM(object):
"""
Performs the constrained energy minimization algorithm for target
detection.
"""
def __init__(self):
self.target_map = None
@DetectInputValidation1('CEM')
def detect(self, M, t, threshold=None):
"""
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
t: `numpy array`
A target pixel (p).
threshold: `float or None [default None]`
              Apply a threshold to the detection result.
              Useful to isolate the result.
            Returns: `numpy array`
              Detector output map (m x n).
References:
              Qian Du, Hsuan Ren, and Chein-I Chang. A Comparative Study of
Orthogonal Subspace Projection and Constrained Energy Minimization.
IEEE TGRS. Volume 41. Number 6. June 2003.
"""
h,w,numBands = M.shape
self.h, self.w, self.numBands = M.shape
Mr = np.reshape(M, (w*h, numBands))
target = detect.CEM(Mr, t)
self.target_map = np.reshape(target, (h, w))
        if threshold is not None:
self.target_map = self.target_map > threshold
return self.target_map
def __str__(self):
return 'pysptools.detection.detect_int.CEM object, hcube: {0}x{1}x{2}'.format(self.h, self.w, self.numBands)
@PlotInputValidation('CEM')
def plot(self, path, whiteOnBlack=True, suffix=None):
_plot_target_map(path, self.target_map, 'CEM', whiteOnBlack, suffix)
@DisplayInputValidation('CEM')
def display(self, whiteOnBlack=True, suffix=None):
_plot_target_map(None, self.target_map, 'CEM', whiteOnBlack, suffix)
_document(CEM)
class GLRT(object):
"""
Performs the generalized likelihood test ratio algorithm for target
detection.
"""
def __init__(self):
self.target_map = None
@DetectInputValidation1('GLRT')
def detect(self, M, t, threshold=None):
"""
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
t: `numpy array`
A target pixel (p).
threshold: `float or None [default None]`
              Apply a threshold to the detection result.
              Useful to isolate the result.
            Returns: `numpy array`
              Detector output map (m x n).
            References:
              T. F. Ayoub, "Modified GLRT Signal Detection Algorithm," IEEE
              Transactions on Aerospace and Electronic Systems, Vol 36, No 3, July
              2000.
"""
h,w,numBands = M.shape
self.h, self.w, self.numBands = M.shape
Mr = np.reshape(M, (w*h, numBands))
target = detect.GLRT(Mr, t)
self.target_map = np.reshape(target, (h, w))
        if threshold is not None:
self.target_map = self.target_map > threshold
return self.target_map
def __str__(self):
return 'pysptools.detection.detect_int.GLRT object, hcube: {0}x{1}x{2}'.format(self.h, self.w, self.numBands)
@PlotInputValidation('GLRT')
def plot(self, path, whiteOnBlack=True, suffix=None):
_plot_target_map(path, self.target_map, 'GLRT', whiteOnBlack, suffix)
@DisplayInputValidation('GLRT')
def display(self, whiteOnBlack=True, suffix=None):
_plot_target_map(None, self.target_map, 'GLRT', whiteOnBlack, suffix)
_document(GLRT)
class OSP(object):
"""
    Performs the orthogonal subspace projection algorithm for target
detection.
"""
def __init__(self):
self.target_map = None
@DetectInputValidation2('OSP')
def detect(self, M, E, t, threshold=None):
"""
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
E: `numpy array`
Background pixels (n x p).
t: `numpy array`
A target pixel (p).
threshold: `float or None [default None]`
              Apply a threshold to the detection result.
              Useful to isolate the result.
            Returns: `numpy array`
              Detector output map (m x n).
            References:
              Qian Du, Hsuan Ren, and Chein-I Chang. "A Comparative Study of
              Orthogonal Subspace Projection and Constrained Energy Minimization."
              IEEE TGRS. Volume 41. Number 6. June 2003.
"""
h,w,numBands = M.shape
self.h, self.w, self.numBands = M.shape
Mr = np.reshape(M, (w*h, numBands))
target = detect.OSP(Mr, E, t)
self.target_map = np.reshape(target, (h, w))
        if threshold is not None:
self.target_map = self.target_map > threshold
return self.target_map
def __str__(self):
return 'pysptools.detection.detect_int.OSP object, hcube: {0}x{1}x{2}'.format(self.h, self.w, self.numBands)
@PlotInputValidation('OSP')
def plot(self, path, whiteOnBlack=True, suffix=None):
_plot_target_map(path, self.target_map, 'OSP', whiteOnBlack, suffix)
@DisplayInputValidation('OSP')
def display(self, whiteOnBlack=True, suffix=None):
_plot_target_map(None, self.target_map, 'OSP', whiteOnBlack, suffix)
_document(OSP)
|
|
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import mock
from mox3 import mox
from oslo.config import cfg
from oslo import messaging
from oslo.serialization import jsonutils
from oslo.utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova import utils
CONF = cfg.CONF
CONF.import_opt('report_interval', 'nova.service')
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
if not hasattr(self, '_elevated'):
self._elevated = super(FakeContext, self).elevated()
return self._elevated
class _BaseTestCase(object):
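    """Mixin shared by the manager, RPC API, and local API test cases;
    each subclass binds self.conductor and self.conductor_manager in setUp().
    """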
def setUp(self):
super(_BaseTestCase, self).setUp()
self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_notifier.stub_notifier(self.stubs)
self.addCleanup(fake_notifier.reset)
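        # Serializer stub: check that the context crossing RPC carries this
        # test's user/project ids, then hand back the original context.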
def fake_deserialize_context(serializer, ctxt_dict):
self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
return self.context
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
fake_utils.stub_out_utils_spawn_n(self.stubs)
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
inst = {}
inst['vm_state'] = vm_states.ACTIVE
inst['image_ref'] = FAKE_IMAGE_REF
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
inst['host'] = 'fake_host'
type_id = flavors.get_flavor_by_name(type_name)['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['memory_mb'] = 0
inst['vcpus'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst['architecture'] = arch.X86_64
inst['os_type'] = 'Linux'
inst['availability_zone'] = 'fake-az'
inst.update(params)
return db.instance_create(self.context, inst)
def _do_update(self, instance_uuid, **updates):
return self.conductor.instance_update(self.context, instance_uuid,
updates, None)
def test_instance_update(self):
instance = self._create_fake_instance()
new_inst = self._do_update(instance['uuid'],
vm_state=vm_states.STOPPED)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
def test_migration_get_in_progress_by_host_and_node(self):
self.mox.StubOutWithMock(db,
'migration_get_in_progress_by_host_and_node')
db.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.migration_get_in_progress_by_host_and_node(
self.context, 'fake-host', 'fake-node')
self.assertEqual(result, 'fake-result')
def test_aggregate_metadata_get_by_host(self):
self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
db.aggregate_metadata_get_by_host(self.context, 'host',
'key').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.aggregate_metadata_get_by_host(self.context,
'host', 'key')
self.assertEqual(result, 'result')
def test_bw_usage_update(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_update(*update_args, update_cells=True)
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_update(*update_args,
update_cells=True)
self.assertEqual(result, 'foo')
def test_provider_fw_rule_get_all(self):
fake_rules = ['a', 'b', 'c']
self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
self.mox.ReplayAll()
result = self.conductor.provider_fw_rule_get_all(self.context)
self.assertEqual(result, fake_rules)
def test_block_device_mapping_get_all_by_instance(self):
fake_inst = {'uuid': 'fake-uuid'}
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
db.block_device_mapping_get_all_by_instance(
self.context, fake_inst['uuid']).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.block_device_mapping_get_all_by_instance(
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
fake_inst = {'uuid': 'fake-uuid',
'project_id': 'fake-project',
'user_id': 'fake-user',
'availability_zone': 'fake-az',
}
db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
fake_inst['uuid'],
fake_inst['project_id'],
fake_inst['user_id'],
fake_inst['availability_zone'],
False).AndReturn('fake-usage')
compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
self.mox.ReplayAll()
self.conductor.vol_usage_update(self.context, 'fake-vol',
22, 33, 44, 55, fake_inst, None, False)
self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
msg = fake_notifier.NOTIFICATIONS[0]
self.assertEqual('conductor.%s' % self.conductor_manager.host,
msg.publisher_id)
self.assertEqual('volume.usage', msg.event_type)
self.assertEqual('INFO', msg.priority)
self.assertEqual('fake-info', msg.payload)
def test_compute_node_create(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
db.compute_node_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_compute_node_update(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_update')
db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.compute_node_update(self.context, node,
{'fake': 'values'})
self.assertEqual(result, 'fake-result')
def test_compute_node_delete(self):
node = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'compute_node_delete')
db.compute_node_delete(self.context, node['id']).AndReturn(None)
self.mox.ReplayAll()
result = self.conductor.compute_node_delete(self.context, node)
self.assertIsNone(result)
def test_task_log_get(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
'state').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', 'state')
self.assertEqual(result, 'result')
def test_task_log_get_with_no_state(self):
self.mox.StubOutWithMock(db, 'task_log_get')
db.task_log_get(self.context, 'task', 'begin', 'end',
'host', None).AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_get(self.context, 'task', 'begin',
'end', 'host', None)
self.assertEqual(result, 'result')
def test_task_log_begin_task(self):
self.mox.StubOutWithMock(db, 'task_log_begin_task')
db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
'end', 'host', 'items',
'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_begin_task(
self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
self.assertEqual(result, 'result')
def test_task_log_end_task(self):
self.mox.StubOutWithMock(db, 'task_log_end_task')
db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
'host', 'errors', 'message').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.task_log_end_task(
self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
self.assertEqual(result, 'result')
@mock.patch.object(notifications, 'audit_period_bounds')
@mock.patch.object(notifications, 'bandwidth_usage')
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
def test_notify_usage_exists(self, mock_notify, mock_bw, mock_audit):
info = {
'audit_period_beginning': 'start',
'audit_period_ending': 'end',
'bandwidth': 'bw_usage',
'image_meta': {},
'extra': 'info',
}
instance = objects.Instance(id=1, system_metadata={})
mock_audit.return_value = ('start', 'end')
mock_bw.return_value = 'bw_usage'
self.conductor.notify_usage_exists(self.context, instance, False, True,
system_metadata={},
extra_usage_info=dict(extra='info'))
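        # Matcher comparing instances by id only, so the assertions below do
        # not depend on object identity across the conductor boundary.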
class MatchInstance(object):
def __eq__(self, thing):
return thing.id == instance.id
notifier = self.conductor_manager.notifier
mock_audit.assert_called_once_with(False)
mock_bw.assert_called_once_with(MatchInstance(), 'start', True)
mock_notify.assert_called_once_with(notifier, self.context,
MatchInstance(),
'exists', system_metadata={},
extra_usage_info=info)
def test_security_groups_trigger_members_refresh(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_members_refresh')
self.conductor_manager.security_group_api.trigger_members_refresh(
self.context, [1, 2, 3])
self.mox.ReplayAll()
self.conductor.security_groups_trigger_members_refresh(self.context,
[1, 2, 3])
def test_get_ec2_ids(self):
expected = {
'instance-id': 'ec2-inst-id',
'ami-id': 'ec2-ami-id',
'kernel-id': 'ami-kernel-ec2-kernelid',
'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
}
inst = {
'uuid': 'fake-uuid',
'kernel_id': 'ec2-kernelid',
'ramdisk_id': 'ec2-ramdiskid',
'image_ref': 'fake-image',
}
self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
self.mox.StubOutWithMock(ec2utils, 'image_type')
ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
expected['instance-id'])
ec2utils.glance_id_to_ec2_id(self.context,
inst['image_ref']).AndReturn(
expected['ami-id'])
for image_type in ['kernel', 'ramdisk']:
image_id = inst['%s_id' % image_type]
ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
ec2utils.glance_id_to_ec2_id(self.context, image_id,
'ami-' + image_type).AndReturn(
'ami-%s-ec2-%sid' % (image_type, image_type))
self.mox.ReplayAll()
result = self.conductor.get_ec2_ids(self.context, inst)
self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
self.conductor_manager = self.conductor
def test_instance_get_by_uuid(self):
orig_instance = self._create_fake_instance()
copy_instance = self.conductor.instance_get_by_uuid(
self.context, orig_instance['uuid'], None)
self.assertEqual(orig_instance['name'],
copy_instance['name'])
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 1, 'device_name': 'foo',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
fake_bdm2 = {'id': 1, 'device_name': 'foo2',
'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume'}
fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
cells_rpcapi = self.conductor.cells_rpcapi
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(cells_rpcapi,
'bdm_update_or_create_at_top')
db.block_device_mapping_create(self.context,
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=True)
db.block_device_mapping_update(self.context, fake_bdm['id'],
fake_bdm).AndReturn(fake_bdm2)
cells_rpcapi.bdm_update_or_create_at_top(
self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
create=False)
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
def test_instance_get_all_by_filters(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=False)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
None, False)
def test_instance_get_all_by_filters_use_slave(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None, use_slave=True)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None,
use_slave=True)
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(),
'host', None).AndReturn('result')
db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
'node').AndReturn('result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context, 'host',
None, None)
self.assertEqual(result, 'result')
result = self.conductor.instance_get_all_by_host(self.context, 'host',
'node', None)
self.assertEqual(result, 'result')
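    # Helper for the service_get_* tests below: stub the named db API method
    # and check that the conductor either forwards its result (optionally
    # listified) or surfaces a db error as messaging.ExpectedException and,
    # once unwrapped via ExceptionHelper, as the original exception class.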
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(messaging.ExpectedException,
self.conductor.service_get_all_by,
self.context, **condargs)
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(host=None, topic=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'args')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['args'])
def _test_object_action(self, is_classmethod, raise_exception):
class TestObject(obj_base.NovaObject):
def foo(self, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
@classmethod
def bar(cls, context, raise_exception=False):
if raise_exception:
raise Exception('test')
else:
return 'test'
obj = TestObject()
if is_classmethod:
result = self.conductor.object_class_action(
self.context, TestObject.obj_name(), 'bar', '1.0',
tuple(), {'raise_exception': raise_exception})
else:
updates, result = self.conductor.object_action(
self.context, obj, 'foo', tuple(),
{'raise_exception': raise_exception})
self.assertEqual('test', result)
def test_object_action(self):
self._test_object_action(False, False)
def test_object_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, False, True)
def test_object_class_action(self):
self._test_object_action(True, False)
def test_object_class_action_on_raise(self):
self.assertRaises(messaging.ExpectedException,
self._test_object_action, True, True)
def test_object_action_copies_object(self):
class TestObject(obj_base.NovaObject):
fields = {'dict': fields.DictOfStringsField()}
def touch_dict(self, context):
self.dict['foo'] = 'bar'
self.obj_reset_changes()
obj = TestObject()
obj.dict = {}
obj.obj_reset_changes()
updates, result = self.conductor.object_action(
self.context, obj, 'touch_dict', tuple(), {})
# NOTE(danms): If conductor did not properly copy the object, then
# the new and reference copies of the nested dict object will be
# the same, and thus 'dict' will not be reported as changed
self.assertIn('dict', updates)
self.assertEqual({'foo': 'bar'}, updates['dict'])
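    # A minimal aliasing sketch of the NOTE above (illustration only):
    #
    #     nested = {}
    #     alias = nested          # same dict object; a mutation through
    #                             # `alias` is invisible as a "change"
    #     copy = dict(nested)     # distinct object; a mutation through
    #                             # `copy` shows up as a reportable delta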
def _test_expected_exceptions(self, db_method, conductor_method, errors,
*args, **kwargs):
# Tests that expected exceptions are handled properly.
for error in errors:
with mock.patch.object(db, db_method, side_effect=error):
self.assertRaises(messaging.ExpectedException,
conductor_method,
self.context, *args, **kwargs)
def test_action_event_start_expected_exceptions(self):
error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
self._test_expected_exceptions(
'action_event_start', self.conductor.action_event_start, [error],
{'foo': 'bar'})
def test_action_event_finish_expected_exceptions(self):
errors = (exc.InstanceActionNotFound(request_id='1',
instance_uuid='2'),
exc.InstanceActionEventNotFound(event='1', action_id='2'))
self._test_expected_exceptions(
'action_event_finish', self.conductor.action_event_finish,
errors, {'foo': 'bar'})
def test_instance_update_expected_exceptions(self):
errors = (exc.InvalidUUID(uuid='foo'),
exc.InstanceNotFound(instance_id=1),
exc.UnexpectedTaskStateError(expected='foo',
actual='bar'))
self._test_expected_exceptions(
'instance_update', self.conductor.instance_update,
errors, None, {'foo': 'bar'}, None)
def test_instance_get_by_uuid_expected_exceptions(self):
error = exc.InstanceNotFound(instance_id=1)
self._test_expected_exceptions(
'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
[error], None, [])
def test_aggregate_host_add_expected_exceptions(self):
error = exc.AggregateHostExists(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_add', self.conductor.aggregate_host_add,
[error], {'id': 1}, None)
def test_aggregate_host_delete_expected_exceptions(self):
error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
self._test_expected_exceptions(
'aggregate_host_delete', self.conductor.aggregate_host_delete,
[error], {'id': 1}, None)
def test_service_update_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_update',
self.conductor.service_update,
[error], {'id': 1}, None)
def test_service_destroy_expected_exceptions(self):
error = exc.ServiceNotFound(service_id=1)
self._test_expected_exceptions(
'service_destroy',
self.conductor.service_destroy,
[error], 1)
def _setup_aggregate_with_host(self):
aggregate_ref = db.aggregate_create(self.context.elevated(),
{'name': 'foo'}, metadata={'availability_zone': 'foo'})
self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
return aggregate_ref
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
self.assertIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_aggregate_host_delete(self):
aggregate_ref = self._setup_aggregate_with_host()
self.conductor.aggregate_host_delete(self.context, aggregate_ref,
'bar')
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
self.assertNotIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
def test_network_migrate_instance_start(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_start')
self.conductor_manager.network_api.migrate_instance_start(self.context,
'instance',
'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_start(self.context,
'instance',
'migration')
def test_network_migrate_instance_finish(self):
self.mox.StubOutWithMock(self.conductor_manager.network_api,
'migrate_instance_finish')
self.conductor_manager.network_api.migrate_instance_finish(
self.context, 'instance', 'migration')
self.mox.ReplayAll()
self.conductor.network_migrate_instance_finish(self.context,
'instance',
'migration')
def test_instance_destroy(self):
self.mox.StubOutWithMock(db, 'instance_destroy')
db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_destroy(self.context,
{'uuid': 'fake-uuid'})
self.assertEqual(result, 'fake-result')
def test_compute_unrescue(self):
self.mox.StubOutWithMock(self.conductor_manager.compute_api,
'unrescue')
self.conductor_manager.compute_api.unrescue(self.context, 'instance')
self.mox.ReplayAll()
self.conductor.compute_unrescue(self.context, 'instance')
def test_instance_get_active_by_window_joined(self):
self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
db.instance_get_active_by_window_joined(self.context, 'fake-begin',
'fake-end', 'fake-proj',
'fake-host')
self.mox.ReplayAll()
self.conductor.instance_get_active_by_window_joined(
self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
def test_instance_fault_create(self):
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(self.context, 'fake-values').AndReturn(
'fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_fault_create(self.context,
'fake-values')
self.assertEqual(result, 'fake-result')
def test_action_event_start(self):
self.mox.StubOutWithMock(db, 'action_event_start')
db.action_event_start(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_start(self.context, {})
def test_action_event_finish(self):
self.mox.StubOutWithMock(db, 'action_event_finish')
db.action_event_finish(self.context, mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.action_event_finish(self.context, {})
def test_agent_build_get_by_triple(self):
self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
'fake-arch').AndReturn('it worked')
self.mox.ReplayAll()
result = self.conductor.agent_build_get_by_triple(self.context,
'fake-hv',
'fake-os',
'fake-arch')
self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor_manager = self.conductor_service.manager
self.conductor = conductor_rpcapi.ConductorAPI()
def test_block_device_mapping_update_or_create(self):
fake_bdm = {'id': 'fake-id'}
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'_from_db_object')
db.block_device_mapping_create(self.context, fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update_or_create(self.context, fake_bdm)
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=True)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm,
create=False)
self.conductor.block_device_mapping_update_or_create(self.context,
fake_bdm)
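    # Same stubbing helper as in ConductorTestCase, but exercised over RPC:
    # a db exception is expected to come back as its original class, so no
    # ExceptionHelper unwrapping is involved here.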
def _test_stubbed(self, name, dbargs, condargs,
db_result_listified=False, db_exception=None):
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
else:
getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
self.conductor.service_get_all_by,
self.context, **condargs)
else:
result = self.conductor.service_get_all_by(self.context,
**condargs)
if db_result_listified:
self.assertEqual(['fake-result'], result)
else:
self.assertEqual('fake-result', result)
def test_service_get_all(self):
self._test_stubbed('service_get_all', (),
dict(topic=None, host=None, binary=None))
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic',
('host', 'topic'),
dict(topic='topic', host='host', binary=None))
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic',
('topic',),
dict(topic='topic', host=None, binary=None))
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host',
('host',),
dict(host='host', topic=None, binary=None))
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_result_listified=True)
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None))
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host',
('host',),
dict(topic='compute', host='host', binary=None),
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args',
('host', 'binary'),
dict(host='host', binary='binary', topic=None),
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', ['arg'])
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_time_big(self, mock_prepare, mock_update):
CONF.set_override('report_interval', 10)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with(timeout=9)
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_time_small(self, mock_prepare, mock_update):
CONF.set_override('report_interval', 3)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with(timeout=3)
@mock.patch.object(db, 'service_update')
@mock.patch('oslo.messaging.RPCClient.prepare')
def test_service_update_no_time(self, mock_prepare, mock_update):
CONF.set_override('report_interval', None)
services = {'id': 1}
self.conductor.service_update(self.context, services, {})
mock_prepare.assert_called_once_with()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
self.db = None
def _do_update(self, instance_uuid, **updates):
# NOTE(danms): the public API takes actual keyword arguments,
# so override the base class here to make the call correctly
return self.conductor.instance_update(self.context, instance_uuid,
**updates)
def test_bw_usage_get(self):
self.mox.StubOutWithMock(db, 'bw_usage_update')
self.mox.StubOutWithMock(db, 'bw_usage_get')
get_args = (self.context, 'uuid', 0, 'mac')
db.bw_usage_get(*get_args).AndReturn('foo')
self.mox.ReplayAll()
result = self.conductor.bw_usage_get(*get_args)
self.assertEqual(result, 'foo')
def test_block_device_mapping_update_or_create(self):
self.mox.StubOutWithMock(db, 'block_device_mapping_create')
self.mox.StubOutWithMock(db, 'block_device_mapping_update')
self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
'_from_db_object')
db.block_device_mapping_create(self.context, 'fake-bdm')
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update(self.context,
'fake-id', {'id': 'fake-id'})
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
block_device_obj.BlockDeviceMapping._from_db_object(
self.context, mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
self.conductor.block_device_mapping_update_or_create(self.context,
'fake-bdm')
def _test_stubbed(self, name, *args, **kwargs):
if args and isinstance(args[0], FakeContext):
ctxt = args[0]
args = args[1:]
else:
ctxt = self.context
db_exception = kwargs.get('db_exception')
self.mox.StubOutWithMock(db, name)
if db_exception:
getattr(db, name)(ctxt, *args).AndRaise(db_exception)
else:
getattr(db, name)(ctxt, *args).AndReturn('fake-result')
if name == 'service_destroy':
            # TODO(russellb) This is a hack ... setUp() starts the conductor
            # service. There is a cleanup step that runs after this test which
# also deletes the associated service record. This involves a call
# to db.service_destroy(), which we have stubbed out.
db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
if db_exception:
self.assertRaises(db_exception.__class__,
getattr(self.conductor, name),
self.context, *args)
else:
result = getattr(self.conductor, name)(self.context, *args)
self.assertEqual(
result, 'fake-result' if kwargs.get('returns', True) else None)
def test_service_get_all(self):
self._test_stubbed('service_get_all')
def test_service_get_by_host_and_topic(self):
self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
def test_service_get_all_by_topic(self):
self._test_stubbed('service_get_all_by_topic', 'topic')
def test_service_get_all_by_host(self):
self._test_stubbed('service_get_all_by_host', 'host')
def test_service_get_by_compute_host(self):
self._test_stubbed('service_get_by_compute_host', 'host')
def test_service_get_by_args(self):
self._test_stubbed('service_get_by_args', 'host', 'binary')
def test_service_get_by_compute_host_not_found(self):
self._test_stubbed('service_get_by_compute_host', 'host',
db_exception=exc.ComputeHostNotFound(host='host'))
def test_service_get_by_args_not_found(self):
self._test_stubbed('service_get_by_args', 'host', 'binary',
db_exception=exc.HostBinaryNotFound(binary='binary',
host='host'))
def test_service_create(self):
self._test_stubbed('service_create', {})
def test_service_destroy(self):
self._test_stubbed('service_destroy', '', returns=False)
def test_service_update(self):
ctxt = self.context
self.mox.StubOutWithMock(db, 'service_update')
db.service_update(ctxt, '', {}).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.service_update(self.context, {'id': ''}, {})
self.assertEqual(result, 'fake-result')
def test_instance_get_all_by_host_and_node(self):
self._test_stubbed('instance_get_all_by_host_and_node',
self.context.elevated(), 'host', 'node')
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
db.instance_get_all_by_host(self.context.elevated(), 'host',
None).AndReturn('fake-result')
self.mox.ReplayAll()
result = self.conductor.instance_get_all_by_host(self.context,
'host', None)
self.assertEqual(result, 'fake-result')
def test_wait_until_ready(self):
timeouts = []
calls = dict(count=0)
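        # fake_ping times out the first 14 attempts; the assertions below
        # pin the retry policy to repeated short (timeout=10) pings followed
        # by at least one blocking (timeout=None) ping.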
def fake_ping(context, message, timeout):
timeouts.append(timeout)
calls['count'] += 1
if calls['count'] < 15:
raise messaging.MessagingTimeout("fake")
self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
self.conductor.wait_until_ready(self.context)
self.assertEqual(timeouts.count(10), 10)
self.assertIn(None, timeouts)
def test_security_groups_trigger_handler(self):
self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
'trigger_handler')
self.conductor_manager.security_group_api.trigger_handler('event',
self.context,
'arg')
self.mox.ReplayAll()
self.conductor.security_groups_trigger_handler(self.context,
'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
"""Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
self.db = db
def test_client_exceptions(self):
instance = self._create_fake_instance()
# NOTE(danms): The LocalAPI should not raise exceptions wrapped
# in ClientException. KeyError should be raised if an invalid
# update key is passed, so use that to validate.
self.assertRaises(KeyError,
self._do_update, instance['uuid'], foo='bar')
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
class ConductorImportTest(test.TestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.LocalComputeTaskAPI)
def test_import_conductor_rpc(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.API)
self.assertIsInstance(conductor.ComputeTaskAPI(),
conductor_api.ComputeTaskAPI)
def test_import_conductor_override_to_local(self):
self.flags(use_local=False, group='conductor')
self.assertIsInstance(conductor.API(use_local=True),
conductor_api.LocalAPI)
self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
def test_all_allowed_keys(self):
def fake_db_instance_update(self, *args, **kwargs):
return None, None
self.stubs.Set(db, 'instance_update_and_get_original',
fake_db_instance_update)
ctxt = context.RequestContext('fake-user', 'fake-project')
conductor = conductor_api.LocalAPI()
updates = {}
for key in conductor_manager.allowed_updates:
if key in conductor_manager.datetime_fields:
updates[key] = timeutils.utcnow()
else:
updates[key] = 'foo'
with mock.patch('nova.objects.Instance._from_db_object'):
conductor.instance_update(ctxt, 'fake-instance', **updates)
def test_allowed_keys_are_real(self):
instance = models.Instance()
keys = list(conductor_manager.allowed_updates)
# NOTE(danms): expected_task_state is a parameter that gets
# passed to the db layer, but is not actually an instance attribute
del keys[keys.index('expected_task_state')]
for key in keys:
self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
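    """Mixin shared by the compute-task manager and API test cases; each
    subclass binds self.conductor and self.conductor_manager in setUp().
    """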
def setUp(self):
super(_BaseTaskTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
fake_server_actions.stub_out_action_events(self.stubs)
def fake_deserialize_context(serializer, ctxt_dict):
self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
return self.context
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
def test_live_migrate(self):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
live_migrate.execute(self.context,
mox.IsA(objects.Instance),
'destination',
'block_migration',
'disk_over_commit')
self.mox.ReplayAll()
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'live_migrate_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.live_migrate_instance(self.context, inst_obj,
'destination', 'block_migration', 'disk_over_commit')
else:
self.conductor.migrate_server(self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
def _test_cold_migrate(self, clean_shutdown=True):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(
self.conductor_manager.compute_rpcapi, 'prep_resize')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
inst = fake_instance.fake_db_instance(image_ref='image_ref')
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
flavor = flavors.get_default_flavor()
flavor.extra_specs = {'extra_specs': 'fake'}
request_spec = {'instance_type': obj_base.obj_to_primitive(flavor),
'instance_properties': {}}
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
scheduler_utils.build_request_spec(
self.context, 'image',
[mox.IsA(objects.Instance)],
instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec)
scheduler_utils.setup_instance_group(self.context, request_spec, {})
hosts = [dict(host='host1', nodename=None, limits={})]
self.conductor_manager.scheduler_client.select_destinations(
self.context, request_spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
filter_properties = {'limits': {},
'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
self.conductor_manager.compute_rpcapi.prep_resize(
self.context, 'image', mox.IsA(objects.Instance),
mox.IsA(objects.Flavor), 'host1', [], request_spec=request_spec,
filter_properties=filter_properties, node=None,
clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
scheduler_hint = {'filter_properties': {}}
if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
conductor_api.LocalComputeTaskAPI)):
# The API method is actually 'resize_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.resize_instance(
self.context, inst_obj, {}, scheduler_hint, flavor, [],
clean_shutdown)
else:
self.conductor.migrate_server(
self.context, inst_obj, scheduler_hint,
False, False, flavor, None, None, [],
clean_shutdown)
def test_cold_migrate(self):
self._test_cold_migrate()
def test_cold_migrate_forced_shutdown(self):
self._test_cold_migrate(clean_shutdown=False)
def test_build_instances(self):
system_metadata = flavors.save_flavor_info({},
flavors.get_default_flavor())
instances = [fake_instance.fake_instance_obj(
self.context,
system_metadata=system_metadata,
expected_attrs=['system_metadata']) for i in xrange(2)]
instance_type = flavors.extract_flavor(instances[0])
instance_type['extra_specs'] = {}
instance_type_p = jsonutils.to_primitive(instance_type)
instance_properties = jsonutils.to_primitive(instances[0])
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
spec = {'image': {'fake_data': 'should_pass_silently'},
'instance_properties': jsonutils.to_primitive(instances[0]),
'instance_type': instance_type_p,
'num_instances': 2}
scheduler_utils.setup_instance_group(self.context, spec, {})
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.instance_get_by_uuid(self.context, instances[0].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[0]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[0].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host1',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type_p,
'num_instances': 2},
filter_properties={'retry': {'num_attempts': 1,
'hosts': [['host1', 'node1']]},
'limits': []},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node1', limits=[])
db.instance_get_by_uuid(self.context, instances[1].uuid,
columns_to_join=['system_metadata'],
use_slave=False).AndReturn(
jsonutils.to_primitive(instances[1]))
db.block_device_mapping_get_all_by_instance(self.context,
instances[1].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
host='host2',
image={'fake_data': 'should_pass_silently'},
request_spec={
'image': {'fake_data': 'should_pass_silently'},
'instance_properties': instance_properties,
'instance_type': instance_type_p,
'num_instances': 2},
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2', 'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node2', limits=[])
self.mox.ReplayAll()
        # build_instances() is a cast; we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image={'fake_data': 'should_pass_silently'},
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_build_instances_scheduler_failure(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
exception = exc.NoValidHost(reason='fake-reason')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
scheduler_utils.setup_instance_group(self.context, spec, {})
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1,
'hosts': []}}).AndRaise(exception)
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
scheduler_utils.set_vm_state_and_notify(
self.context, instance.uuid, 'compute_task', 'build_instances',
updates, exception, spec, self.conductor_manager.db)
self.mox.ReplayAll()
        # build_instances() is a cast; we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
def test_unshelve_instance_on_host(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'start_instance')
self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.compute_rpcapi.start_instance(self.context,
instance)
self.conductor_manager._delete_image(self.context,
'fake_image_id')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_offloaded_instance_glance_image_not_found(self):
shelved_image_id = "image_not_found"
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
e = exc.ImageNotFound(image_id=shelved_image_id)
self.conductor_manager.image_api.get(
self.context, shelved_image_id, show_deleted=False).AndRaise(e)
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_host'] = 'fake-mini'
system_metadata['shelved_image_id'] = shelved_image_id
self.assertRaises(
exc.UnshelveException,
self.conductor_manager.unshelve_instance,
self.context, instance)
self.assertEqual(instance.vm_state, vm_states.ERROR)
def test_unshelve_offloaded_instance_image_id_is_none(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
        # 'shelved_image_id' is None for a volume-backed instance
instance.system_metadata['shelved_image_id'] = None
with contextlib.nested(
mock.patch.object(self.conductor_manager,
'_schedule_instances'),
mock.patch.object(self.conductor_manager.compute_rpcapi,
'unshelve_instance'),
) as (schedule_mock, unshelve_mock):
schedule_mock.return_value = [{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}]
self.conductor_manager.unshelve_instance(self.context, instance)
self.assertEqual(1, unshelve_mock.call_count)
def test_unshelve_instance_schedule_and_rebuild(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.image_api.get(self.context,
'fake_image_id', show_deleted=False).AndReturn('fake_image')
self.conductor_manager._schedule_instances(self.context,
'fake_image', filter_properties, instance).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image='fake_image',
filter_properties={'limits': {}}, node='fake_node')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, image, filter_properties,
*instances):
raise exc.NoValidHost(reason='')
with contextlib.nested(
mock.patch.object(self.conductor_manager.image_api, 'get',
return_value='fake_image'),
mock.patch.object(self.conductor_manager, '_schedule_instances',
fake_schedule_instances)
) as (_get_image, _schedule_instances):
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'],
show_deleted=False)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
filter_properties = {}
system_metadata = instance.system_metadata
self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'unshelve_instance')
self.conductor_manager.image_api.get(self.context,
'fake_image_id', show_deleted=False).AndReturn(None)
self.conductor_manager._schedule_instances(self.context,
None, filter_properties, instance).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image=None,
filter_properties={'limits': {}}, node='fake_node')
self.mox.ReplayAll()
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
def test_rebuild_instance(self):
inst_obj = self._create_fake_instance_obj()
rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations')
) as (rebuild_mock, select_dest_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.assertFalse(select_dest_mock.called)
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler(self):
inst_obj = self._create_fake_instance_obj()
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
expected_host = 'thebesthost'
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host}]),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
rebuild_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler_no_host(self):
inst_obj = self._create_fake_instance_obj()
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason='')),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
self.assertRaises(exc.NoValidHost,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
def setUp(self):
super(ConductorTaskTestCase, self).setUp()
self.conductor = conductor_manager.ComputeTaskManager()
self.conductor_manager = self.conductor
def test_migrate_server_fails_with_rebuild(self):
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, True, None, None, None)
def test_migrate_server_fails_with_flavor(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
self.assertRaises(NotImplementedError, self.conductor.migrate_server,
self.context, None, None, True, False, flavor, None, None)
def _build_request_spec(self, instance):
return {
'instance_properties': {
'uuid': instance['uuid'], },
}
def _test_migrate_server_deals_with_expected_exceptions(self, ex):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
inst_obj.uuid,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(type(ex),
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
instance = fake_instance.fake_db_instance(uuid='uuid',
vm_state=vm_states.ACTIVE)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
self.mox.StubOutWithMock(live_migrate, 'execute')
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
live_migrate.execute(self.context, mox.IsA(objects.Instance),
'destination', 'block_migration',
'disk_over_commit').AndRaise(ex)
scheduler_utils.set_vm_state_and_notify(self.context,
inst_obj.uuid,
'compute_task', 'migrate_server',
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
ex, self._build_request_spec(inst_obj),
self.conductor_manager.db)
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.InvalidCPUInfo,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
@mock.patch.object(live_migrate, 'execute')
def test_migrate_server_deals_with_instancenotrunning_exception(self,
mock_live_migrate, mock_set_state):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst, [])
error = exc.InstanceNotRunning(instance_id="fake")
mock_live_migrate.side_effect = error
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(exc.InstanceNotRunning,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
request_spec = self._build_request_spec(inst_obj)
mock_set_state.assert_called_once_with(self.context, inst_obj.uuid,
'compute_task',
'migrate_server',
dict(vm_state=inst_obj.vm_state,
task_state=None,
expected_task_state=task_states.MIGRATING),
error, request_spec, self.conductor_manager.db)
def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
ex = exc.DestinationHypervisorTooOld()
self._test_migrate_server_deals_with_expected_exceptions(ex)
def test_migrate_server_deals_with_HypervisorUnavailable(self):
ex = exc.HypervisorUnavailable(host='dummy')
self._test_migrate_server_deals_with_expected_exceptions(ex)
def test_migrate_server_deals_with_LiveMigrationWithOldNovaNotSafe(self):
ex = exc.LiveMigrationWithOldNovaNotSafe(server='dummy')
self._test_migrate_server_deals_with_expected_exceptions(ex)
@mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
@mock.patch.object(live_migrate, 'execute')
def test_migrate_server_deals_with_unexpected_exceptions(self,
mock_live_migrate, mock_set_state):
expected_ex = IOError('fake error')
mock_live_migrate.side_effect = expected_ex
instance = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), instance, [])
ex = self.assertRaises(exc.MigrationError,
self.conductor.migrate_server, self.context, inst_obj,
{'host': 'destination'}, True, False, None, 'block_migration',
'disk_over_commit')
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
mock_set_state.assert_called_once_with(self.context,
instance['uuid'],
'compute_task', 'migrate_server',
dict(vm_state=vm_states.ERROR,
task_state=inst_obj.task_state,
expected_task_state=task_states.MIGRATING,),
expected_ex, request_spec, self.conductor.db)
self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))
def test_set_vm_state_and_notify(self):
self.mox.StubOutWithMock(scheduler_utils,
'set_vm_state_and_notify')
scheduler_utils.set_vm_state_and_notify(
self.context, 1, 'compute_task', 'method', 'updates',
'ex', 'request_spec', self.conductor.db)
self.mox.ReplayAll()
self.conductor._set_vm_state_and_notify(
self.context, 1, 'method', 'updates', 'ex', 'request_spec')
def test_cold_migrate_no_valid_host_back_in_active_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type=flavor).AndReturn(request_spec)
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_props)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.ACTIVE,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
inst_obj.uuid,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate,
self.context, inst_obj,
flavor, filter_props, [resvs],
clean_shutdown=True)
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type=flavor).AndReturn(request_spec)
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_props)
exc_info = exc.NoValidHost(reason="")
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
filter_props).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
inst_obj.uuid,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor, filter_props, [resvs],
clean_shutdown=True)
def test_cold_migrate_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
with contextlib.nested(
mock.patch.object(compute_utils, 'get_image_metadata',
return_value=image),
mock.patch.object(scheduler_utils, 'build_request_spec',
return_value=request_spec),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
mock.patch.object(self.conductor.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason=""))
) as (image_mock, brs_mock, sig_mock, set_vm_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor, filter_props, [resvs],
clean_shutdown=True)
self.assertIn('cold migrate', nvh.message)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED)
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
hosts = [dict(host='host1', nodename=None, limits={})]
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.conductor.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(scheduler_utils,
'populate_filter_properties')
self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
'prep_resize')
self.mox.StubOutWithMock(self.conductor,
'_set_vm_state_and_notify')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
compute_utils.get_image_metadata(
self.context, self.conductor_manager.image_api,
'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
scheduler_utils.build_request_spec(
self.context, image, [inst_obj],
instance_type='flavor').AndReturn(request_spec)
scheduler_utils.setup_instance_group(self.context, request_spec,
filter_props)
expected_filter_props = {'retry': {'num_attempts': 1,
'hosts': []},
'context': None}
self.conductor.scheduler_client.select_destinations(
self.context, request_spec,
expected_filter_props).AndReturn(hosts)
scheduler_utils.populate_filter_properties(filter_props,
hosts[0])
exc_info = test.TestingException('something happened')
expected_filter_props = {'retry': {'num_attempts': 1,
'hosts': []}}
self.conductor.compute_rpcapi.prep_resize(
self.context, image, inst_obj,
'flavor', hosts[0]['host'], [resvs],
request_spec=request_spec,
filter_properties=expected_filter_props,
node=hosts[0]['nodename'],
clean_shutdown=True).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
self.conductor._set_vm_state_and_notify(self.context,
inst_obj.uuid,
'migrate_server',
updates, exc_info,
request_spec)
# NOTE(mriedem): Validate that the quota rollback is using
# the correct project_id and user_id.
project_id, user_id = quotas_obj.ids_from_instance(self.context,
inst_obj)
quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
user_id=user_id)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.conductor._cold_migrate,
self.context, inst_obj, 'flavor',
filter_props, [resvs],
clean_shutdown=True)
def test_resize_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
flavor_new = flavors.get_flavor_by_name('m1.small')
inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
vm_state=vm_states.STOPPED,
instance_type_id=flavor['id'])
inst_obj = objects.Instance._from_db_object(
self.context, objects.Instance(), inst,
expected_attrs=[])
request_spec = dict(instance_type=dict(extra_specs=dict()),
instance_properties=dict())
filter_props = dict(context=None)
resvs = 'fake-resvs'
image = 'fake-image'
with contextlib.nested(
mock.patch.object(compute_utils, 'get_image_metadata',
return_value=image),
mock.patch.object(scheduler_utils, 'build_request_spec',
return_value=request_spec),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
mock.patch.object(self.conductor.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason=""))
) as (image_mock, brs_mock, sig_mock, vm_st_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor_new, filter_props,
[resvs], clean_shutdown=True)
self.assertIn('resize', nvh.message)
def test_build_instances_instance_not_found(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
self.mox.StubOutWithMock(instances[0], 'refresh')
self.mox.StubOutWithMock(instances[1], 'refresh')
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
scheduler_utils.setup_instance_group(self.context, spec, {})
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
instances[0].refresh().AndRaise(
exc.InstanceNotFound(instance_id=instances[0].uuid))
instances[1].refresh()
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context, instance=instances[1], host='host2',
image={'fake-data': 'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
node='node2', limits=[])
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(scheduler_utils, 'build_request_spec')
def test_build_instances_info_cache_not_found(self, build_request_spec,
setup_instance_group):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
spec = {'fake': 'specs',
'instance_properties': instances[0]}
build_request_spec.return_value = spec
with contextlib.nested(
mock.patch.object(instances[0], 'refresh',
side_effect=exc.InstanceInfoCacheNotFound(
instance_uuid=instances[0].uuid)),
mock.patch.object(instances[1], 'refresh'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations', return_value=destinations),
mock.patch.object(self.conductor_manager.compute_rpcapi,
'build_and_run_instance')
) as (inst1_refresh, inst2_refresh, select_destinations,
build_and_run_instance):
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
# NOTE(sbauza): Due to populate_retry() later in the code,
# filter_properties is dynamically modified
setup_instance_group.assert_called_once_with(
self.context, spec, {'retry': {'num_attempts': 1,
'hosts': []}})
build_and_run_instance.assert_called_once_with(self.context,
instance=instances[1], host='host2', image={'fake-data':
'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
test_compute.BaseTestCase):
"""Conductor compute_task RPC namespace Tests."""
def setUp(self):
super(ConductorTaskRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_rpcapi.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
def setUp(self):
super(ConductorTaskAPITestCase, self).setUp()
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.ComputeTaskAPI()
service_manager = self.conductor_service.manager
self.conductor_manager = service_manager.compute_task_mgr
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
"""Conductor LocalComputeTaskAPI Tests."""
def setUp(self):
super(ConductorLocalComputeTaskAPITestCase, self).setUp()
self.conductor = conductor_api.LocalComputeTaskAPI()
self.conductor_manager = self.conductor._manager._target
|
|
from __future__ import absolute_import
import datetime
import decimal
import pytest
from django import VERSION
from django.core.management import call_command
from .django_app.models import Rabbit, models, Hole, Door, Customer, Simple, Client
from mixer.backend.django import Mixer
@pytest.fixture(autouse=True)
def mixer(request):
call_command('syncdb', interactive=False, verbosity=0)
request.addfinalizer(lambda: call_command('flush', interactive=False, verbosity=0))
return Mixer()
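# Note: each test below runs against a fresh database. `syncdb` creates the
# tables up front and the finalizer flushes all data afterwards, so blended
# objects never leak between tests.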
def test_base():
from mixer.backend.django import mixer
simple = mixer.blend('django_app.simple')
assert isinstance(simple.value, int)
def test_fields(mixer):
rabbit = mixer.blend('django_app.rabbit')
assert isinstance(rabbit, Rabbit)
assert rabbit.id
assert rabbit.pk
assert rabbit.pk == 1
assert len(rabbit.title) <= 16
assert isinstance(rabbit.active, bool)
assert isinstance(rabbit.object_id, int)
assert rabbit.object_id >= 0
assert isinstance(rabbit.error_code, int)
assert rabbit.error_code >= 0
assert isinstance(rabbit.created_at, datetime.date)
assert isinstance(rabbit.updated_at, datetime.datetime)
assert isinstance(rabbit.opened_at, datetime.time)
assert '@' in rabbit.email
assert isinstance(rabbit.speed, decimal.Decimal)
assert rabbit.custom
assert rabbit.text
assert len(rabbit.text) <= 512
assert rabbit.picture.read() == b'pylama\n'
assert rabbit.ip.count('.') == 3
for ip_section in rabbit.ip.split('.'):
assert 0 <= int(ip_section) <= 255
assert rabbit.ip6.count(':') == 7
for ip_section in rabbit.ip6.split(':'):
assert 0 <= int(ip_section, 16) <= 65535
assert isinstance(rabbit.file_path, str)
rabbit = mixer.blend('rabbit')
assert rabbit
def test_random_fields():
mixer = Mixer(fake=False)
hat = mixer.blend('django_app.hat', color=mixer.RANDOM)
assert hat.color in ('RD', 'GRN', 'BL')
def test_custom(mixer):
mixer.register(
Rabbit,
title=lambda: 'Mr. Rabbit',
speed=lambda: mixer.faker.small_positive_integer(99))
rabbit = mixer.blend(Rabbit, speed=mixer.RANDOM, percent=23)
assert isinstance(rabbit.speed, decimal.Decimal)
assert isinstance(rabbit.percent, float)
assert rabbit.title == 'Mr. Rabbit'
from mixer.backend.django import GenFactory
def getter(*args, **kwargs):
return "Always same"
class MyFactory(GenFactory):
generators = {models.CharField: getter}
fabric = MyFactory.get_fabric(models.CharField)
assert fabric() == "Always same"
mixer = Mixer(factory=MyFactory, fake=False)
assert mixer._Mixer__factory == MyFactory
test = mixer.blend(Rabbit)
assert test.title == "Always same"
@mixer.middleware('auth.user')
def encrypt_password(user): # noqa
user.set_password(user.password)
return user
user = mixer.blend('auth.User', password='test')
assert user.check_password('test')
user = user.__class__.objects.get(pk=user.pk)
assert user.check_password('test')
def test_select(mixer):
mixer.cycle(3).blend(Rabbit)
hole = mixer.blend(Hole, rabbit=mixer.SELECT)
assert not hole.rabbit
rabbits = Rabbit.objects.all()
hole = mixer.blend(Hole, owner=mixer.SELECT)
assert hole.owner in rabbits
rabbit = rabbits[0]
hole = mixer.blend(Hole, owner=mixer.SELECT(email=rabbit.email))
assert hole.owner == rabbit
def test_relation(mixer):
hat = mixer.blend('django_app.hat')
assert not hat.owner
silk = mixer.blend('django_app.silk')
assert not silk.hat.owner
silk = mixer.blend('django_app.silk', hat__owner__title='booble')
assert silk.hat.owner
assert silk.hat.owner.title == 'booble'
door = mixer.blend('django_app.door', hole__title='flash', hole__size=244)
assert door.hole.owner
assert door.hole.title == 'flash'
assert door.hole.size == 244
door = mixer.blend('django_app.door')
assert door.hole.title != 'flash'
num = mixer.blend('django_app.number', doors=[door])
assert num.doors.get() == door
num = mixer.blend('django_app.number')
assert num.doors.count() == 0
num = mixer.blend('django_app.number', doors__size=42)
assert num.doors.all()[0].size == 42
tag = mixer.blend('django_app.tag', customer=mixer.RANDOM)
assert tag.customer
def test_many_to_many_through(mixer):
pointa = mixer.blend('django_app.pointa', other=mixer.RANDOM)
assert pointa.other.all()
pointb = mixer.blend('pointb')
pointa = mixer.blend('pointa', other=pointb)
assert list(pointa.other.all()) == [pointb]
def test_random(mixer):
user = mixer.blend(
'auth.User', username=mixer.RANDOM('mixer', 'its', 'fun'))
assert user.username in ('mixer', 'its', 'fun')
rabbit = mixer.blend(Rabbit, url=mixer.RANDOM)
assert '/' in rabbit.url
def test_mix(mixer):
test = mixer.blend(Rabbit, title=mixer.MIX.username)
assert test.title == test.username
test = Rabbit.objects.get(pk=test.pk)
assert test.title == test.username
test = mixer.blend(Hole, title=mixer.MIX.owner.title)
assert test.title == test.owner.title
test = mixer.blend(Door, hole__title=mixer.MIX.owner.title)
assert test.hole.title == test.hole.owner.title
test = mixer.blend(Door, hole__title=mixer.MIX.owner.username(
lambda t: t + 's hole'
))
assert test.hole.owner.username in test.hole.title
assert 's hole' in test.hole.title
test = mixer.blend(Door, owner=mixer.MIX.hole.owner)
assert test.owner == test.hole.owner
def test_contrib(mixer):
from django.db import connection
_ = connection.connection.total_changes
assert mixer.blend('auth.user')
assert connection.connection.total_changes - _ == 1
_ = connection.connection.total_changes
assert mixer.blend(Customer)
assert connection.connection.total_changes - _ == 2
def test_invalid_scheme(mixer):
with pytest.raises(ValueError):
mixer.blend('django_app.Unknown')
@pytest.mark.skipif(
VERSION >= (1, 8, 0),
reason='Django 1.8 prevents unsaved model instances from being assigned to a ForeignKey')
def test_ctx(mixer):
with mixer.ctx(commit=False):
hole = mixer.blend(Hole)
assert hole
assert not Hole.objects.count()
with mixer.ctx(commit=True):
hole = mixer.blend(Hole)
assert hole
assert Hole.objects.count()
def test_skip(mixer):
rabbit = mixer.blend(Rabbit, created_at=mixer.SKIP, title=mixer.SKIP)
assert rabbit.created_at
assert not rabbit.title
def test_generic(mixer):
rabbit = mixer.blend(Rabbit)
assert rabbit.content_type
assert rabbit.content_type.model_class()
obj = mixer.blend(Simple)
with mixer.ctx(loglevel='DEBUG'):
rabbit = mixer.blend(Rabbit, content_object=obj)
assert rabbit.content_object == obj
assert rabbit.object_id == obj.pk
assert rabbit.content_type.model_class() == Simple
def test_deferred(mixer):
simples = mixer.cycle(3).blend(Simple)
rabbits = mixer.cycle(3).blend(
Rabbit, content_object=(s for s in simples)
)
assert rabbits
rabbit = rabbits[0]
rabbit = rabbit.__class__.objects.get(pk=rabbit.pk)
assert rabbit.content_object
def test_unique(mixer):
for _ in range(100):
mixer.blend(Client)
def test_guard(mixer):
r1 = mixer.guard(username='maxi').blend(Rabbit, username='maxi')
r2 = mixer.guard(username='maxi').blend(Rabbit, username='maxi')
assert r1
assert r1 == r2
def test_reload(mixer):
r1 = mixer.blend(Rabbit)
r1.title = 'wrong title'
r2 = mixer.reload(r1)
assert r2 == r1
assert r2.title != r1.title
s1 = mixer.blend(Simple)
r2, s2 = mixer.reload(r1, s1)
assert s1 == s2
|
|
#!/usr/bin/env python
# Convert line elements with overlapping endpoints into polylines in an
# SVG file.
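#
# Usage sketch (the script and file names here are illustrative):
#
#   python svg_polyline_optimizer.py INFILE.svg OUTFILE.svg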
import os
import sys
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
from collections import defaultdict
from optparse import OptionParser
SVG_NS = 'http://www.w3.org/2000/svg'
START = 1
END = 2
class Line(object):
def __init__(self, line_element):
a = line_element.attrib
self.x1 = float(a['x1'])
self.y1 = float(a['y1'])
self.x2 = float(a['x2'])
self.y2 = float(a['y2'])
self.strokeWidth = float(a['stroke-width'])
def reverse(self):
self.x1, self.x2 = self.x2, self.x1
self.y1, self.y2 = self.y2, self.y1
def start_hash(self):
return str(self.x1) + ',' + str(self.y1)
def end_hash(self):
return str(self.x2) + ',' + str(self.y2)
def endpoint(self, direction):
if direction == START:
return self.start_hash()
else:
return self.end_hash()
def get_other_hash(self, key):
h = self.start_hash()
if h == key:
h = self.end_hash()
return h
def __repr__(self):
return '((%s,%s),(%s,%s),sw:%s)' % (self.x1, self.y1,
self.x2, self.y2,
self.strokeWidth)
class EndpointHash(object):
def __init__(self, lines):
self.endpoints = defaultdict(list)
for l in lines:
self.endpoints[l.start_hash()].append(l)
self.endpoints[l.end_hash()].append(l)
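        # For example, two segments that meet at (1, 0) both appear under
        # the key "1.0,0.0", so shared endpoints can be looked up in O(1).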
def count_overlapping_points(self):
count = 0
for key, lines in self.endpoints.iteritems():
l = len(lines)
if l > 1:
count += 1
return count
def _del_line(self, key, line):
self.endpoints[key].remove(line)
if len(self.endpoints[key]) == 0:
del self.endpoints[key]
def remove_line(self, line):
key = line.start_hash()
self._del_line(key, line)
self._del_line(line.get_other_hash(key), line)
def pop_connected_line(self, line, key):
if key in self.endpoints:
line = self.endpoints[key][0]
self.remove_line(line)
return line
else:
return
def parse_svg(fname):
print "Parsing '%s'..." % (fname)
return etree.parse(fname)
def get_lines(svg):
lines = []
for l in svg.getroot().iter('{%s}line' % SVG_NS):
lines.append(Line(l))
return lines
def align_lines(l1, l2):
if ( l1.x1 == l2.x1 and l1.y1 == l2.y1
or l1.x2 == l2.x2 and l1.y2 == l2.y2):
l2.reverse()
def connect_lines(lines, endpoint_hash, line, direction, poly):
while True:
key = line.endpoint(direction)
connected_line = endpoint_hash.pop_connected_line(line, key)
if connected_line:
if direction == START:
poly.insert(0, connected_line)
else:
poly.append(connected_line)
align_lines(line, connected_line)
lines.remove(connected_line)
line = connected_line
else:
break
def find_polylines(lines, endpoint_hash):
polylines = []
while lines:
line = lines.pop()
endpoint_hash.remove_line(line)
poly = [line]
connect_lines(lines, endpoint_hash, line, START, poly)
connect_lines(lines, endpoint_hash, line, END, poly)
polylines.append(poly)
return polylines
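# A worked example (coordinates are hypothetical): given same-width segments
# (0,0)-(1,0), (1,0)-(1,1) and (5,5)-(6,5), the first two share the endpoint
# key "1.0,0.0", so find_polylines() returns two polylines: one chaining the
# first two segments and one holding the lone third segment.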
def optimize(svg):
lines = get_lines(svg)
print '%s line segments found' % len(lines)
lines_by_width = defaultdict(list)
for l in lines:
lines_by_width[l.strokeWidth].append(l)
del lines
print '%s different stroke widths found:' % len(lines_by_width)
for width, lines in lines_by_width.iteritems():
print ' strokeWidth: %s (%s lines)' % (width, len(lines))
polylines = []
for width, lines in lines_by_width.iteritems():
print 'Finding polylines (strokeWidth: %s)... ' % width
endpoint_hash = EndpointHash(lines)
overlapping_points = endpoint_hash.count_overlapping_points()
print (' %s line segments, %s overlapping points'
% (len(lines), overlapping_points)),
p = find_polylines(lines, endpoint_hash)
print '-> %s polylines' % len(p)
polylines += p
return polylines
def write_svg(polylines, outfile):
print "Writing '%s'..." % outfile
f = open(outfile, 'w')
f.write("""<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg width="100%" height="100%" xmlns="http://www.w3.org/2000/svg" version="1.1">
""")
def point_to_str(x, y):
return '%s,%s ' % (x, y)
for p in polylines:
points = []
for line in p:
if not points:
points.append(point_to_str(line.x1, line.y1))
points.append(point_to_str(line.x2, line.y2))
f.write('<polyline fill="none" stroke="#000" stroke-width="%s" points="%s"/>\n'
% (p[0].strokeWidth, ' '.join(points)))
f.write('</svg>\n')
f.close()
def get_filesize(fname):
return os.stat(fname).st_size
def print_size_stats(infile, outfile):
insize = get_filesize(infile)
outsize = get_filesize(outfile)
    print ('Original file size: %.2fKiB, new file size: %.2fKiB (%.2f%% of original)'
           % (insize / 1024., outsize / 1024., float(outsize) / insize * 100))
def main():
usage = 'Usage: %prog INFILE OUTFILE'
parser = OptionParser(usage=usage)
options, args = parser.parse_args()
if len(args) < 2:
parser.error('input and output files must be specified')
return 2
infile = args[0]
outfile = args[1]
svg = parse_svg(infile)
polylines = optimize(svg)
print '%s polyline(s) found in total' % len(polylines)
write_svg(polylines, outfile)
print_size_stats(infile, outfile)
return 0
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|
|
# -*- coding: utf-8 -*-
"""
In-memory data structures to hold the IR. We use a flow-graph of Operations.
An operation is a typed instruction with an opcode, a (possibly nested)
list of arguments and a result (see the `Operation` class below).
"""
from __future__ import print_function, division, absolute_import
from itertools import chain
from collections import defaultdict
from pykit import error, types
from pykit.adt import LinkedList
from pykit.ir import ops
from pykit.ir.pretty import pretty
from pykit.utils import (flatten, nestedmap, match, Delegate, traits, listify,
make_temper)
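# A minimal construction sketch using the classes defined below. The exact
# `types` constructors are assumptions; adjust them to the real pykit API:
#
#   func = Function('add1', ['x'], types.Function(types.Int32, [types.Int32]))
#   entry = func.new_block('entry')
#   op = Operation('add', types.Int32, [func.get_arg('x'), Constant(1)],
#                  result=func.temp('r'))
#   entry.append(op)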
class Value(object):
__str__ = pretty
class Module(Value):
"""
A module containing global values and functions. This defines the scope
of functions that can see each other.
globals: { global_name: GlobalValue }
functions: { func_name : Function }
"""
def __init__(self, globals=None, functions=None, temper=None):
self.globals = globals or {}
self.functions = functions or {}
self.temp = temper or make_temper()
for value in chain(self.globals.values(), self.functions.values()):
assert value.parent is None, (value, value.parent)
value.parent = self
def add_function(self, function):
assert function.name not in self.functions, function.name
self.functions[function.name] = function
function.module = self
def add_global(self, globalvalue):
assert globalvalue.name not in self.globals, globalvalue.name
self.globals[globalvalue.name] = globalvalue
globalvalue.module = self
def get_function(self, funcname):
return self.functions.get(funcname)
def get_global(self, gvname):
return self.globals.get(gvname)
class Function(Value):
"""
Function consisting of basic blocks.
Attributes
----------
module: Module
Module owning the function
name:
name of the function
args: [FuncArg]
argnames:
argument names ([str])
blocks:
List of basic blocks in topological order
startblock: Block
The entry basic block
exitblock: Block
        The last block in the list. It is only a true exit block once the
        function has been populated and actually ends with one.
values: { op_name: Operation }
uses: { Operation : [Operation] }
Operations that refer to this operation in their 'args' list
temp: function, name -> tempname
allocate a temporary name
"""
def __init__(self, name, argnames, type, temper=None):
self.module = None
self.name = name
self.type = type
self.temp = temper or make_temper()
self.blocks = LinkedList()
self.blockmap = dict((block.name, block) for block in self.blocks)
self.argnames = argnames
self.argdict = {}
self.uses = defaultdict(set)
# reserve names
for argname in argnames:
self.temp(argname)
@property
def args(self):
return [self.get_arg(argname) for argname in self.argnames]
@property
def startblock(self):
return self.blocks.head
@property
def exitblock(self):
return self.blocks.tail
@property
def ops(self):
"""Get a flat iterable of all Ops in this function"""
return chain(*self.blocks)
def new_block(self, label, ops=None, after=None):
"""Create a new block with name `label` and append it"""
assert label not in self.blockmap, label
label = self.temp(label)
return self.add_block(Block(label, self, ops), after)
def add_block(self, block, after=None):
"""Add a Block at the end, or after `after`"""
if block.parent is None:
block.parent = self
else:
assert block.parent is self
self.blockmap[block.name] = block
if after is None:
self.blocks.append(block)
else:
self.blocks.insert_after(block, after)
return block
def get_block(self, label):
return self.blockmap[label]
def del_block(self, block):
self.blocks.remove(block)
del self.blockmap[block.name]
def get_arg(self, argname):
"""Get argument as a Value"""
if argname in self.argdict:
return self.argdict[argname]
idx = self.argnames.index(argname)
type = self.type.argtypes[idx]
arg = FuncArg(self, argname, type)
self.argdict[argname] = arg
return arg
@property
def result(self):
"""We are a first-class value..."""
return self.name
# ______________________________________________________________________
# uses
def add_op(self, op):
"""
Register a new Op as part of the function.
Does NOT insert the Op in any basic block
"""
_add_args(self.uses, op, op.args)
def reset_uses(self):
from pykit.analysis import defuse
self.uses = defuse.defuse(self)
# ______________________________________________________________________
def __repr__(self):
return "FunctionGraph(%s)" % self.blocks
class GlobalValue(Value):
"""
GlobalValue in a Module.
"""
def __init__(self, name, type, external=False, address=None, value=None):
self.module = None
self.name = name
self.type = type
self.external = external
self.address = address
self.value = value
@property
def result(self):
"""We are a first-class value..."""
return self.name
@traits
class Block(Value):
"""
Basic block of Operations.
name: Name of block (unique within function)
parent: Function owning block
"""
head, tail = Delegate('ops'), Delegate('ops')
_prev, _next = None, None # LinkedList
def __init__(self, name, parent=None, ops=None):
self.name = name
self.parent = parent
self.ops = LinkedList(ops or [])
@property
def opcodes(self):
"""Returns [opcode] for all operations in the block"""
for op in self.ops:
yield op.opcode
@property
def optypes(self):
"""Returns [type] for all operations in the block"""
for op in self.ops:
yield op.type
def __iter__(self):
return iter(self.ops)
def append(self, op):
"""Append op to block"""
self.ops.append(op)
op.parent = self
self.parent.add_op(op)
def extend(self, ops):
"""Extend block with ops"""
for op in ops:
self.append(op)
@property
def result(self):
"""We are a first-class value..."""
return self.name
@property
@listify
def leaders(self):
"""
        Return the block's leaders (Ops up to the first non-leader) as a list
"""
for op in self.ops:
if ops.is_leader(op.opcode):
yield op
else:
break
@property
def terminator(self):
"""Block Op in block, which needs to be a terminator"""
assert self.is_terminated(), self.ops.tail
return self.ops.tail
def is_terminated(self):
"""Returns whether the block is terminated"""
return self.ops.tail and ops.is_terminator(self.ops.tail.opcode)
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return "Block(%s)" % self.name
class Local(Value):
"""
Local value in a Function. This is either a FuncArg or an Operation.
Constants do not belong to any function.
"""
@property
def function(self):
"""The Function owning this local value"""
raise NotImplementedError
def replace_uses(self, dst):
"""
Replace all uses of `self` with `dst`. This does not invalidate this
Operation!
"""
src = self
# Replace src with dst in use sites
for use in set(self.function.uses[src]):
def replace(op):
if op == src:
return dst
return op
newargs = nestedmap(replace, use.args)
use.set_args(newargs)
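    # For an Operation, a typical pattern is `old.replace_uses(new)`: every
    # use site that referenced `old` in its args now references `new`, and
    # once self.function.uses[old] is empty, `old` can be removed via delete().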
class FuncArg(Local):
"""
Argument to the function. Use Function.get_arg()
"""
def __init__(self, func, name, type):
self.parent = func
self.opcode = 'arg'
self.type = type
self.result = name
@property
def function(self):
return self.parent
def __repr__(self):
return "FuncArg(%%%s)" % self.result
class Operation(Local):
"""
Typed n-ary operation with a result. E.g.
%0 = add(%a, %b)
Attributes:
-----------
opcode:
ops.* opcode, e.g. "getindex"
type: types.Type
Result type of applying this operation
    args:
        (one level nested) list of argument Values,
        e.g. [Operation("getindex", ...)]
    operands:
        symbolic operands, e.g. ['%0'] (virtual registers)
    result:
        symbolic result, e.g. '%0'
"""
# __slots__ = ("parent", "opcode", "type", "args", "result", "metadata",
# "_prev", "_next")
def __init__(self, opcode, type, args, result=None, parent=None):
self.parent = parent
self.opcode = opcode
self.type = type
self._args = args
self.result = result
self.metadata = None
self._prev = None
self._next = None
@property
def uses(self):
"Enumerate all Operations referring to this value"
return self.function.uses[self]
@property
def args(self):
"""Operands to this Operation (readonly)"""
return self._args
# ______________________________________________________________________
# Placement
def insert_before(self, op):
"""Insert self before op"""
assert self.parent is None, op
self.parent = op.parent
self.parent.ops.insert_before(self, op)
self.function.add_op(self)
def insert_after(self, op):
"""Insert self after op"""
assert self.parent is None, self
self.parent = op.parent
self.parent.ops.insert_after(self, op)
self.function.add_op(self)
# ______________________________________________________________________
# Replace
def replace_op(self, opcode, args, type=None):
"""Replace this operation's opcode, args and optionally type"""
# Replace ourselves inplace
self.opcode = opcode
self.set_args(args)
if type is not None:
self.type = type
def replace_args(self, replacements):
"""
Replace arguments listed in the `replacements` dict. The replacement
instructions must dominate this instruction.
"""
if replacements:
newargs = nestedmap(lambda arg: replacements.get(arg, arg), self.args)
self.set_args(newargs)
@match
def replace(self, op):
"""
Replace this operation with a new operation, changing this operation.
"""
assert op.result is not None and op.result == self.result
self.replace_op(op.opcode, op.args, op.type)
@replace.case(op=list)
def replace_list(self, op):
"""
Replace this Op with a list of other Ops. If no Op has the same
result as this Op, the Op is deleted:
>>> print block
%0 = ...
>>> print [op0, op1, op2]
[%0 = ..., %1 = ..., %2 = ...]
        >>> op0.replace([op1, op0, op2])
>>> print block
%1 = ...
%0 = ...
%2 = ...
        >>> op0.replace([op3, op4])
%1 = ...
%3 = ...
%4 = ...
%2 = ...
"""
lst = self._set_registers(*op)
for i, op in enumerate(lst):
if op.result == self.result:
break
op.insert_before(self)
else:
self.delete()
return
self.replace(op)
last = op
for op in lst[i + 1:]:
op.insert_after(last)
last = op
# ______________________________________________________________________
def set_args(self, args):
"""Set a new argslist"""
_del_args(self.function.uses, self, self.args)
_add_args(self.function.uses, self, args)
self._args = args
# ______________________________________________________________________
def delete(self):
"""Delete this operation"""
if self.uses:
raise error.IRError(
"Operation %s is still in use and cannot be deleted" % (self,))
_del_args(self.function.uses, self, self.args)
self.unlink()
self.result = None
def unlink(self):
"""Unlink from the basic block"""
self.parent.ops.remove(self)
self.parent = None
# ______________________________________________________________________
def add_metadata(self, metadata):
if self.metadata is None:
self.metadata = metadata
else:
self.metadata.update(metadata)
@property
def function(self):
return self.parent.parent
@property
def block(self):
"""Containing block"""
return self.parent
@property
def operands(self):
"""
Operands to this operation, in the form of args with symbols
and constants.
>>> print Op("mul", Int32, [op_a, op_b]).operands
['a', 'b']
"""
non_constants = (Block, Operation, FuncArg, GlobalValue)
result = lambda x: x.result if isinstance(x, non_constants) else x
return nestedmap(result, self.args)
@property
def symbols(self):
"""Set of symbolic register operands"""
return [x for x in flatten(self.operands)]
# ______________________________________________________________________
def _set_registers(self, *ops):
"Set virtual register names if unset for each Op in ops"
for op in ops:
if not op.result:
op.result = self.function.temp()
return ops
# ______________________________________________________________________
def __repr__(self):
if self.result:
return "%s = %s(%s)" % (self.result, self.opcode,
repr(self.operands))
return "%s(%s)" % (self.opcode, repr(self.operands))
def __iter__(self):
return iter((self.result, self.type, self.opcode, self.args))
def _add_args(uses, newop, args):
"Update uses when a new instruction is inserted"
def add(arg):
if isinstance(arg, (Op, FuncArg, Block)):
uses[arg].add(newop)
nestedmap(add, args)
def _del_args(uses, oldop, args):
"Delete uses when an instruction is removed"
seen = set() # Guard against duplicates in 'args'
def remove(arg):
if isinstance(arg, Operation) and arg not in seen:
uses[arg].remove(oldop)
seen.add(arg)
nestedmap(remove, args)
class Constant(Value):
"""
Constant value.
"""
def __init__(self, pyval, type=None):
self.opcode = ops.constant
self.type = type or types.typeof(pyval)
self.args = [pyval]
self.result = None
def replace_op(self, opcode, args, type=None):
raise RuntimeError("Constants cannot be replaced")
def replace(self, newop):
raise RuntimeError("Constants cannot be replaced")
@property
def const(self):
const, = self.args
return const
def __repr__(self):
return "constant(%s)" % (self.const,)
class Undef(Value):
"""Undefined value"""
def __init__(self, type):
self.type = type
def __eq__(self, other):
return isinstance(other, Undef) and self.type == other.type
def __hash__(self):
return hash(type(self))
Op = Operation
Const = Constant
|
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adversarial training to learn trivial encryption functions,
from the paper "Learning to Protect Communications with
Adversarial Neural Cryptography", Abadi & Andersen, 2016.
https://arxiv.org/abs/1610.06918
This program creates and trains three neural networks,
termed Alice, Bob, and Eve. Alice takes inputs
in_m (message), in_k (key) and outputs 'ciphertext'.
Bob takes inputs in_k, ciphertext and tries to reconstruct
the message.
Eve is an adversarial network that takes input ciphertext
and also tries to reconstruct the message.
The main function attempts to train these networks and then
evaluates them, all on random plaintext and key values.
"""
# Python 2/3 compatibility imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import signal
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_float('learning_rate', 0.0008, 'Constant learning rate')
flags.DEFINE_integer('batch_size', 4096, 'Batch size')
FLAGS = flags.FLAGS
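# Both flags can be overridden on the command line (parsed by tf.app.run()),
# e.g., with an illustrative script name:
#   python adversarial_crypto.py --learning_rate=0.0004 --batch_size=512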
# Input and output configuration.
TEXT_SIZE = 16
KEY_SIZE = 16
# Training parameters.
ITERS_PER_ACTOR = 1
EVE_MULTIPLIER = 2 # Train Eve 2x for every step of Alice/Bob
# Train until either max loops or Alice/Bob "good enough":
MAX_TRAINING_LOOPS = 1000
BOB_LOSS_THRESH = 0.02 # Exit when Bob loss < 0.02 and Eve > 7.7 bits
EVE_LOSS_THRESH = 7.7
BOB_LOSS_THRESH1 = 0.04 # Exit when Bob loss < 0.04 and Eve > 7.5 bits
EVE_LOSS_THRESH2 = 7.5
# Logging and evaluation.
PRINT_EVERY = 200 # In training, log every 200 steps.
EVE_EXTRA_ROUNDS = 2000  # At the end, train Eve a bit more.
RETRAIN_EVE_ITERS = 10000  # Retrain Eve up to ITERS*LOOPS times.
RETRAIN_EVE_LOOPS = 25  # With an evaluation each loop.
NUMBER_OF_EVE_RESETS = 5  # And do this up to 5 times with a fresh Eve.
# Use EVAL_BATCHES samples each time we check accuracy.
EVAL_BATCHES = 1
def batch_of_random_bools(batch_size, n):
"""Return a batch of random "boolean" numbers.
Args:
batch_size: Batch size dimension of returned tensor.
n: number of entries per batch.
Returns:
A [batch_size, n] tensor of "boolean" numbers, where each number is
    represented as -1 or 1.
"""
as_int = tf.random_uniform(
[batch_size, n], minval=0, maxval=2, dtype=tf.int32)
expanded_range = (as_int * 2) - 1
return tf.cast(expanded_range, tf.float32)
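# For example, batch_of_random_bools(2, 3) might evaluate to
# [[-1., 1., 1.], [1., -1., 1.]]; the values are random on every run.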
class AdversarialCrypto(object):
"""Primary model implementation class for Adversarial Neural Crypto.
This class contains the code for the model itself,
and when created, plumbs the pathways from Alice to Bob and
Eve, creates the optimizers and loss functions, etc.
Attributes:
eve_loss: Eve's loss function.
bob_loss: Bob's loss function. Different units from eve_loss.
eve_optimizer: A tf op that runs Eve's optimizer.
bob_optimizer: A tf op that runs Bob's optimizer.
bob_reconstruction_loss: Bob's message reconstruction loss,
which is comparable to eve_loss.
reset_eve_vars: Execute this op to completely reset Eve.
"""
def get_message_and_key(self):
"""Generate random pseudo-boolean key and message values."""
batch_size = tf.placeholder_with_default(FLAGS.batch_size, shape=[])
in_m = batch_of_random_bools(batch_size, TEXT_SIZE)
in_k = batch_of_random_bools(batch_size, KEY_SIZE)
return in_m, in_k
def model(self, collection, message, key=None):
"""The model for Alice, Bob, and Eve. If key=None, the first FC layer
takes only the message as inputs. Otherwise, it uses both the key
and the message.
Args:
collection: The graph keys collection to add new vars to.
message: The input message to process.
key: The input key (if any) to use.
"""
if key is not None:
combined_message = tf.concat(axis=1, values=[message, key])
else:
combined_message = message
# Ensure that all variables created are in the specified collection.
with tf.contrib.framework.arg_scope(
[tf.contrib.layers.fully_connected, tf.contrib.layers.conv2d],
variables_collections=[collection]):
fc = tf.contrib.layers.fully_connected(
combined_message,
TEXT_SIZE + KEY_SIZE,
biases_initializer=tf.constant_initializer(0.0),
activation_fn=None)
# Perform a sequence of 1D convolutions (by expanding the message out to 2D
# and then squeezing it back down).
fc = tf.expand_dims(fc, 2)
# 2,1 -> 1,2
conv = tf.contrib.layers.conv2d(
fc, 2, 2, 2, 'SAME', activation_fn=tf.nn.sigmoid)
# 1,2 -> 1, 2
conv = tf.contrib.layers.conv2d(
conv, 2, 1, 1, 'SAME', activation_fn=tf.nn.sigmoid)
# 1,2 -> 1, 1
conv = tf.contrib.layers.conv2d(
conv, 1, 1, 1, 'SAME', activation_fn=tf.nn.tanh)
conv = tf.squeeze(conv, 2)
return conv
def __init__(self):
in_m, in_k = self.get_message_and_key()
encrypted = self.model('alice', in_m, in_k)
decrypted = self.model('bob', encrypted, in_k)
eve_out = self.model('eve', encrypted, None)
self.reset_eve_vars = tf.group(
*[w.initializer for w in tf.get_collection('eve')])
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
# Eve's goal is to decrypt the entire message:
eve_bits_wrong = tf.reduce_sum(
tf.abs((eve_out + 1.0) / 2.0 - (in_m + 1.0) / 2.0), [1])
self.eve_loss = tf.reduce_sum(eve_bits_wrong)
self.eve_optimizer = optimizer.minimize(
self.eve_loss, var_list=tf.get_collection('eve'))
# Alice and Bob want to be accurate...
self.bob_bits_wrong = tf.reduce_sum(
tf.abs((decrypted + 1.0) / 2.0 - (in_m + 1.0) / 2.0), [1])
# ... and to not let Eve do better than guessing.
self.bob_reconstruction_loss = tf.reduce_sum(self.bob_bits_wrong)
bob_eve_error_deviation = tf.abs(float(TEXT_SIZE) / 2.0 - eve_bits_wrong)
# 7-9 bits wrong is OK too, so we squish the error function a bit.
# Without doing this, we often tend to hang out at 0.25 / 7.5 error,
# and it seems bad to have continued, high communication error.
bob_eve_loss = tf.reduce_sum(
tf.square(bob_eve_error_deviation) / (TEXT_SIZE / 2)**2)
# Rescale the losses to [0, 1] per example and combine.
self.bob_loss = (self.bob_reconstruction_loss / TEXT_SIZE + bob_eve_loss)
self.bob_optimizer = optimizer.minimize(
self.bob_loss,
var_list=(tf.get_collection('alice') + tf.get_collection('bob')))
def doeval(s, ac, n, itercount):
"""Evaluate the current network on n batches of random examples.
Args:
s: The current TensorFlow session
ac: an instance of the AdversarialCrypto class
n: The number of iterations to run.
itercount: Iteration count label for logging.
Returns:
    Bob's and Eve's losses, as a percentage of bits incorrect.
"""
bob_loss_accum = 0
eve_loss_accum = 0
for _ in xrange(n):
bl, el = s.run([ac.bob_reconstruction_loss, ac.eve_loss])
bob_loss_accum += bl
eve_loss_accum += el
bob_loss_percent = bob_loss_accum / (n * FLAGS.batch_size)
eve_loss_percent = eve_loss_accum / (n * FLAGS.batch_size)
print('%d %.2f %.2f' % (itercount, bob_loss_percent, eve_loss_percent))
sys.stdout.flush()
return bob_loss_percent, eve_loss_percent
def train_until_thresh(s, ac):
for j in xrange(MAX_TRAINING_LOOPS):
for _ in xrange(ITERS_PER_ACTOR):
s.run(ac.bob_optimizer)
for _ in xrange(ITERS_PER_ACTOR * EVE_MULTIPLIER):
s.run(ac.eve_optimizer)
if j % PRINT_EVERY == 0:
bob_avg_loss, eve_avg_loss = doeval(s, ac, EVAL_BATCHES, j)
if (bob_avg_loss < BOB_LOSS_THRESH and eve_avg_loss > EVE_LOSS_THRESH):
print('Target losses achieved.')
return True
return False
def train_and_evaluate():
"""Run the full training and evaluation loop."""
ac = AdversarialCrypto()
init = tf.global_variables_initializer()
with tf.Session() as s:
s.run(init)
print('# Batch size: ', FLAGS.batch_size)
print('# Iter Bob_Recon_Error Eve_Recon_Error')
if train_until_thresh(s, ac):
for _ in xrange(EVE_EXTRA_ROUNDS):
s.run(ac.eve_optimizer)
print('Loss after eve extra training:')
doeval(s, ac, EVAL_BATCHES * 2, 0)
for _ in xrange(NUMBER_OF_EVE_RESETS):
print('Resetting Eve')
s.run(ac.reset_eve_vars)
eve_counter = 0
for _ in xrange(RETRAIN_EVE_LOOPS):
for _ in xrange(RETRAIN_EVE_ITERS):
eve_counter += 1
s.run(ac.eve_optimizer)
doeval(s, ac, EVAL_BATCHES, eve_counter)
doeval(s, ac, EVAL_BATCHES, eve_counter)
def main(unused_argv):
# Exit more quietly with Ctrl-C.
signal.signal(signal.SIGINT, signal.SIG_DFL)
train_and_evaluate()
if __name__ == '__main__':
tf.app.run()
|
|
import time
import unittest
from exonum_client import ExonumClient
from exonum_client.crypto import KeyPair
from exonum_launcher.action_result import ActionResult
from exonum_launcher.configuration import Configuration
from exonum_launcher.explorer import ExecutionFailError
from exonum_launcher.launcher import Launcher
from suite import (
assert_processes_exited_successfully,
ExonumCryptoAdvancedClient,
generate_config,
generate_migration_config,
run_4_nodes,
wait_network_to_start,
)
INSTANCE_NAME = "cryptocurrency"
class MigrationTests(unittest.TestCase):
"""Tests for a checking service migration mechanism."""
def setUp(self):
self.network = run_4_nodes("cryptocurrency-migration")
self.addCleanup(self._tear_down, False)
wait_network_to_start(self.network)
def wait_for_api_restart(self):
"""Waits until the API servers of nodes are restarted after the set
of active services has changed."""
time.sleep(0.25)
wait_network_to_start(self.network)
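    # The flow exercised below: deploy the artifact, stop or freeze the
    # running 0.1.0 instance, migrate its data to 0.2.0, switch the instance
    # artifact to 0.2.0, resume the service, and finally unload the old
    # 0.1.0 artifact.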
def full_migration_flow(self, action: str):
host, public_port, private_port = self.network.api_address(0)
client = ExonumClient(host, public_port, private_port)
# Deploy a service with 0.2.0 version.
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency"}}
config_dict = generate_config(self.network, instances=instances)
deploy_config = Configuration(config_dict)
with Launcher(deploy_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
self.wait_for_api_restart()
explorer = launcher.explorer()
for artifact in launcher.launch_state.completed_deployments():
deployed = explorer.is_deployed(artifact)
self.assertTrue(deployed)
# Create Alice's wallet with 0.1.0 version of the service
alice_keys = self._create_wallet(client, "Alice", "0.1.0")
# Stop the working service with version 0.1.0.
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency", "action": action}}
stop_config_dict = generate_config(
self.network, instances=instances, artifact_action="none", artifact_version="0.1.0"
)
stop_config = Configuration(stop_config_dict)
with Launcher(stop_config) as launcher:
launcher.start_all()
launcher.wait_for_start()
self.wait_for_api_restart()
# Check that the service status has been changed to `stopped`.
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == INSTANCE_NAME:
self.assertEqual(service["status"]["type"], "stopped" if action == "stop" else "frozen")
# Migrate service data from 0.1.0 to 0.2.0 version
migrations = {INSTANCE_NAME: {"runtime": "rust", "name": "exonum-cryptocurrency", "version": "0.2.0"}}
migrations_dict = generate_migration_config(self.network, migrations)
migration_config = Configuration(migrations_dict)
with Launcher(migration_config) as launcher:
launcher.migrate_all()
launcher.wait_for_migration()
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == INSTANCE_NAME:
self.assertEqual(service["data_version"], "0.2.0")
# Switch service artifact from 0.1.0 to 0.2.0 version
with Launcher(migration_config) as launcher:
launcher.migrate_all()
launcher.wait_for_migration()
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == INSTANCE_NAME:
self.assertEqual(service["spec"]["artifact"]["version"], "0.2.0")
# Resume service with a new logic version 0.2.0
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency", "action": "resume"}}
resume_config_dict = generate_config(self.network, instances=instances, artifact_action="none")
resume_config = Configuration(resume_config_dict)
with Launcher(resume_config) as launcher:
launcher.start_all()
launcher.wait_for_start()
self.wait_for_api_restart()
# Check that the service status has been changed to `active`.
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == INSTANCE_NAME:
self.assertEqual(service["status"]["type"], "active")
self.assertEqual(service["spec"]["artifact"]["version"], "0.2.0")
# Unload artifact with version 0.1.0
unload_config_dict = generate_config(
self.network, instances=instances, artifact_action="unload", artifact_version="0.1.0"
)
unload_config = Configuration(unload_config_dict)
with Launcher(unload_config) as launcher:
launcher.unload_all()
launcher.wait_for_unload()
self.wait_for_api_restart()
explorer = launcher.explorer()
for artifact in unload_config.artifacts.values():
deployed = explorer.is_deployed(artifact)
self.assertFalse(deployed)
# Create Bob's wallet with version 0.2.0 of the service.
bob_keys = self._create_wallet(client, "Bob", "0.2.0")
# Transfer some coins and check balances and history length.
with ExonumCryptoAdvancedClient(client, instance_name=INSTANCE_NAME) as crypto_client:
alice_balance = crypto_client.get_balance(alice_keys)
self.assertEqual(alice_balance, 100)
alice_history_len = crypto_client.get_history_len(alice_keys)
self.assertEqual(alice_history_len, 0)
bob_balance = crypto_client.get_balance(bob_keys)
self.assertEqual(bob_balance, 100)
crypto_client.transfer(20, alice_keys, bob_keys.public_key)
with client.create_subscriber("transactions") as subscriber:
subscriber.wait_for_new_event()
alice_balance = crypto_client.get_balance(alice_keys)
self.assertEqual(alice_balance, 80)
# Get a value from the new field `history_len`.
alice_history_len = crypto_client.get_history_len(alice_keys)
self.assertEqual(alice_history_len, 1)
bob_balance = crypto_client.get_balance(bob_keys)
self.assertEqual(bob_balance, 120)
def test_full_migration_flow_with_stopped_service(self):
"""Tests full service migration flow with stopped service."""
self.full_migration_flow("stop")
def test_full_migration_flow_with_frozen_service(self):
"""Tests full service migration flow with frozen service."""
self.full_migration_flow("freeze")
def test_migrate_running_service(self):
"""Tests migration flow when the migrating service is running."""
        # Deploy a service with version 0.2.0.
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency"}}
config_dict = generate_config(self.network, instances=instances)
deploy_config = Configuration(config_dict)
with Launcher(deploy_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
self.wait_for_api_restart()
explorer = launcher.explorer()
for artifact in launcher.launch_state.completed_deployments():
deployed = explorer.is_deployed(artifact)
self.assertTrue(deployed)
        # Migrate the service data from version 0.1.0 to 0.2.0.
migrations = {INSTANCE_NAME: {"runtime": "rust", "name": "exonum-cryptocurrency", "version": "0.2.0"}}
migrations_dict = generate_migration_config(self.network, migrations)
migration_config = Configuration(migrations_dict)
with Launcher(migration_config) as launcher:
launcher.migrate_all()
launcher.wait_for_migration()
for instance, (status, message) in launcher.launch_state.completed_migrations().items():
if instance == INSTANCE_NAME:
self.assertEqual(status, ActionResult.Fail)
self.assertIn("is not stopped or frozen", message)
def test_migration_without_switching_artifact(self):
"""Tests migration flow without migration logic stage."""
host, public_port, private_port = self.network.api_address(0)
client = ExonumClient(host, public_port, private_port)
        # Deploy a service with version 0.2.0.
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency"}}
config_dict = generate_config(self.network, instances=instances)
deploy_config = Configuration(config_dict)
with Launcher(deploy_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
self.wait_for_api_restart()
explorer = launcher.explorer()
for artifact in launcher.launch_state.completed_deployments():
deployed = explorer.is_deployed(artifact)
self.assertTrue(deployed)
# Stop the working service with version 0.1.0.
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency", "action": "stop"}}
stop_config_dict = generate_config(
self.network, instances=instances, artifact_action="none", artifact_version="0.1.0"
)
stop_config = Configuration(stop_config_dict)
with Launcher(stop_config) as launcher:
launcher.start_all()
launcher.wait_for_start()
self.wait_for_api_restart()
# Check that the service status has been changed to `stopped`.
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == INSTANCE_NAME:
self.assertEqual(service["status"]["type"], "stopped")
        # Migrate the service data from version 0.1.0 to 0.2.0.
migrations = {INSTANCE_NAME: {"runtime": "rust", "name": "exonum-cryptocurrency", "version": "0.2.0"}}
migrations_dict = generate_migration_config(self.network, migrations)
migration_config = Configuration(migrations_dict)
with Launcher(migration_config) as launcher:
launcher.migrate_all()
launcher.wait_for_migration()
for service in client.public_api.available_services().json()["services"]:
if service["spec"]["name"] == INSTANCE_NAME:
self.assertEqual(service["data_version"], "0.2.0")
# Try to resume the service without a new logic migration to version 0.2.0
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency", "action": "resume"}}
resume_config_dict = generate_config(self.network, instances=instances, artifact_action="none")
resume_config = Configuration(resume_config_dict)
with Launcher(resume_config) as launcher:
launcher.start_all()
with self.assertRaises(ExecutionFailError) as e:
launcher.wait_for_start()
self.assertIn(
f"Service `{INSTANCE_NAME}` has data version (0.2.0) differing from its artifact version", e
)
def test_unload_artifact_of_running_service(self):
"""Tests unload logic when running service references to an artifact."""
        # Deploy a service with version 0.2.0.
instances = {INSTANCE_NAME: {"artifact": "cryptocurrency"}}
config_dict = generate_config(self.network, instances=instances)
deploy_config = Configuration(config_dict)
with Launcher(deploy_config) as launcher:
launcher.deploy_all()
launcher.wait_for_deploy()
self.wait_for_api_restart()
explorer = launcher.explorer()
for artifact in launcher.launch_state.completed_deployments():
deployed = explorer.is_deployed(artifact)
self.assertTrue(deployed)
# Try to unload artifact with version 0.1.0
unload_config_dict = generate_config(
self.network, instances=instances, artifact_action="unload", artifact_version="0.1.0"
)
unload_config = Configuration(unload_config_dict)
with Launcher(unload_config) as launcher:
launcher.unload_all()
launcher.wait_for_unload()
self.wait_for_api_restart()
explorer = launcher.explorer()
for artifact in unload_config.artifacts.values():
deployed = explorer.is_deployed(artifact)
                self.assertTrue(deployed)  # Still deployed: the unload is expected to fail.
status, message = launcher.launch_state.unload_status
self.assertEqual(status, ActionResult.Fail)
self.assertIn("service `101:cryptocurrency` references it as the current artifact", message)
def _tear_down(self, check_exit_codes=True):
"""Performs cleanup, removing network files."""
if self.network is not None:
outputs = self.network.stop()
self.network.deinitialize()
self.network = None
if check_exit_codes:
assert_processes_exited_successfully(self, outputs)
def tearDown(self):
self._tear_down()
def _create_wallet(self, client: ExonumClient, wallet_name: str, version: str) -> KeyPair:
with ExonumCryptoAdvancedClient(client, INSTANCE_NAME, version) as crypto_client:
keys = KeyPair.generate()
response = crypto_client.create_wallet(keys, wallet_name)
self.assertEqual(response.status_code, 200)
with client.create_subscriber("transactions") as subscriber:
subscriber.wait_for_new_event()
return keys
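# The checks above repeatedly scan `available_services()` for one instance.
# A minimal helper sketch that factors out that lookup is shown here; it is
# illustrative only (`find_service` is not part of the suite above) and
# assumes the same response shape the assertions already rely on.
def find_service(client, instance_name):
    """Return the service entry for `instance_name`, or None if absent."""
    for service in client.public_api.available_services().json()["services"]:
        if service["spec"]["name"] == instance_name:
            return service
    return None

# Usage sketch: assert on a status without re-writing the loop each time.
#   service = find_service(client, INSTANCE_NAME)
#   assert service is not None and service["status"]["type"] == "active"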
|
|
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
__all__ = ['E']
import operator
import sys
import threading
import numpy
# Declare a double type that does not exist in Python space
double = numpy.double
# The default kind for undeclared variables
default_kind = 'double'
if sys.version_info[0] < 3:
int_ = int
long_ = long
else:
int_ = numpy.int32
long_ = numpy.int64
type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float',
double: 'double', complex: 'complex', bytes: 'bytes'}
kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float,
'double': double, 'complex': complex, 'bytes': bytes}
kind_rank = ['bool', 'int', 'long', 'float', 'double', 'complex', 'none']
scalar_constant_types = [bool, int_, long_, float, double, complex, bytes]
# Final corrections for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
    type_to_kind[str] = 'str'
    kind_to_type['str'] = str
    scalar_constant_types.append(str)
    # Plain Python ints must also be accepted as constants, since int_ is
    # numpy.int32 under Python 3.
    scalar_constant_types.append(int)
scalar_constant_types = tuple(scalar_constant_types)
from numexpr import interpreter
class Expression(object):
def __init__(self):
object.__init__(self)
def __getattr__(self, name):
        if name.startswith('_'):
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)
else:
return VariableNode(name, default_kind)
E = Expression()
class Context(threading.local):
def get(self, value, default):
return self.__dict__.get(value, default)
def get_current_context(self):
return self.__dict__
def set_new_context(self, dict_):
self.__dict__.update(dict_)
# This will be called each time the local object is used in a separate thread
_context = Context()
def get_optimization():
return _context.get('optimization', 'none')
# helper functions for creating __magic__ methods
def ophelper(f):
def func(*args):
args = list(args)
for i, x in enumerate(args):
if isConstant(x):
args[i] = x = ConstantNode(x)
if not isinstance(x, ExpressionNode):
raise TypeError("unsupported object type: %s" % type(x))
return f(*args)
func.__name__ = f.__name__
func.__doc__ = f.__doc__
func.__dict__.update(f.__dict__)
return func
def allConstantNodes(args):
"returns True if args are all ConstantNodes."
for x in args:
if not isinstance(x, ConstantNode):
return False
return True
def isConstant(ex):
"Returns True if ex is a constant scalar of an allowed type."
return isinstance(ex, scalar_constant_types)
def commonKind(nodes):
node_kinds = [node.astKind for node in nodes]
str_count = node_kinds.count('bytes') + node_kinds.count('str')
if 0 < str_count < len(node_kinds): # some args are strings, but not all
raise TypeError("strings can only be operated with strings")
if str_count > 0: # if there are some, all of them must be
return 'bytes'
n = -1
for x in nodes:
n = max(n, kind_rank.index(x.astKind))
return kind_rank[n]
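# Illustrative examples of the promotion rules above, using kinds from
# `kind_rank`: mixing 'int' and 'double' yields 'double' (the higher rank),
# while mixing 'bytes' with any numeric kind raises TypeError because
# `str_count` is then neither zero nor the full argument count.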
max_int32 = 2147483647
min_int32 = -max_int32 - 1
def bestConstantType(x):
# ``numpy.string_`` is a subclass of ``bytes``
if isinstance(x, (bytes, str)):
return bytes
# Numeric conversion to boolean values is not tried because
# ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
# interpreted as booleans when ``False`` and ``True`` are already
# supported.
if isinstance(x, (bool, numpy.bool_)):
return bool
# ``long`` objects are kept as is to allow the user to force
# promotion of results by using long constants, e.g. by operating
# a 32-bit array with a long (64-bit) constant.
if isinstance(x, (long_, numpy.int64)):
return long_
# ``double`` objects are kept as is to allow the user to force
# promotion of results by using double constants, e.g. by operating
# a float (32-bit) array with a double (64-bit) constant.
if isinstance(x, double):
return double
if isinstance(x, (int, numpy.integer)):
# Constants needing more than 32 bits are always
# considered ``long``, *regardless of the platform*, so we
# can clearly tell 32- and 64-bit constants apart.
if not (min_int32 <= x <= max_int32):
return long_
return int_
    # Python floats are already double precision, so there is no need to
    # list ``double`` here as well.
for converter in float, complex:
try:
y = converter(x)
        except Exception:
continue
if y == x:
return converter
def getKind(x):
converter = bestConstantType(x)
return type_to_kind[converter]
def binop(opname, reversed=False, kind=None):
# Getting the named method from self (after reversal) does not
# always work (e.g. int constants do not have a __lt__ method).
opfunc = getattr(operator, "__%s__" % opname)
@ophelper
def operation(self, other):
if reversed:
self, other = other, self
if allConstantNodes([self, other]):
return ConstantNode(opfunc(self.value, other.value))
else:
return OpNode(opname, (self, other), kind=kind)
return operation
def func(func, minkind=None, maxkind=None):
@ophelper
def function(*args):
if allConstantNodes(args):
return ConstantNode(func(*[x.value for x in args]))
kind = commonKind(args)
if kind in ('int', 'long'):
# Exception for following NumPy casting rules
#FIXME: this is not always desirable. The following
# functions which return ints (for int inputs) on numpy
# but not on numexpr: copy, abs, fmod, ones_like
kind = 'double'
else:
# Apply regular casting rules
if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
kind = minkind
if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
kind = maxkind
return FuncNode(func.__name__, args, kind)
return function
@ophelper
def where_func(a, b, c):
if isinstance(a, ConstantNode):
#FIXME: This prevents where(True, a, b)
raise ValueError("too many dimensions")
if allConstantNodes([a, b, c]):
return ConstantNode(numpy.where(a, b, c))
return FuncNode('where', [a, b, c])
def encode_axis(axis):
if isinstance(axis, ConstantNode):
axis = axis.value
if axis is None:
axis = interpreter.allaxes
else:
if axis < 0:
raise ValueError("negative axis are not supported")
if axis > 254:
raise ValueError("cannot encode axis")
return RawNode(axis)
def sum_func(a, axis=None):
axis = encode_axis(axis)
if isinstance(a, ConstantNode):
return a
if isinstance(a, (bool, int_, long_, float, double, complex)):
a = ConstantNode(a)
return FuncNode('sum', [a, axis], kind=a.astKind)
def prod_func(a, axis=None):
axis = encode_axis(axis)
if isinstance(a, (bool, int_, long_, float, double, complex)):
a = ConstantNode(a)
if isinstance(a, ConstantNode):
return a
return FuncNode('prod', [a, axis], kind=a.astKind)
@ophelper
def contains_func(a, b):
return FuncNode('contains', [a, b], kind='bool')
@ophelper
def div_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1. / b.value)])
return OpNode('div', [a, b])
@ophelper
def truediv_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1. / b.value)])
kind = commonKind([a, b])
if kind in ('bool', 'int', 'long'):
kind = 'double'
return OpNode('div', [a, b], kind=kind)
@ophelper
def rtruediv_op(a, b):
return truediv_op(b, a)
@ophelper
def pow_op(a, b):
if allConstantNodes([a, b]):
return ConstantNode(a ** b)
if isinstance(b, ConstantNode):
x = b.value
if get_optimization() == 'aggressive':
RANGE = 50 # Approximate break even point with pow(x,y)
# Optimize all integral and half integral powers in [-RANGE, RANGE]
# Note: for complex numbers RANGE could be larger.
if (int(2 * x) == 2 * x) and (-RANGE <= abs(x) <= RANGE):
n = int_(abs(x))
ishalfpower = int_(abs(2 * x)) % 2
def multiply(x, y):
if x is None: return y
return OpNode('mul', [x, y])
r = None
p = a
mask = 1
while True:
if (n & mask):
r = multiply(r, p)
mask <<= 1
if mask > n:
break
p = OpNode('mul', [p, p])
if ishalfpower:
kind = commonKind([a])
if kind in ('int', 'long'):
kind = 'double'
r = multiply(r, OpNode('sqrt', [a], kind))
if r is None:
r = OpNode('ones_like', [a])
if x < 0:
r = OpNode('div', [ConstantNode(1), r])
return r
if get_optimization() in ('moderate', 'aggressive'):
if x == -1:
return OpNode('div', [ConstantNode(1), a])
if x == 0:
return OpNode('ones_like', [a])
if x == 0.5:
kind = a.astKind
if kind in ('int', 'long'): kind = 'double'
return FuncNode('sqrt', [a], kind=kind)
if x == 1:
return a
if x == 2:
return OpNode('mul', [a, a])
return OpNode('pow', [a, b])
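# The aggressive branch above expands small integral powers by
# square-and-multiply: each bit of the exponent selects which repeated
# squaring of `a` is multiplied into the result. A plain-Python sketch of
# the same scheme (illustrative only; numexpr itself builds OpNodes):
def _int_power_sketch(base, n):
    """Compute base**n for n >= 0 by binary exponentiation."""
    result = 1
    power = base  # holds base**(2**k) for the current bit k
    while n:
        if n & 1:
            result *= power
        power *= power
        n >>= 1
    return result

assert _int_power_sketch(3, 5) == 3 ** 5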
# The functions and the minimum and maximum types accepted
functions = {
'copy': func(numpy.copy),
'ones_like': func(numpy.ones_like),
'sqrt': func(numpy.sqrt, 'float'),
'sin': func(numpy.sin, 'float'),
'cos': func(numpy.cos, 'float'),
'tan': func(numpy.tan, 'float'),
'arcsin': func(numpy.arcsin, 'float'),
'arccos': func(numpy.arccos, 'float'),
'arctan': func(numpy.arctan, 'float'),
'sinh': func(numpy.sinh, 'float'),
'cosh': func(numpy.cosh, 'float'),
'tanh': func(numpy.tanh, 'float'),
'arcsinh': func(numpy.arcsinh, 'float'),
'arccosh': func(numpy.arccosh, 'float'),
'arctanh': func(numpy.arctanh, 'float'),
'fmod': func(numpy.fmod, 'float'),
'arctan2': func(numpy.arctan2, 'float'),
'log': func(numpy.log, 'float'),
'log1p': func(numpy.log1p, 'float'),
'log10': func(numpy.log10, 'float'),
'exp': func(numpy.exp, 'float'),
'expm1': func(numpy.expm1, 'float'),
'abs': func(numpy.absolute, 'float'),
'where': where_func,
'real': func(numpy.real, 'double', 'double'),
'imag': func(numpy.imag, 'double', 'double'),
'complex': func(complex, 'complex'),
'conj': func(numpy.conj, 'complex'),
'sum': sum_func,
'prod': prod_func,
'contains': contains_func,
}
class ExpressionNode(object):
"""An object that represents a generic number object.
This implements the number special methods so that we can keep
track of how this object has been used.
"""
astType = 'generic'
def __init__(self, value=None, kind=None, children=None):
object.__init__(self)
self.value = value
if kind is None:
kind = 'none'
self.astKind = kind
if children is None:
self.children = ()
else:
self.children = tuple(children)
def get_real(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).real)
return OpNode('real', (self,), 'double')
real = property(get_real)
def get_imag(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).imag)
return OpNode('imag', (self,), 'double')
imag = property(get_imag)
def __str__(self):
return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
self.astKind, self.children)
def __repr__(self):
return self.__str__()
def __neg__(self):
return OpNode('neg', (self,))
def __invert__(self):
return OpNode('invert', (self,))
def __pos__(self):
return self
    # Reject Python's truth-value coercion outright: the short-circuiting
    # ``and``/``or``/``not`` cannot build expression nodes. See #24 for more info.
def __nonzero__(self):
raise TypeError("You can't use Python's standard boolean operators in "
"NumExpr expressions. You should use their bitwise "
"counterparts instead: '&' instead of 'and', "
"'|' instead of 'or', and '~' instead of 'not'.")
__add__ = __radd__ = binop('add')
__sub__ = binop('sub')
__rsub__ = binop('sub', reversed=True)
__mul__ = __rmul__ = binop('mul')
if sys.version_info[0] < 3:
__div__ = div_op
__rdiv__ = binop('div', reversed=True)
__truediv__ = truediv_op
__rtruediv__ = rtruediv_op
__pow__ = pow_op
__rpow__ = binop('pow', reversed=True)
__mod__ = binop('mod')
__rmod__ = binop('mod', reversed=True)
__lshift__ = binop('lshift')
__rlshift__ = binop('lshift', reversed=True)
__rshift__ = binop('rshift')
__rrshift__ = binop('rshift', reversed=True)
# boolean operations
__and__ = binop('and', kind='bool')
__or__ = binop('or', kind='bool')
__gt__ = binop('gt', kind='bool')
__ge__ = binop('ge', kind='bool')
__eq__ = binop('eq', kind='bool')
__ne__ = binop('ne', kind='bool')
__lt__ = binop('gt', reversed=True, kind='bool')
__le__ = binop('ge', reversed=True, kind='bool')
class LeafNode(ExpressionNode):
leafNode = True
class VariableNode(LeafNode):
astType = 'variable'
def __init__(self, value=None, kind=None, children=None):
LeafNode.__init__(self, value=value, kind=kind)
class RawNode(object):
"""Used to pass raw integers to interpreter.
For instance, for selecting what function to use in func1.
    Purposely don't inherit from ExpressionNode, since we don't want
this to be used for anything but being walked.
"""
astType = 'raw'
astKind = 'none'
def __init__(self, value):
self.value = value
self.children = ()
def __str__(self):
return 'RawNode(%s)' % (self.value,)
__repr__ = __str__
class ConstantNode(LeafNode):
astType = 'constant'
def __init__(self, value=None, children=None):
kind = getKind(value)
# Python float constants are double precision by default
if kind == 'float':
kind = 'double'
LeafNode.__init__(self, value=value, kind=kind)
def __neg__(self):
return ConstantNode(-self.value)
def __invert__(self):
return ConstantNode(~self.value)
class OpNode(ExpressionNode):
astType = 'op'
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
class FuncNode(OpNode):
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
OpNode.__init__(self, opcode, args, kind)
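# A short usage sketch for the machinery above: attribute access on `E`
# yields VariableNodes, and the overloaded operators assemble an AST of
# OpNode/ConstantNode objects without evaluating anything.
if __name__ == '__main__':
    ex = (E.a + 1) * E.b
    print(ex.astType)   # 'op'
    print(ex.value)     # 'mul'
    print([child.astType for child in ex.children])  # ['op', 'variable']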
|
|
"""
Tests for the suite page server.
"""
from js_test_tool.tests.helpers import TempWorkspaceTestCase
import unittest
import mock
import re
import requests
import os
import pkg_resources
import json
from js_test_tool.suite import SuiteDescription, SuiteRenderer
from js_test_tool.suite_server import SuitePageServer, SuitePageHandler, \
TimeoutError, DuplicateSuiteNameError
from js_test_tool.coverage import SrcInstrumenter, SrcInstrumenterError
class SuitePageServerTest(TempWorkspaceTestCase):
NUM_SUITE_DESC = 2
def setUp(self):
# Call the superclass implementation to create the temp workspace
super(SuitePageServerTest, self).setUp()
# Create mock suite descriptions
self.suite_desc_list = [
mock.MagicMock(SuiteDescription)
for _ in range(self.NUM_SUITE_DESC)
]
# Configure the mock suite descriptions to have no dependencies
suite_num = 0
for suite in self.suite_desc_list:
suite.suite_name.return_value = 'test-suite-{}'.format(suite_num)
suite.lib_paths.return_value = []
suite.src_paths.return_value = []
suite.spec_paths.return_value = []
suite.fixture_paths.return_value = []
suite.root_dir.return_value = os.getcwd()
suite_num += 1
# Create a mock suite renderer
self.suite_renderer = mock.MagicMock(SuiteRenderer)
self.port = 54321
# Create the server
self.server = SuitePageServer(
self.suite_desc_list, self.suite_renderer, port=self.port
)
# Start the server
self.server.start()
def tearDown(self):
# Stop the server, which frees the port
self.server.stop()
def test_root_url(self):
# Check that the root URL has the right form
url_regex = re.compile('^http://0.0.0.0:{}/$'.format(self.port))
url = self.server.root_url()
result = url_regex.match(url)
self.assertIsNot(result, None,
msg="URL has incorrect format: '{}'".format(url))
def test_suite_url_list(self):
# Retrieve the urls for each test suite page
url_list = self.server.suite_url_list()
# Expect that we have the correct number of URLs
self.assertEqual(len(url_list), self.NUM_SUITE_DESC)
# Expect that the URLs have the correct form
for suite_num in range(self.NUM_SUITE_DESC):
expected_url = self.server.root_url() + u'suite/test-suite-{}'.format(suite_num)
self.assertIn(expected_url, url_list)
def test_enforce_unique_suite_names(self):
# Try to create a suite server in which two suites have the same name
suite_desc_list = [
mock.MagicMock(SuiteDescription)
for _ in range(4)
]
suite_desc_list[0].suite_name.return_value = 'test-suite-1'
suite_desc_list[1].suite_name.return_value = 'test-suite-2'
suite_desc_list[2].suite_name.return_value = 'test-suite-1'
suite_desc_list[3].suite_name.return_value = 'test-suite-3'
# Expect an error when initializing the server
with self.assertRaises(DuplicateSuiteNameError):
SuitePageServer(suite_desc_list, self.suite_renderer)
def test_serve_suite_pages(self):
# Configure the suite renderer to return a test string
expected_page = u'test suite mock'
self.suite_renderer.render_to_string.return_value = expected_page
# Check that we can load each page in the suite
for url in self.server.suite_url_list():
self._assert_page_equals(url, expected_page)
def test_serve_suite_pages_ignore_get_params(self):
# Configure the suite renderer to return a test string
expected_page = u'test suite mock'
self.suite_renderer.render_to_string.return_value = expected_page
# Check that we can load each page in the suite,
# even if we add additional GET params
for url in self.server.suite_url_list():
url = url + "?param=12345"
self._assert_page_equals(url, expected_page)
def test_serve_runners(self):
for path in ['jasmine/jasmine.css',
'jasmine/jasmine.js',
'jasmine/jasmine-json.js',
'jasmine/jasmine-html.js']:
pkg_path = 'runner/' + path
expected_page = pkg_resources.resource_string('js_test_tool', pkg_path)
url = self.server.root_url() + pkg_path
self._assert_page_equals(url, expected_page)
def test_ignore_runner_get_params(self):
for path in ['jasmine/jasmine.css',
'jasmine/jasmine.js',
'jasmine/jasmine-json.js',
'jasmine/jasmine-html.js']:
pkg_path = 'runner/' + path
expected_page = pkg_resources.resource_string('js_test_tool', pkg_path)
# Append GET params to the URL
url = self.server.root_url() + pkg_path + "?param=abc.123&another=87"
# Should still be able to load the page
self._assert_page_equals(url, expected_page)
def test_serve_lib_js(self):
# Configure the suite description to contain JS dependencies
lib_paths = ['lib/1.js', 'lib/subdir/2.js']
self.suite_desc_list[0].lib_paths.return_value = lib_paths
# Create fake files to serve
os.makedirs('lib/subdir')
expected_page = u'\u0236est \u023Dib file'
self._create_fake_files(lib_paths, expected_page)
# Expect that the server sends us the files
for path in lib_paths:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path
self._assert_page_equals(url, expected_page)
def test_serve_src_js(self):
# Configure the suite description to contain JS source files
src_paths = ['src/1.js', 'src/subdir/2.js']
self.suite_desc_list[0].src_paths.return_value = src_paths
# Create fake files to serve
os.makedirs('src/subdir')
expected_page = u'test \u023Frc file'
self._create_fake_files(src_paths, expected_page)
# Expect that the server sends us the files
for path in src_paths:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path
self._assert_page_equals(url, expected_page)
def test_serve_spec_js(self):
# Configure the suite description to contain JS spec files
spec_paths = ['spec/1.js', 'spec/subdir/2.js']
self.suite_desc_list[0].spec_paths.return_value = spec_paths
# Create fake files to serve
os.makedirs('spec/subdir')
expected_page = u'test spe\u023C file'
self._create_fake_files(spec_paths, expected_page)
# Expect that the server sends us the files
for path in spec_paths:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path
self._assert_page_equals(url, expected_page)
def test_serve_text_fixtures(self):
# Configure the suite description to contain test fixture files
fixture_paths = ['fixtures/1.html', 'fixtures/subdir/2.html']
self.suite_desc_list[0].fixture_paths.return_value = fixture_paths
# Create fake files to serve
os.makedirs('fixtures/subdir')
expected_page = u'test fi\u039Eture'
self._create_fake_files(fixture_paths, expected_page)
# Expect that the server sends us the files
for path in fixture_paths:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path
self._assert_page_equals(url, expected_page)
def test_serve_binary_fixtures(self):
# Configure the suite description to contain binary fixture files
fixture_paths = ['fixtures/test.mp4', 'fixtures/test.png']
self.suite_desc_list[0].fixture_paths.return_value = fixture_paths
# Create fake files to serve
os.mkdir('fixtures')
file_contents = '\x02\x03\x04\x05\x06'
self._create_fake_files(fixture_paths, file_contents, encoding=None)
# Expect that the server sends us the files as
# an un-decoded byte stream
for path in fixture_paths:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path
self._assert_page_equals(url, file_contents, encoding=None)
def test_serve_byte_range_requests(self):
# Configure the suite description to contain a binary fixture file
fixture_paths = ['fixtures/test.mp4']
self.suite_desc_list[0].fixture_paths.return_value = fixture_paths
# Make this file fairly large, so we can access ranges of it
file_size = 10000
# Create a fake file to serve
os.mkdir('fixtures')
file_contents = '\x01' * file_size
self._create_fake_files(fixture_paths, file_contents, encoding=None)
# Check for byte range support
url = self.server.root_url() + 'suite/test-suite-0/include/fixtures/test.mp4'
resp = requests.get(url, headers={'Range': None})
# Expect that the server supports byte ranges
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers.get('Accept-Ranges'), 'bytes')
# Check that we can make requests for byte ranges
# Examples taken from the RFC:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1
test_cases = [
('0-499', 0, 499),
('0-', 0, 9999),
('9000-9999', 9000, 9999),
('9500-', 9500, 9999),
('-500', 9500, 9999),
]
for byte_range, content_start, content_end in test_cases:
print "Sending byte range '{0}'".format(byte_range)
resp = requests.get(url, headers={'Range': 'bytes=' + byte_range})
# Expect that we get a 206 (partial content)
self.assertEqual(resp.status_code, 206)
self.assertEqual(
resp.headers.get('Content-Range'),
'bytes {0}-{1}/{2}'.format(content_start, content_end, file_size)
)
content_len = content_end - content_start + 1
self.assertEqual(resp.headers.get('Content-Length'), str(content_len))
self.assertEqual(len(resp.content), content_len)
def test_serve_multiple_byte_ranges(self):
# Configure the suite description to contain a binary fixture file
fixture_paths = ['fixtures/test.mp4']
self.suite_desc_list[0].fixture_paths.return_value = fixture_paths
# Create a fake file to serve
        # The file consists of ten identical \x01 bytes
os.mkdir('fixtures')
file_contents = '\x01' * 10
self._create_fake_files(fixture_paths, file_contents, encoding=None)
        # Make a request for multiple byte ranges
byte_range = '0-3,4-7'
url = self.server.root_url() + 'suite/test-suite-0/include/fixtures/test.mp4'
resp = requests.get(url, headers={'Range': 'bytes=' + byte_range})
# Expect that the request for multiple ranges is ignored
# and the whole file is returned
# (we don't implement this part of the protocol)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, file_contents)
def test_invalid_byte_range(self):
# Configure the suite description to contain a binary fixture file
fixture_paths = ['fixtures/test.mp4']
self.suite_desc_list[0].fixture_paths.return_value = fixture_paths
# Make this file fairly large, so we can access ranges of it
file_size = 10000
# Create a fake file to serve
os.mkdir('fixtures')
file_contents = '\x01' * file_size
self._create_fake_files(fixture_paths, file_contents, encoding=None)
# Send invalid byte range headers and expect a 200 with the full file returned
url = self.server.root_url() + 'suite/test-suite-0/include/fixtures/test.mp4'
for invalid_range in ['not_bytes=0-10', 'bytes = space', 'bytes=text-text', 'bytes=-']:
resp = requests.get(url, headers={'Range': invalid_range})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, file_contents)
def test_unsatisfiable_range(self):
# Configure the suite description to contain a binary fixture file
fixture_paths = ['fixtures/test.mp4']
self.suite_desc_list[0].fixture_paths.return_value = fixture_paths
# Make this file fairly large, so we can access ranges of it
file_size = 10000
# Create a fake file to serve
os.mkdir('fixtures')
file_contents = '\x01' * file_size
self._create_fake_files(fixture_paths, file_contents, encoding=None)
# Send unsatisfiable range (start > end) and expect a 406
url = self.server.root_url() + 'suite/test-suite-0/include/fixtures/test.mp4'
resp = requests.get(url, headers={'Range': 'bytes=10-2'})
self.assertEqual(resp.status_code, 406)
def test_serve_iso_encoded_dependency(self):
# Configure the suite description to contain dependency files
# that are ISO encoded
dependencies = ['1.js', '2.js', '3.js', '4.js']
self.suite_desc_list[0].lib_paths.return_value = [dependencies[0]]
self.suite_desc_list[0].src_paths.return_value = [dependencies[1]]
self.suite_desc_list[0].spec_paths.return_value = [dependencies[2]]
self.suite_desc_list[0].fixture_paths.return_value = [dependencies[3]]
# Create fake files to serve with ISO-8859-1 chars
page_contents = '\xf6 \x9a \xa0'
self._create_fake_files(dependencies, page_contents, encoding=None)
# Expect that the server sends us the files,
# ignoring any GET parameters we pass in the URL
expected_page = u'\xf6 \x9a \xa0'
for path in dependencies:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path + "?123456"
self._assert_page_equals(url, expected_page, encoding='iso-8859-1')
def test_ignore_dependency_get_params(self):
# Configure the suite description to contain dependency files
dependencies = ['1.js', '2.js', '3.js', '4.js']
self.suite_desc_list[0].lib_paths.return_value = [dependencies[0]]
self.suite_desc_list[0].src_paths.return_value = [dependencies[1]]
self.suite_desc_list[0].spec_paths.return_value = [dependencies[2]]
self.suite_desc_list[0].fixture_paths.return_value = [dependencies[3]]
# Create fake files to serve
expected_page = u'\u0236est dependency'
self._create_fake_files(dependencies, expected_page)
# Expect that the server sends us the files,
# ignoring any GET parameters we pass in the URL
for path in dependencies:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path + "?123456"
self._assert_page_equals(url, expected_page)
def test_different_working_dir(self):
# Configure the suite description to contain JS dependencies
spec_paths = ['spec/1.js']
self.suite_desc_list[0].spec_paths.return_value = spec_paths
# Create fake files to serve
os.makedirs('spec/subdir')
expected_page = u'test spec file'
self._create_fake_files(spec_paths, expected_page)
# Should be able to change the working directory and still
# get the dependencies, because the suite description
# contains the root directory for dependency paths.
        # The superclass `TempWorkspaceTestCase` will reset the working
# directory on `tearDown()`
os.mkdir('different_dir')
os.chdir('different_dir')
# Expect that we still get the files
for path in spec_paths:
url = self.server.root_url() + 'suite/test-suite-0/include/' + path
self._assert_page_equals(url, expected_page)
def test_404_pages(self):
# Try a URL that is not one of the suite urls
root_url = self.server.root_url()
bad_url_list = [root_url + 'invalid',
root_url + 'runner/not_found.txt',
root_url + 'suite/{}'.format(self.NUM_SUITE_DESC + 1),
root_url + 'suite/{}'.format(-1)]
# Expect that we get a page not found status
for bad_url in bad_url_list:
response = requests.get(bad_url)
self.assertEqual(response.status_code,
requests.codes.not_found,
msg=bad_url)
def test_missing_dependency(self):
# Configure the suite description to contain a file
self.suite_desc_list[0].src_paths.return_value = ['not_found.txt']
# The file does not exist, so expect that we
# get a not found response
response = requests.get(self.server.root_url() + 'not_found.txt')
self.assertEqual(response.status_code, requests.codes.not_found)
def _assert_page_equals(self, url, expected_content, encoding='utf-8'):
"""
Assert that the page at `url` contains `expected_content`.
Uses a GET HTTP request to retrieve the page and expects
a 200 status code, with UTF-8 encoding.
`encoding` is the expected encoding. If None, expect
an unencoded byte string.
"""
# HTTP GET request for the page
response = requests.get(url)
# Expect that we get a success result code
self.assertEqual(response.status_code, requests.codes.ok, msg=url)
# Expect that we got an accurate content length
if encoding is None:
expected_len = len(expected_content)
else:
expected_len = len(expected_content.encode(encoding))
self.assertEqual(
str(expected_len),
response.headers.get('content-length')
)
# Expect that the content is what we rendered
if encoding is not None:
self.assertIn(
expected_content,
response.content.decode(encoding),
msg=url
)
# If no encoding, just expect the byte string
else:
self.assertIn(expected_content, response.content, msg=url)
@staticmethod
def _create_fake_files(path_list, contents, encoding='utf8'):
"""
For each path in `path_list`, create a file containing `contents`
(a string).
"""
for path in path_list:
with open(path, 'w') as fake_file:
# If an encoding is specified, use it to convert
# the string to a byte str
if encoding is not None:
encoded_contents = contents.encode(encoding)
else:
encoded_contents = contents
# Write the byte string to the file
fake_file.write(encoded_contents)
class SuiteServerCoverageTest(TempWorkspaceTestCase):
"""
Test that the suite page server correctly collects
coverage info for JS source files.
"""
JSCOVER_PATH = '/usr/local/jscover.jar'
def setUp(self):
# Create the temp workspace
super(SuiteServerCoverageTest, self).setUp()
# Configure the server to timeout quickly, to keep the test suite fast
self._old_timeout = SuitePageServer.COVERAGE_TIMEOUT
SuitePageServer.COVERAGE_TIMEOUT = 0.01
def tearDown(self):
# Tear down the temp workspace
super(SuiteServerCoverageTest, self).tearDown()
# Restore the old timeout
SuitePageServer.COVERAGE_TIMEOUT = self._old_timeout
@mock.patch('js_test_tool.suite_server.SrcInstrumenter')
def test_creates_instrumenters_for_suites(self, instrumenter_cls):
# Configure the instrumenter class to return mocks
instr_mocks = [mock.MagicMock(SrcInstrumenter),
mock.MagicMock(SrcInstrumenter)]
instrumenter_cls.side_effect = instr_mocks
# Set up the descriptions
mock_desc_list = [self._mock_suite_desc('test-suite-0', '/root_1', ['src1.js', 'src2.js']),
self._mock_suite_desc('test-suite-1', '/root_2', ['src3.js', 'src4.js'])]
# Create a suite page server for those descriptions
server = SuitePageServer(mock_desc_list,
mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH)
# Start the server
server.start()
self.addCleanup(server.stop)
# Expect that there is a SrcInstrumenter for each suite,
# and it has been started.
instr_dict = server.src_instr_dict
self.assertEqual(len(instr_dict), len(mock_desc_list))
for instr in instr_dict.values():
instr.start.assert_called_once_with()
# Stop the server
# Expect that all the instrumenters are also stopped
server.stop()
for instr in instr_mocks:
instr.stop.assert_called_once_with()
@mock.patch('js_test_tool.suite_server.SrcInstrumenter')
def test_serves_instrumented_source_files(self, instrumenter_cls):
# Configure the instrumenter class to return a mock
instr_mock = mock.MagicMock(SrcInstrumenter)
instrumenter_cls.return_value = instr_mock
# Configure the instrumenter to always return fake output
fake_src = u"instr\u1205ented sr\u1239 output"
instr_mock.instrumented_src.return_value = fake_src
# Create a mock description with one source file
mock_desc = self._mock_suite_desc('test-suite-0', '/root', ['src.js'])
# Create a suite page server for those descriptions
server = SuitePageServer([mock_desc],
mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH)
# Start the server
server.start()
self.addCleanup(server.stop)
# Access the page, expecting to get the instrumented source
url = server.root_url() + "suite/test-suite-0/include/src.js"
response = requests.get(url, timeout=0.1)
self.assertEqual(response.text, fake_src)
@mock.patch('js_test_tool.suite_server.SrcInstrumenter')
def test_does_not_instrument_lib_or_spec_files(self, instrumenter_cls):
# Configure the instrumenter class to return a mock
instr_mock = mock.MagicMock(SrcInstrumenter)
instrumenter_cls.return_value = instr_mock
# Create a mock description with lib and spec files
mock_desc = self._mock_suite_desc(
'test-suite-0', '/root', ['src.js'],
lib_paths=['lib.js'], spec_paths=['spec.js']
)
# Create a suite page server for the description
server = SuitePageServer([mock_desc],
mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH)
# Start the server
server.start()
self.addCleanup(server.stop)
# Access the lib and spec pages
url_list = [server.root_url() + "suite/test-suite-0/include/lib.js",
server.root_url() + "suite/test-suite-0/include/spec.js"]
for url in url_list:
requests.get(url, timeout=0.1)
# Ensure that the instrumenter was NOT invoked,
# since these are not source files
self.assertFalse(instr_mock.instrumented_src.called)
@mock.patch('js_test_tool.suite_server.SrcInstrumenter')
def test_instrumenter_fails_gracefully(self, instrumenter_cls):
# Configure the instrumenter class to return a mock
instr_mock = mock.MagicMock(SrcInstrumenter)
instrumenter_cls.return_value = instr_mock
# Configure the mock to raise an exception
instr_mock.instrumented_src.side_effect = SrcInstrumenterError
# Create a mock description with one source file
mock_desc = self._mock_suite_desc('test-suite-0', os.getcwd(), ['src.js'])
# Create the uninstrumented version of the source file
expected_page = 'uninstrumented source'
with open('src.js', 'w') as src_file:
src_file.write(expected_page)
# Create a suite page server for those descriptions
server = SuitePageServer(
[mock_desc], mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH
)
# Start the server
server.start()
self.addCleanup(server.stop)
# Even though the instrumenter failed,
# we should STILL be able to get the uninstrumented
# version of the source file
url = server.root_url() + "suite/test-suite-0/include/src.js"
response = requests.get(url, timeout=0.1)
self.assertEqual(response.text, expected_page)
def test_collects_POST_coverage_info(self):
# Start the page server
server = SuitePageServer([self._mock_suite_desc('test-suite-0', '/root', ['src.js'])],
mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH)
server.start()
self.addCleanup(server.stop)
# POST some coverage data to the src page
# This test does NOT mock the CoverageData class created internally,
# so we need to pass valid JSON data.
# (Since CoverageData involves no network or file access, mocking
# it is not worth the effort).
coverage_data = {'/src.js': {'lineData': [1, 0, None, 2, 1, None, 0]}}
requests.post(server.root_url() + "jscoverage-store/test-suite-0",
data=json.dumps(coverage_data),
timeout=0.1)
# Get the results immediately from the server.
# It's the server's responsibility to block until all results are received.
result_data = server.all_coverage_data()
# Check the result
self.assertEqual(result_data.src_list(), ['/root/src.js'])
self.assertEqual(result_data.line_dict_for_src('/root/src.js'),
{0: True, 1: False, 3: True, 4: True, 6: False})
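        # For reference, the JSCover payload above maps as the assertions
        # show: `lineData[i]` holds a hit count for source line `i` (None
        # marks a non-executable line), each non-None entry becomes
        # `{i: hit_count > 0}`, and the '/src.js' key is resolved against
        # the suite's root directory to give '/root/src.js'.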
def test_uncovered_src(self):
        # Create the source file -- we need to do this so that
        # CoverageData can determine the number of uncovered
        # lines (every line in the file)
num_lines = 5
with open('src.js', 'w') as src_file:
contents = '\n'.join(['test line' for _ in range(num_lines)])
src_file.write(contents)
# Start the page server
root_dir = self.temp_dir
server = SuitePageServer([self._mock_suite_desc('test-suite-0', root_dir, ['src.js'])],
mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH)
server.start()
self.addCleanup(server.stop)
# POST empty coverage data back to the server
# Since no coverage information is reported, we expect
# that the source file in the suite description is
# reported as uncovered.
coverage_data = {}
requests.post(server.root_url() + "jscoverage-store/test-suite-0",
data=json.dumps(coverage_data),
timeout=0.1)
# Get the results immediately from the server.
# It's the server's responsibility to block until all results are received.
result_data = server.all_coverage_data()
# Check the result -- expect that the source file
# is reported as completely uncovered
full_src_path = os.path.join(root_dir, 'src.js')
self.assertEqual(result_data.src_list(), [full_src_path])
self.assertEqual(result_data.line_dict_for_src(full_src_path),
{line_num: False for line_num in range(num_lines)})
def test_timeout_if_missing_coverage(self):
# Start the page server with multiple descriptions
mock_desc_list = [self._mock_suite_desc('test-suite-0', '/root_1', ['src1.js', 'src2.js']),
self._mock_suite_desc('test-suite-1', '/root_2', ['src.js'])]
server = SuitePageServer(mock_desc_list, mock.MagicMock(SuiteRenderer),
jscover_path=self.JSCOVER_PATH)
server.start()
self.addCleanup(server.stop)
# POST coverage data to one of the sources, but not the other
coverage_data = {'/suite/test-suite-0/include/src1.js': {'lineData': [1]}}
requests.post(server.root_url() + "jscoverage-store/test-suite-0",
data=json.dumps(coverage_data),
timeout=0.1)
# Try to get the coverage data; expect it to timeout
# We configured the timeout to be short in our setup method
# so this should return quickly.
with self.assertRaises(TimeoutError):
server.all_coverage_data()
@staticmethod
def _mock_suite_desc(suite_name, root_dir, src_paths,
lib_paths=None, spec_paths=None):
"""
Configure a mock `SuiteDescription` to have `root_dir` as its
base directory and to list `src_paths` as its JavaScript
sources.
`suite_name` is the name of the suite, which determines
the URL to access the suite pages.
If `lib_paths` or `spec_paths` (lists of paths) are used,
configure the description to use those lib and spec file paths.
"""
mock_desc = mock.MagicMock(SuiteDescription)
mock_desc.suite_name.return_value = suite_name
mock_desc.root_dir.return_value = root_dir
mock_desc.src_paths.return_value = src_paths
if lib_paths is not None:
mock_desc.lib_paths.return_value = lib_paths
else:
mock_desc.lib_paths.return_value = []
if spec_paths is not None:
mock_desc.spec_paths.return_value = spec_paths
else:
mock_desc.spec_paths.return_value = []
mock_desc.fixture_paths.return_value = []
return mock_desc
class SuitePageHandlerTest(unittest.TestCase):
"""
Tests for utility methods in `SuitePageHandler`.
"""
def test_safe_str_buffer(self):
        # We should be able to put in any unicode string and
        # get a buffer that we can safely typecast to a bytestring
for str_input in [u'\u5890', u'\xf1\xfc]\x83', u'&R\xa2o']:
# Create the string buffer
str_buffer = SuitePageHandler.safe_str_buffer(str_input)
# Try to read it as a byte string
try:
typecast = str(str_buffer.getvalue())
except UnicodeEncodeError:
self.fail("Could not encode {}".format(repr(str_input)))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create sample PR curve summary data.
We have 3 classes: R, G, and B. We generate colors within RGB space from 3
normal distributions (1 at each corner of the color triangle: [255, 0, 0],
[0, 255, 0], and [0, 0, 255]).
The true label of each random color is associated with the normal distribution
that generated it.
Using 3 other normal distributions (over the distance each color is from a
corner of the color triangle - RGB), we then compute the probability that each
color belongs to the class. We use those probabilities to generate PR curves.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from absl import app
from absl import flags
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorboard.plugins.pr_curve import summary
tf.compat.v1.disable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string(
"logdir",
"/tmp/pr_curve_demo",
"Directory into which to write TensorBoard data.",
)
flags.DEFINE_integer(
"steps", 10, "Number of steps to generate for each PR curve."
)
def start_runs(
logdir, steps, run_name, thresholds, mask_every_other_prediction=False
):
"""Generate a PR curve with precision and recall evenly weighted.
Arguments:
logdir: The directory into which to store all the runs' data.
steps: The number of steps to run for.
run_name: The name of the run.
thresholds: The number of thresholds to use for PR curves.
mask_every_other_prediction: Whether to mask every other prediction by
alternating weights between 0 and 1.
"""
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(42)
# Create a normal distribution layer used to generate true color labels.
distribution = tf.compat.v1.distributions.Normal(loc=0.0, scale=142.0)
    # Sample the distribution to generate colors. Let's generate different numbers
# of each color. The first dimension is the count of examples.
# The calls to sample() are given fixed random seed values that are "magic"
# in that they correspond to the default seeds for those ops when the PR
# curve test (which depends on this code) was written. We've pinned these
# instead of continuing to use the defaults since the defaults are based on
# node IDs from the sequence of nodes added to the graph, which can silently
# change when this code or any TF op implementations it uses are modified.
# TODO(nickfelt): redo the PR curve test to avoid reliance on random seeds.
# Generate reds.
number_of_reds = 100
true_reds = tf.clip_by_value(
tf.concat(
[
255 - tf.abs(distribution.sample([number_of_reds, 1], seed=11)),
tf.abs(distribution.sample([number_of_reds, 2], seed=34)),
],
axis=1,
),
0,
255,
)
# Generate greens.
number_of_greens = 200
true_greens = tf.clip_by_value(
tf.concat(
[
tf.abs(distribution.sample([number_of_greens, 1], seed=61)),
255
- tf.abs(distribution.sample([number_of_greens, 1], seed=82)),
tf.abs(distribution.sample([number_of_greens, 1], seed=105)),
],
axis=1,
),
0,
255,
)
# Generate blues.
number_of_blues = 150
true_blues = tf.clip_by_value(
tf.concat(
[
tf.abs(distribution.sample([number_of_blues, 2], seed=132)),
255
- tf.abs(distribution.sample([number_of_blues, 1], seed=153)),
],
axis=1,
),
0,
255,
)
# Assign each color a vector of 3 booleans based on its true label.
labels = tf.concat(
[
tf.tile(tf.constant([[True, False, False]]), (number_of_reds, 1)),
tf.tile(tf.constant([[False, True, False]]), (number_of_greens, 1)),
tf.tile(tf.constant([[False, False, True]]), (number_of_blues, 1)),
],
axis=0,
)
# We introduce 3 normal distributions. They are used to predict whether a
# color falls under a certain class (based on distances from corners of the
# color triangle). The distributions vary per color. We have the distributions
# narrow over time.
initial_standard_deviations = [v + FLAGS.steps for v in (158, 200, 242)]
iteration = tf.compat.v1.placeholder(tf.int32, shape=[])
red_predictor = tf.compat.v1.distributions.Normal(
loc=0.0,
scale=tf.cast(
initial_standard_deviations[0] - iteration, dtype=tf.float32
),
)
green_predictor = tf.compat.v1.distributions.Normal(
loc=0.0,
scale=tf.cast(
initial_standard_deviations[1] - iteration, dtype=tf.float32
),
)
blue_predictor = tf.compat.v1.distributions.Normal(
loc=0.0,
scale=tf.cast(
initial_standard_deviations[2] - iteration, dtype=tf.float32
),
)
# Make predictions (assign 3 probabilities to each color based on each color's
# distance to each of the 3 corners). We seek double the area in the right
# tail of the normal distribution.
examples = tf.concat([true_reds, true_greens, true_blues], axis=0)
probabilities_colors_are_red = (
1
- red_predictor.cdf(
tf.norm(tensor=examples - tf.constant([255.0, 0, 0]), axis=1)
)
) * 2
probabilities_colors_are_green = (
1
- green_predictor.cdf(
tf.norm(tensor=examples - tf.constant([0, 255.0, 0]), axis=1)
)
) * 2
probabilities_colors_are_blue = (
1
- blue_predictor.cdf(
tf.norm(tensor=examples - tf.constant([0, 0, 255.0]), axis=1)
)
) * 2
predictions = (
probabilities_colors_are_red,
probabilities_colors_are_green,
probabilities_colors_are_blue,
)
# This is the crucial piece. We write data required for generating PR curves.
# We create 1 summary per class because we create 1 PR curve per class.
for i, color in enumerate(("red", "green", "blue")):
description = (
"The probabilities used to create this PR curve are "
"generated from a normal distribution. Its standard "
"deviation is initially %0.0f and decreases over time."
% initial_standard_deviations[i]
)
weights = None
if mask_every_other_prediction:
# Assign a weight of 0 to every even-indexed prediction. Odd-indexed
# predictions are assigned a default weight of 1.
consecutive_indices = tf.reshape(
tf.range(tf.size(input=predictions[i])),
tf.shape(input=predictions[i]),
)
weights = tf.cast(consecutive_indices % 2, dtype=tf.float32)
summary.op(
name=color,
labels=labels[:, i],
predictions=predictions[i],
num_thresholds=thresholds,
weights=weights,
display_name="classifying %s" % color,
description=description,
)
merged_summary_op = tf.compat.v1.summary.merge_all()
events_directory = os.path.join(logdir, run_name)
sess = tf.compat.v1.Session()
writer = tf.compat.v1.summary.FileWriter(events_directory, sess.graph)
for step in xrange(steps):
feed_dict = {
iteration: step,
}
merged_summary = sess.run(merged_summary_op, feed_dict=feed_dict)
writer.add_summary(merged_summary, step)
writer.close()
def run_all(logdir, steps, thresholds, verbose=False):
"""Generate PR curve summaries.
Arguments:
logdir: The directory into which to store all the runs' data.
steps: The number of steps to run for.
      thresholds: The number of thresholds to use for PR curves.
      verbose: Whether to print the names of runs into stdout during execution.
"""
# First, we generate data for a PR curve that assigns even weights for
# predictions of all classes.
run_name = "colors"
if verbose:
print("--- Running: %s" % run_name)
start_runs(
logdir=logdir, steps=steps, run_name=run_name, thresholds=thresholds
)
# Next, we generate data for a PR curve that assigns arbitrary weights to
# predictions.
run_name = "mask_every_other_prediction"
if verbose:
print("--- Running: %s" % run_name)
start_runs(
logdir=logdir,
steps=steps,
run_name=run_name,
thresholds=thresholds,
mask_every_other_prediction=True,
)
def main(unused_argv):
print("Saving output to %s." % FLAGS.logdir)
run_all(FLAGS.logdir, FLAGS.steps, 50, verbose=True)
print("Done. Output saved to %s." % FLAGS.logdir)
if __name__ == "__main__":
app.run(main)
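# A quick way to exercise this demo (a usage sketch; assumes this module is
# saved as pr_curve_demo.py): generate the summaries, then point TensorBoard
# at the same logdir to see one PR curve per class and run.
#
#   python pr_curve_demo.py --logdir=/tmp/pr_curve_demo --steps=10
#   tensorboard --logdir=/tmp/pr_curve_demo
#
# Equivalently, from Python:
#   run_all("/tmp/pr_curve_demo", steps=10, thresholds=50, verbose=True)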
|
|
#!/usr/bin/env python3
# Copyright 2015-2016 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import serial
import logging
from threading import Thread
from configparser import RawConfigParser
from litmus import _duts_
from litmus.core.util import check_output, find_pattern, decode
from litmus.device.cuttercleware4 import cuttercleware4
from litmus.device.cuttersmartpower import cuttersmartpower
class generate_topology_sdb_device(object):
"""docstring for generate_topology_sdb_device"""
devcatalog = [
{'dev_type': 'u3',
'cmd': 'printenv boardname',
'pattern': r'.*odroidu3.*',
'index': 1
},
{'dev_type': 'xu3',
'cmd': 'printenv fdtfile',
'pattern': r'.*odroidxu3.*',
'index': 1
},
]
uarts = None
smartpowers = None
cleware4s = None
topology_path = _duts_
open_mode = 'w+'
def __init__(self, *args, **kwargs):
super(generate_topology_sdb_device, self).__init__()
if 'append' in kwargs and kwargs['append']:
self.open_mode = 'a+'
if 'topology' in kwargs and kwargs['topology']:
self.topology_path = kwargs['topology']
def init_smartpowers(self):
"""docstring for init_smartpowers"""
def find_smartpower_names():
"""docstring for find_smartpowers"""
p = '.*Microchip Technology.*'
try:
smartpower_names = ['/dev/{}'.format(s)
for s in check_output('ls /dev '
'| grep hidraw',
shell=True).split()
if find_pattern(p, check_output(['cat',
'/sys/'
'class/'
'hidraw/'
'{}/'
'device/'
'uevent'
.format(s)
]))]
except AttributeError:
smartpower_names = []
logging.debug('smart powers : {0}'.format(smartpower_names))
return smartpower_names
smartpower_names = find_smartpower_names()
self.smartpowers = []
for l in smartpower_names:
obj = {'dev_id': '',
'cutter_type': 'smartpower',
'cutter_port': l
}
self.smartpowers.append(cuttersmartpower(**obj))
def init_cleware4s(self):
"""docstring for init_cleware4s"""
def find_cleware4_names():
"""docstring for find_cleware4s"""
p = '.*Switch1.*version:.(29|512),.*serial number:.([0-9]{6,7})'
cleware4s = [find_pattern(p, s, groupindex=2)
for s in check_output('clewarecontrol -l',
shell=True).split('\n')
if find_pattern(p, s)]
logging.debug('cleware4 cutters : {0}'.format(cleware4s))
return cleware4s
cleware4_names = find_cleware4_names()
self.cleware4s = []
for l in cleware4_names:
for idx in range(0, 4):
obj = {'dev_id': '',
'cutter_type': 'cleware4',
'cutter_port': l,
'cleware_index': idx
}
self.cleware4s.append(cuttercleware4(**obj))
def open_uarts(self):
"""docstring for open_uarts"""
def init_jig(uart):
"""docstring for init_jig"""
pass
def get_items():
"""docstring for splitter"""
out = check_output('ls /dev | egrep "(ttyUSB|ttyS0)"', shell=True)
if out:
return out.split()
else:
raise Exception('There\'s no /dev/ttyUSB for duts.')
def find_uart_names():
"""docstring for find_uarts"""
uarts = None
uarts = ['/dev/{}'.format(s)
for s in get_items()]
logging.debug('uarts : {0}'.format(uarts))
return uarts
self.uarts = []
uart_names = find_uart_names()
for l in uart_names:
uart = serial.Serial(port=l, baudrate=115200, timeout=0.5)
init_jig(uart)
self.uarts.append(uart)
def close_uarts(self):
"""docstring for close_uarts"""
for l in self.uarts:
l.close()
def enter_boot_prompt(self, uart, cnt):
"""docstring for enter_boot_command"""
for l in range(cnt):
uart.write(b'\r')
time.sleep(0.025)
def enter_bootloader_prompt_mode(self):
"""docstring for enter_bootloader_prompt"""
# create threads for entering bootloader prompt
delay = (5 + (len(self.cleware4s) * 2 * 4 +
len(self.smartpowers) * 2 * 2)) * 30
threads = []
for l in self.uarts:
t = Thread(target=self.enter_boot_prompt, args=(l, delay))
t.start()
threads.append(t)
# turn on duts
self.turn_on_smartpowers()
self.turn_on_cleware4s()
# join all threads
for l in threads:
l.join()
time.sleep(1)
def turn_on(self, cutters):
"""docstring for turn_on"""
for l in cutters:
l.off(0.5)
l.on(0.5)
def turn_off(self, cutters):
"""docstring for turn_off"""
for l in cutters:
l.off(0.5)
def turn_on_smartpowers(self):
"""docstring for turn_on_smartpowers"""
self.turn_on(self.smartpowers)
def turn_off_smartpowers(self):
"""docstring for turn_off_smartpowers"""
self.turn_off(self.smartpowers)
def turn_on_cleware4s(self):
"""docstring for turn_on_cleware4"""
self.turn_on(self.cleware4s)
def turn_off_cleware4s(self):
"""docstring for turn_off_cleware4"""
self.turn_off(self.cleware4s)
def recognize_device(self, config, uart):
"""docstring for recognize_device"""
for l in self.devcatalog:
logging.debug('Is {}'.format(l['dev_type'].upper()))
uart.flushInput()
time.sleep(0.1)
uart.flushOutput()
time.sleep(0.5)
uart.flush()
time.sleep(0.1)
uart.write(l['cmd'].encode() + b'\r')
time.sleep(0.5)
buf = uart.read(5000)
if find_pattern(l['pattern'], decode(buf)):
logging.debug('Yes')
name = '{0}_{1:0>3}'.format(l['dev_type'].upper(),
l['index'])
cfg = {'name': name,
'dev_type': l['dev_type'],
'uart_port': uart.name
}
l['index'] += 1
return cfg
def is_on(self, uart):
"""docstring for is_on"""
p = r'.*echo.*'
uart.flushInput()
time.sleep(0.1)
uart.flushOutput()
time.sleep(0.1)
uart.flush()
time.sleep(0.1)
uart.write(b'echo\r')
time.sleep(0.1)
data = decode(b' '.join(uart.readlines(500)))
return find_pattern(p, data)
def generate_device_topology(self):
"""docstring for generate_device_topology"""
# open config parser
config = RawConfigParser()
cfgs = []
# recognize device type
for l in self.uarts:
logging.debug('[Recognize device type for uart : '
'{}]'.format(l.name))
cfg = self.recognize_device(config, l)
if cfg:
cfgs.append(cfg)
else:
l.close()
# remove closed uart obj
self.uarts = [m for m in self.uarts if m.isOpen()]
logging.debug('[Generate topology configurations]')
for l in self.smartpowers:
l.off()
for l_uart in self.uarts:
if not self.is_on(l_uart):
dev = [m for m in cfgs if m['uart_port'] == l_uart.name][0]
dev['cutter_type'] = 'smartpower'
dev['cutter_port'] = l._cport
l_uart.close()
self.uarts.remove(l_uart)
logging.debug(dev)
break
for l in self.cleware4s:
l.off()
for l_uart in self.uarts:
if not self.is_on(l_uart):
dev = [m for m in cfgs if m['uart_port'] == l_uart.name][0]
dev['cutter_type'] = 'cleware4'
dev['cutter_port'] = l._cport
dev['cleware_index'] = l._cindex
l_uart.close()
self.uarts.remove(l_uart)
logging.debug(dev)
break
for l in self.uarts:
l.close()
for l in cfgs:
section_name = l['name']
l.pop('name')
config.add_section(section_name)
for key in sorted(l.keys()):
config.set(section_name, key, str(l[key]))
with open(self.topology_path, self.open_mode) as f:
config.write(f)
logging.debug('Done.')
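        # Illustrative example of a generated section (values made up):
        #
        #   [U3_001]
        #   cutter_port = /dev/hidraw0
        #   cutter_type = smartpower
        #   dev_type = u3
        #   uart_port = /dev/ttyUSB0
        #
        # Keys are written in sorted order; cleware-attached devices also get
        # a cleware_index entry.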
def run(self):
"""docstring for run"""
# init peripherals
self.init_smartpowers()
self.init_cleware4s()
self.open_uarts()
# enter bootloader prompt
self.enter_bootloader_prompt_mode()
# generate cfg
self.generate_device_topology()
# turn off duts
self.turn_off_smartpowers()
self.turn_off_cleware4s()
# close uarts
self.close_uarts()
def main(topology):
"""docstring for main"""
try:
logging.debug('# phase 1 : detect all devices which use sdb')
phase_sdb = generate_topology_sdb_device(topology=topology)
phase_sdb.run()
except KeyboardInterrupt:
raise Exception('Keyboard Interrupt')
except Exception as e:
logging.debug(e)
raise Exception('Failed to generate topology')
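# Minimal usage sketch (an assumption, not part of the tool itself): it
# presumes the DUTs, UART adapters and power cutters handled above are
# physically attached, and 'topology.cfg' is only an illustrative path.
if __name__ == '__main__':
    main('topology.cfg')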
|
|
"""
mbed SDK
Copyright (c) 2011-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Title: GNU ARM Eclipse (http://gnuarmeclipse.github.io) exporter.
Description: Creates a managed build project that can be imported by
the GNU ARM Eclipse plug-ins.
Author: Liviu Ionescu <ilg@livius.net>
"""
import os
import copy
import tempfile
import shutil
import copy
from subprocess import call, Popen, PIPE
from os.path import splitext, basename, relpath, dirname, exists, join, dirname
from random import randint
from json import load
from tools.export.exporters import Exporter, filter_supported
from tools.options import list_profiles
from tools.targets import TARGET_MAP
from tools.utils import NotSupportedException
from tools.build_api import prepare_toolchain
# =============================================================================
class UID:
"""
Helper class, used to generate unique ids required by .cproject symbols.
"""
@property
def id(self):
return "%0.9u" % randint(0, 999999999)
# Global UID generator instance.
# Passed to the template engine, and referred as {{u.id}}.
# Each invocation generates a new number.
u = UID()
# =============================================================================
POST_BINARY_WHITELIST = set([
"TEENSY3_1Code.binary_hook",
"MCU_NRF51Code.binary_hook",
])
class GNUARMEclipse(Exporter):
NAME = 'GNU ARM Eclipse'
TOOLCHAIN = 'GCC_ARM'
TARGETS = filter_supported("GCC_ARM", POST_BINARY_WHITELIST)
# override
@property
def flags(self):
"""Returns a dictionary of toolchain flags.
Keys of the dictionary are:
cxx_flags - c++ flags
c_flags - c flags
ld_flags - linker flags
asm_flags - assembler flags
common_flags - common options
The difference from the parent function is that it does not
add macro definitions, since they are passed separately.
"""
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in self.toolchain.flags.iteritems()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])
flags['c_flags'] += self.toolchain.get_config_option(config_header)
flags['cxx_flags'] += self.toolchain.get_config_option(
config_header)
return flags
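    # Both the property above and toolchain_flags() below return a dictionary
    # of this illustrative shape (the actual contents depend on the target and
    # the selected build profile):
    #
    #   {'common_flags': [...], 'asm_flags': [...], 'c_flags': [...],
    #    'cxx_flags': [...], 'ld_flags': [...]}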
def toolchain_flags(self, toolchain):
"""Returns a dictionary of toolchain flags.
Keys of the dictionary are:
cxx_flags - c++ flags
c_flags - c flags
ld_flags - linker flags
asm_flags - assembler flags
common_flags - common options
        The difference from the above is that it takes the toolchain as a parameter.
"""
# Note: use the config options from the currently selected toolchain.
config_header = self.toolchain.get_config_header()
flags = {key + "_flags": copy.deepcopy(value) for key, value
in toolchain.flags.iteritems()}
if config_header:
config_header = relpath(config_header,
self.resources.file_basepath[config_header])
header_options = self.toolchain.get_config_option(config_header)
flags['c_flags'] += header_options
flags['cxx_flags'] += header_options
return flags
# override
def generate(self):
"""
Generate the .project and .cproject files.
"""
if not self.resources.linker_script:
raise NotSupportedException("No linker script found.")
print
print 'Create a GNU ARM Eclipse C++ managed project'
print 'Project name: {0}'.format(self.project_name)
print 'Target: {0}'.format(self.toolchain.target.name)
print 'Toolchain: {0}'.format(self.TOOLCHAIN)
self.resources.win_to_unix()
# TODO: use some logger to display additional info if verbose
libraries = []
# print 'libraries'
# print self.resources.libraries
for lib in self.resources.libraries:
l, _ = splitext(basename(lib))
libraries.append(l[3:])
self.system_libraries = [
'stdc++', 'supc++', 'm', 'c', 'gcc', 'nosys'
]
# Read in all profiles, we'll extract compiler options.
profiles = self.get_all_profiles()
profile_ids = [s.lower() for s in profiles]
profile_ids.sort()
# TODO: get the list from existing .cproject
build_folders = [s.capitalize() for s in profile_ids]
build_folders.append('BUILD')
# print build_folders
objects = [self.filter_dot(s) for s in self.resources.objects]
for bf in build_folders:
objects = [o for o in objects if not o.startswith(bf + '/')]
# print 'objects'
# print objects
self.compute_exclusions()
self.include_path = [
self.filter_dot(s) for s in self.resources.inc_dirs]
print 'Include folders: {0}'.format(len(self.include_path))
self.as_defines = self.toolchain.get_symbols(True)
self.c_defines = self.toolchain.get_symbols()
self.cpp_defines = self.c_defines
print 'Symbols: {0}'.format(len(self.c_defines))
self.ld_script = self.filter_dot(
self.resources.linker_script)
print 'Linker script: {0}'.format(self.ld_script)
self.options = {}
for id in profile_ids:
            # Options are grouped into a category common to all tools
            # plus a specific category for each tool (as, c, cpp, ld).
opts = {}
opts['common'] = {}
opts['as'] = {}
opts['c'] = {}
opts['cpp'] = {}
opts['ld'] = {}
opts['id'] = id
opts['name'] = opts['id'].capitalize()
print
print 'Build configuration: {0}'.format(opts['name'])
profile = profiles[id]
profile_toolchain = profile[self.TOOLCHAIN]
# A small hack, do not bother with src_path again,
# pass an empty string to avoid crashing.
src_paths = ['']
target_name = self.toolchain.target.name
toolchain = prepare_toolchain(
src_paths, "", target_name, self.TOOLCHAIN, build_profile=profile_toolchain)
# Hack to fill in build_dir
toolchain.build_dir = self.toolchain.build_dir
flags = self.toolchain_flags(toolchain)
print 'Common flags:', ' '.join(flags['common_flags'])
print 'C++ flags:', ' '.join(flags['cxx_flags'])
print 'C flags:', ' '.join(flags['c_flags'])
print 'ASM flags:', ' '.join(flags['asm_flags'])
print 'Linker flags:', ' '.join(flags['ld_flags'])
# Most GNU ARM Eclipse options have a parent,
# either debug or release.
if '-O0' in flags['common_flags'] or '-Og' in flags['common_flags']:
opts['parent_id'] = 'debug'
else:
opts['parent_id'] = 'release'
self.process_options(opts, flags)
opts['as']['defines'] = self.as_defines
opts['c']['defines'] = self.c_defines
opts['cpp']['defines'] = self.cpp_defines
opts['common']['include_paths'] = self.include_path
opts['common']['excluded_folders'] = '|'.join(
self.excluded_folders)
opts['ld']['library_paths'] = [
self.filter_dot(s) for s in self.resources.lib_dirs]
opts['ld']['object_files'] = objects
opts['ld']['user_libraries'] = libraries
opts['ld']['system_libraries'] = self.system_libraries
opts['ld']['script'] = self.ld_script
# Unique IDs used in multiple places.
# Those used only once are implemented with {{u.id}}.
uid = {}
uid['config'] = u.id
uid['tool_c_compiler'] = u.id
uid['tool_c_compiler_input'] = u.id
uid['tool_cpp_compiler'] = u.id
uid['tool_cpp_compiler_input'] = u.id
opts['uid'] = uid
self.options[id] = opts
jinja_ctx = {
'name': self.project_name,
# Compiler & linker command line options
'options': self.options,
# Must be an object with an `id` property, which
# will be called repeatedly, to generate multiple UIDs.
'u': u,
}
# TODO: it would be good to have jinja stop if one of the
# expected context values is not defined.
self.gen_file('gnuarmeclipse/.project.tmpl', jinja_ctx,
'.project', trim_blocks=True, lstrip_blocks=True)
self.gen_file('gnuarmeclipse/.cproject.tmpl', jinja_ctx,
'.cproject', trim_blocks=True, lstrip_blocks=True)
self.gen_file('gnuarmeclipse/makefile.targets.tmpl', jinja_ctx,
'makefile.targets', trim_blocks=True, lstrip_blocks=True)
if not exists('.mbedignore'):
print
print 'Create .mbedignore'
with open('.mbedignore', 'w') as f:
for bf in build_folders:
print bf + '/'
f.write(bf + '/\n')
print
print 'Done. Import the \'{0}\' project in Eclipse.'.format(self.project_name)
# override
@staticmethod
def build(project_name, log_name="build_log.txt", cleanup=True):
"""
Headless build an Eclipse project.
The following steps are performed:
- a temporary workspace is created,
- the project is imported,
- a clean build of all configurations is performed and
- the temporary workspace is removed.
The build results are in the Debug & Release folders.
All executables (eclipse & toolchain) must be in the PATH.
The general method to start a headless Eclipse build is:
$ eclipse \
--launcher.suppressErrors \
-nosplash \
-application org.eclipse.cdt.managedbuilder.core.headlessbuild \
-data /path/to/workspace \
-import /path/to/project \
-cleanBuild "project[/configuration] | all"
"""
# TODO: possibly use the log file.
# Create a temporary folder for the workspace.
tmp_folder = tempfile.mkdtemp()
cmd = [
'eclipse',
'--launcher.suppressErrors',
'-nosplash',
'-application org.eclipse.cdt.managedbuilder.core.headlessbuild',
'-data', tmp_folder,
'-import', os.getcwd(),
'-cleanBuild', project_name
]
p = Popen(' '.join(cmd), shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
ret_code = p.returncode
stdout_string = "=" * 10 + "STDOUT" + "=" * 10 + "\n"
err_string = "=" * 10 + "STDERR" + "=" * 10 + "\n"
err_string += err
ret_string = "SUCCESS\n"
if ret_code != 0:
ret_string += "FAILURE\n"
print "%s\n%s\n%s\n%s" % (stdout_string, out, err_string, ret_string)
if log_name:
# Write the output to the log file
with open(log_name, 'w+') as f:
f.write(stdout_string)
f.write(out)
f.write(err_string)
f.write(ret_string)
# Cleanup the exported and built files
if cleanup:
if exists(log_name):
os.remove(log_name)
os.remove('.project')
os.remove('.cproject')
if exists('Debug'):
shutil.rmtree('Debug')
if exists('Release'):
shutil.rmtree('Release')
if exists('makefile.targets'):
os.remove('makefile.targets')
# Always remove the temporary folder.
if exists(tmp_folder):
shutil.rmtree(tmp_folder)
if ret_code == 0:
# Return Success
return 0
# Seems like something went wrong.
return -1
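    # Hypothetical invocation, after exporting a project into the current
    # working directory (requires 'eclipse' and the GCC ARM toolchain on PATH):
    #
    #   GNUARMEclipse.build('my_project', log_name='build_log.txt')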
# -------------------------------------------------------------------------
@staticmethod
def get_all_profiles():
tools_path = dirname(dirname(dirname(__file__)))
file_names = [join(tools_path, "profiles", fn) for fn in os.listdir(
join(tools_path, "profiles")) if fn.endswith(".json")]
# print file_names
profile_names = [basename(fn).replace(".json", "")
for fn in file_names]
# print profile_names
profiles = {}
for fn in file_names:
content = load(open(fn))
profile_name = basename(fn).replace(".json", "")
profiles[profile_name] = content
return profiles
# -------------------------------------------------------------------------
# Process source files/folders exclusions.
def compute_exclusions(self):
"""
With the project root as the only source folder known to CDT,
based on the list of source files, compute the folders to not
be included in the build.
The steps are:
- get the list of source folders, as dirname(source_file)
- compute the top folders (subfolders of the project folder)
- iterate all subfolders and add them to a tree, with all
          nodes marked as 'not used'
- iterate the source folders and mark them as 'used' in the
tree, including all intermediate nodes
- recurse the tree and collect all unused folders; descend
the hierarchy only for used nodes
"""
source_folders = [self.filter_dot(s) for s in set(dirname(
src) for src in self.resources.c_sources + self.resources.cpp_sources + self.resources.s_sources)]
if '.' in source_folders:
source_folders.remove('.')
# print 'source folders'
# print source_folders
# Source folders were converted before and are guaranteed to
# use the POSIX separator.
top_folders = [f for f in set(s.split('/')[0]
for s in source_folders)]
# print 'top folders'
# print top_folders
self.source_tree = {}
for top_folder in top_folders:
for root, dirs, files in os.walk(top_folder, topdown=True):
# print root, dirs, files
                # Paths returned by os.walk() must be split with os.sep
                # to accommodate Windows weirdness.
parts = root.split(os.sep)
# Ignore paths that include parts starting with dot.
skip = False
for part in parts:
if part.startswith('.'):
skip = True
break
if skip:
continue
# Further process only leaf paths, (that do not have
# sub-folders).
if len(dirs) == 0:
# The path is reconstructed using POSIX separators.
self.add_source_folder_to_tree('/'.join(parts))
for folder in source_folders:
self.add_source_folder_to_tree(folder, True)
# print
# print self.source_tree
# self.dump_paths(self.source_tree)
# self.dump_tree(self.source_tree)
# print 'excludings'
self.excluded_folders = ['BUILD']
self.recurse_excludings(self.source_tree)
print 'Source folders: {0}, with {1} exclusions'.format(len(source_folders), len(self.excluded_folders))
def add_source_folder_to_tree(self, path, is_used=False):
"""
Decompose a path in an array of folder names and create the tree.
On the second pass the nodes should be already there; mark them
as used.
"""
# print path, is_used
# All paths arriving here are guaranteed to use the POSIX
# separators, os.walk() paths were also explicitly converted.
parts = path.split('/')
# print parts
node = self.source_tree
prev = None
for part in parts:
if part not in node.keys():
new_node = {}
new_node['name'] = part
new_node['children'] = {}
if prev != None:
new_node['parent'] = prev
node[part] = new_node
node[part]['is_used'] = is_used
prev = node[part]
node = node[part]['children']
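    # Illustrative shape of a node created by add_source_folder_to_tree()
    # for the path 'src' (made-up example):
    #
    #   {'src': {'name': 'src', 'children': {...}, 'is_used': False}}
    #
    # Nested nodes additionally hold a 'parent' reference to the enclosing
    # node dictionary.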
def recurse_excludings(self, nodes):
"""
Recurse the tree and collect all unused folders; descend
the hierarchy only for used nodes.
"""
for k in nodes.keys():
node = nodes[k]
if node['is_used'] == False:
parts = []
cnode = node
while True:
parts.insert(0, cnode['name'])
if 'parent' not in cnode:
break
cnode = cnode['parent']
# Compose a POSIX path.
path = '/'.join(parts)
# print path
self.excluded_folders.append(path)
else:
self.recurse_excludings(node['children'])
# -------------------------------------------------------------------------
@staticmethod
def filter_dot(str):
"""
Remove the './' prefix, if present.
This function assumes that resources.win_to_unix()
replaced all windows backslashes with slashes.
"""
if str == None:
return None
if str[:2] == './':
return str[2:]
return str
# -------------------------------------------------------------------------
def dump_tree(self, nodes, depth=0):
for k in nodes.keys():
node = nodes[k]
parent_name = node['parent'][
'name'] if 'parent' in node.keys() else ''
print ' ' * depth, node['name'], node['is_used'], parent_name
if len(node['children'].keys()) != 0:
self.dump_tree(node['children'], depth + 1)
def dump_paths(self, nodes, depth=0):
for k in nodes.keys():
node = nodes[k]
parts = []
while True:
parts.insert(0, node['name'])
if 'parent' not in node:
break
node = node['parent']
path = '/'.join(parts)
print path, nodes[k]['is_used']
self.dump_paths(nodes[k]['children'], depth + 1)
# -------------------------------------------------------------------------
def process_options(self, opts, flags_in):
"""
CDT managed projects store lots of build options in separate
variables, with separate IDs in the .cproject file.
When the CDT build is started, all these options are brought
together to compose the compiler and linker command lines.
Here the process is reversed, from the compiler and linker
command lines, the options are identified and various flags are
set to control the template generation process.
Once identified, the options are removed from the command lines.
The options that were not identified are options that do not
have CDT equivalents and will be passed in the 'Other options'
categories.
        Although the logic itself is not complicated, the large number of
        explicit configuration options used by the GNU ARM Eclipse managed
        build plug-in makes the process tedious.
"""
# Make a copy of the flags, to be one by one removed after processing.
flags = copy.deepcopy(flags_in)
if False:
print
print 'common_flags', flags['common_flags']
print 'asm_flags', flags['asm_flags']
print 'c_flags', flags['c_flags']
print 'cxx_flags', flags['cxx_flags']
print 'ld_flags', flags['ld_flags']
# Initialise the 'last resort' options where all unrecognised
# options will be collected.
opts['as']['other'] = ''
opts['c']['other'] = ''
opts['cpp']['other'] = ''
opts['ld']['other'] = ''
MCPUS = {
'Cortex-M0': {'mcpu': 'cortex-m0', 'fpu_unit': None},
'Cortex-M0+': {'mcpu': 'cortex-m0plus', 'fpu_unit': None},
'Cortex-M1': {'mcpu': 'cortex-m1', 'fpu_unit': None},
'Cortex-M3': {'mcpu': 'cortex-m3', 'fpu_unit': None},
'Cortex-M4': {'mcpu': 'cortex-m4', 'fpu_unit': None},
'Cortex-M4F': {'mcpu': 'cortex-m4', 'fpu_unit': 'fpv4spd16'},
'Cortex-M7': {'mcpu': 'cortex-m7', 'fpu_unit': None},
'Cortex-M7F': {'mcpu': 'cortex-m7', 'fpu_unit': 'fpv4spd16'},
'Cortex-M7FD': {'mcpu': 'cortex-m7', 'fpu_unit': 'fpv5d16'},
'Cortex-A9': {'mcpu': 'cortex-a9', 'fpu_unit': 'vfpv3'}
}
# Remove options that are supplied by CDT
self.remove_option(flags['common_flags'], '-c')
self.remove_option(flags['common_flags'], '-MMD')
# As 'plan B', get the CPU from the target definition.
core = self.toolchain.target.core
opts['common']['arm.target.family'] = None
# cortex-m0, cortex-m0-small-multiply, cortex-m0plus,
# cortex-m0plus-small-multiply, cortex-m1, cortex-m1-small-multiply,
# cortex-m3, cortex-m4, cortex-m7.
str = self.find_options(flags['common_flags'], '-mcpu=')
if str != None:
opts['common']['arm.target.family'] = str[len('-mcpu='):]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
else:
if core not in MCPUS:
raise NotSupportedException(
'Target core {0} not supported.'.format(core))
opts['common']['arm.target.family'] = MCPUS[core]['mcpu']
opts['common']['arm.target.arch'] = 'none'
str = self.find_options(flags['common_flags'], '-march=')
arch = str[len('-march='):]
archs = {'armv6-m': 'armv6-m', 'armv7-m': 'armv7-m', 'armv7-a': 'armv7-a'}
if arch in archs:
opts['common']['arm.target.arch'] = archs[arch]
self.remove_option(flags['common_flags'], str)
opts['common']['arm.target.instructionset'] = 'thumb'
if '-mthumb' in flags['common_flags']:
self.remove_option(flags['common_flags'], '-mthumb')
self.remove_option(flags['ld_flags'], '-mthumb')
elif '-marm' in flags['common_flags']:
opts['common']['arm.target.instructionset'] = 'arm'
self.remove_option(flags['common_flags'], '-marm')
self.remove_option(flags['ld_flags'], '-marm')
opts['common']['arm.target.thumbinterwork'] = False
if '-mthumb-interwork' in flags['common_flags']:
opts['common']['arm.target.thumbinterwork'] = True
self.remove_option(flags['common_flags'], '-mthumb-interwork')
opts['common']['arm.target.endianness'] = None
if '-mlittle-endian' in flags['common_flags']:
opts['common']['arm.target.endianness'] = 'little'
self.remove_option(flags['common_flags'], '-mlittle-endian')
elif '-mbig-endian' in flags['common_flags']:
opts['common']['arm.target.endianness'] = 'big'
self.remove_option(flags['common_flags'], '-mbig-endian')
opts['common']['arm.target.fpu.unit'] = None
# default, fpv4spd16, fpv5d16, fpv5spd16
str = self.find_options(flags['common_flags'], '-mfpu=')
if str != None:
fpu = str[len('-mfpu='):]
fpus = {
'fpv4-sp-d16': 'fpv4spd16',
'fpv5-d16': 'fpv5d16',
'fpv5-sp-d16': 'fpv5spd16'
}
if fpu in fpus:
opts['common']['arm.target.fpu.unit'] = fpus[fpu]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
if opts['common']['arm.target.fpu.unit'] == None:
if core not in MCPUS:
raise NotSupportedException(
'Target core {0} not supported.'.format(core))
if MCPUS[core]['fpu_unit']:
opts['common'][
'arm.target.fpu.unit'] = MCPUS[core]['fpu_unit']
# soft, softfp, hard.
str = self.find_options(flags['common_flags'], '-mfloat-abi=')
if str != None:
opts['common']['arm.target.fpu.abi'] = str[
len('-mfloat-abi='):]
self.remove_option(flags['common_flags'], str)
self.remove_option(flags['ld_flags'], str)
opts['common']['arm.target.unalignedaccess'] = None
if '-munaligned-access' in flags['common_flags']:
opts['common']['arm.target.unalignedaccess'] = 'enabled'
self.remove_option(flags['common_flags'], '-munaligned-access')
elif '-mno-unaligned-access' in flags['common_flags']:
opts['common']['arm.target.unalignedaccess'] = 'disabled'
self.remove_option(flags['common_flags'], '-mno-unaligned-access')
# Default optimisation level for Release.
opts['common']['optimization.level'] = '-Os'
# If the project defines an optimisation level, it is used
# only for the Release configuration, the Debug one used '-Og'.
str = self.find_options(flags['common_flags'], '-O')
if str != None:
levels = {
'-O0': 'none', '-O1': 'optimize', '-O2': 'more',
'-O3': 'most', '-Os': 'size', '-Og': 'debug'
}
if str in levels:
opts['common']['optimization.level'] = levels[str]
self.remove_option(flags['common_flags'], str)
include_files = []
for all_flags in [flags['common_flags'], flags['c_flags'], flags['cxx_flags']]:
while '-include' in all_flags:
ix = all_flags.index('-include')
str = all_flags[ix + 1]
if str not in include_files:
include_files.append(str)
self.remove_option(all_flags, '-include')
self.remove_option(all_flags, str)
opts['common']['include_files'] = include_files
if '-ansi' in flags['c_flags']:
opts['c']['compiler.std'] = '-ansi'
            self.remove_option(flags['c_flags'], '-ansi')
else:
str = self.find_options(flags['c_flags'], '-std')
std = str[len('-std='):]
c_std = {
'c90': 'c90', 'c89': 'c90', 'gnu90': 'gnu90', 'gnu89': 'gnu90',
                'c99': 'c99', 'c9x': 'c99', 'gnu99': 'gnu99', 'gnu9x': 'gnu99',
'c11': 'c11', 'c1x': 'c11', 'gnu11': 'gnu11', 'gnu1x': 'gnu11'
}
if std in c_std:
opts['c']['compiler.std'] = c_std[std]
self.remove_option(flags['c_flags'], str)
if '-ansi' in flags['cxx_flags']:
opts['cpp']['compiler.std'] = '-ansi'
            self.remove_option(flags['cxx_flags'], '-ansi')
else:
str = self.find_options(flags['cxx_flags'], '-std')
std = str[len('-std='):]
cpp_std = {
'c++98': 'cpp98', 'c++03': 'cpp98',
'gnu++98': 'gnucpp98', 'gnu++03': 'gnucpp98',
'c++0x': 'cpp0x', 'gnu++0x': 'gnucpp0x',
'c++11': 'cpp11', 'gnu++11': 'gnucpp11',
'c++1y': 'cpp1y', 'gnu++1y': 'gnucpp1y',
'c++14': 'cpp14', 'gnu++14': 'gnucpp14',
'c++1z': 'cpp1z', 'gnu++1z': 'gnucpp1z',
}
if std in cpp_std:
opts['cpp']['compiler.std'] = cpp_std[std]
self.remove_option(flags['cxx_flags'], str)
# Common optimisation options.
optimization_options = {
'-fmessage-length=0': 'optimization.messagelength',
'-fsigned-char': 'optimization.signedchar',
'-ffunction-sections': 'optimization.functionsections',
'-fdata-sections': 'optimization.datasections',
'-fno-common': 'optimization.nocommon',
'-fno-inline-functions': 'optimization.noinlinefunctions',
'-ffreestanding': 'optimization.freestanding',
'-fno-builtin': 'optimization.nobuiltin',
'-fsingle-precision-constant': 'optimization.spconstant',
'-fPIC': 'optimization.PIC',
'-fno-move-loop-invariants': 'optimization.nomoveloopinvariants',
}
for option in optimization_options:
opts['common'][optimization_options[option]] = False
if option in flags['common_flags']:
opts['common'][optimization_options[option]] = True
self.remove_option(flags['common_flags'], option)
# Common warning options.
warning_options = {
'-fsyntax-only': 'warnings.syntaxonly',
'-pedantic': 'warnings.pedantic',
'-pedantic-errors': 'warnings.pedanticerrors',
'-w': 'warnings.nowarn',
'-Wunused': 'warnings.unused',
'-Wuninitialized': 'warnings.uninitialized',
'-Wall': 'warnings.allwarn',
'-Wextra': 'warnings.extrawarn',
'-Wmissing-declarations': 'warnings.missingdeclaration',
'-Wconversion': 'warnings.conversion',
'-Wpointer-arith': 'warnings.pointerarith',
'-Wpadded': 'warnings.padded',
'-Wshadow': 'warnings.shadow',
'-Wlogical-op': 'warnings.logicalop',
'-Waggregate-return': 'warnings.agreggatereturn',
'-Wfloat-equal': 'warnings.floatequal',
'-Werror': 'warnings.toerrors',
}
for option in warning_options:
opts['common'][warning_options[option]] = False
if option in flags['common_flags']:
opts['common'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
# Common debug options.
debug_levels = {
'-g': 'default',
'-g1': 'minimal',
'-g3': 'max',
}
opts['common']['debugging.level'] = 'none'
for option in debug_levels:
if option in flags['common_flags']:
opts['common'][
'debugging.level'] = debug_levels[option]
self.remove_option(flags['common_flags'], option)
debug_formats = {
'-ggdb': 'gdb',
'-gstabs': 'stabs',
'-gstabs+': 'stabsplus',
'-gdwarf-2': 'dwarf2',
'-gdwarf-3': 'dwarf3',
'-gdwarf-4': 'dwarf4',
'-gdwarf-5': 'dwarf5',
}
opts['common']['debugging.format'] = ''
        for option in debug_formats:
if option in flags['common_flags']:
opts['common'][
'debugging.format'] = debug_formats[option]
self.remove_option(flags['common_flags'], option)
opts['common']['debugging.prof'] = False
if '-p' in flags['common_flags']:
opts['common']['debugging.prof'] = True
self.remove_option(flags['common_flags'], '-p')
opts['common']['debugging.gprof'] = False
if '-pg' in flags['common_flags']:
opts['common']['debugging.gprof'] = True
            self.remove_option(flags['common_flags'], '-pg')
# Assembler options.
opts['as']['usepreprocessor'] = False
while '-x' in flags['asm_flags']:
ix = flags['asm_flags'].index('-x')
str = flags['asm_flags'][ix + 1]
if str == 'assembler-with-cpp':
opts['as']['usepreprocessor'] = True
else:
# Collect all other assembler options.
opts['as']['other'] += ' -x ' + str
self.remove_option(flags['asm_flags'], '-x')
self.remove_option(flags['asm_flags'], 'assembler-with-cpp')
opts['as']['nostdinc'] = False
if '-nostdinc' in flags['asm_flags']:
opts['as']['nostdinc'] = True
self.remove_option(flags['asm_flags'], '-nostdinc')
opts['as']['verbose'] = False
if '-v' in flags['asm_flags']:
opts['as']['verbose'] = True
self.remove_option(flags['asm_flags'], '-v')
# C options.
opts['c']['nostdinc'] = False
if '-nostdinc' in flags['c_flags']:
opts['c']['nostdinc'] = True
self.remove_option(flags['c_flags'], '-nostdinc')
opts['c']['verbose'] = False
if '-v' in flags['c_flags']:
opts['c']['verbose'] = True
self.remove_option(flags['c_flags'], '-v')
warning_options = {
'-Wmissing-prototypes': 'warnings.missingprototypes',
'-Wstrict-prototypes': 'warnings.strictprototypes',
'-Wbad-function-cast': 'warnings.badfunctioncast',
}
for option in warning_options:
opts['c'][warning_options[option]] = False
if option in flags['common_flags']:
opts['c'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
# C++ options.
opts['cpp']['nostdinc'] = False
if '-nostdinc' in flags['cxx_flags']:
opts['cpp']['nostdinc'] = True
self.remove_option(flags['cxx_flags'], '-nostdinc')
opts['cpp']['nostdincpp'] = False
if '-nostdinc++' in flags['cxx_flags']:
opts['cpp']['nostdincpp'] = True
self.remove_option(flags['cxx_flags'], '-nostdinc++')
optimization_options = {
'-fno-exceptions': 'optimization.noexceptions',
'-fno-rtti': 'optimization.nortti',
'-fno-use-cxa-atexit': 'optimization.nousecxaatexit',
'-fno-threadsafe-statics': 'optimization.nothreadsafestatics',
}
for option in optimization_options:
opts['cpp'][optimization_options[option]] = False
if option in flags['cxx_flags']:
opts['cpp'][optimization_options[option]] = True
self.remove_option(flags['cxx_flags'], option)
if option in flags['common_flags']:
opts['cpp'][optimization_options[option]] = True
self.remove_option(flags['common_flags'], option)
warning_options = {
'-Wabi': 'warnabi',
'-Wctor-dtor-privacy': 'warnings.ctordtorprivacy',
'-Wnoexcept': 'warnings.noexcept',
'-Wnon-virtual-dtor': 'warnings.nonvirtualdtor',
'-Wstrict-null-sentinel': 'warnings.strictnullsentinel',
'-Wsign-promo': 'warnings.signpromo',
'-Weffc++': 'warneffc',
}
for option in warning_options:
opts['cpp'][warning_options[option]] = False
if option in flags['cxx_flags']:
opts['cpp'][warning_options[option]] = True
self.remove_option(flags['cxx_flags'], option)
if option in flags['common_flags']:
opts['cpp'][warning_options[option]] = True
self.remove_option(flags['common_flags'], option)
opts['cpp']['verbose'] = False
if '-v' in flags['cxx_flags']:
opts['cpp']['verbose'] = True
self.remove_option(flags['cxx_flags'], '-v')
# Linker options.
linker_options = {
'-nostartfiles': 'nostart',
'-nodefaultlibs': 'nodeflibs',
'-nostdlib': 'nostdlibs',
}
for option in linker_options:
opts['ld'][linker_options[option]] = False
if option in flags['ld_flags']:
opts['ld'][linker_options[option]] = True
self.remove_option(flags['ld_flags'], option)
opts['ld']['gcsections'] = False
if '-Wl,--gc-sections' in flags['ld_flags']:
opts['ld']['gcsections'] = True
self.remove_option(flags['ld_flags'], '-Wl,--gc-sections')
opts['ld']['flags'] = []
to_remove = []
for opt in flags['ld_flags']:
if opt.startswith('-Wl,--wrap,'):
opts['ld']['flags'].append(
'--wrap=' + opt[len('-Wl,--wrap,'):])
to_remove.append(opt)
for opt in to_remove:
self.remove_option(flags['ld_flags'], opt)
# Other tool remaining options are separated by category.
opts['as']['otherwarnings'] = self.find_options(
flags['asm_flags'], '-W')
opts['c']['otherwarnings'] = self.find_options(
flags['c_flags'], '-W')
opts['c']['otheroptimizations'] = self.find_options(flags[
'c_flags'], '-f')
opts['cpp']['otherwarnings'] = self.find_options(
flags['cxx_flags'], '-W')
opts['cpp']['otheroptimizations'] = self.find_options(
flags['cxx_flags'], '-f')
# Other common remaining options are separated by category.
opts['common']['optimization.other'] = self.find_options(
flags['common_flags'], '-f')
opts['common']['warnings.other'] = self.find_options(
flags['common_flags'], '-W')
# Remaining common flags are added to each tool.
opts['as']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + \
' '.join(flags['asm_flags'])
opts['c']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + ' '.join(flags['c_flags'])
opts['cpp']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + \
' '.join(flags['cxx_flags'])
opts['ld']['other'] += ' ' + \
' '.join(flags['common_flags']) + ' ' + ' '.join(flags['ld_flags'])
if len(self.system_libraries) > 0:
opts['ld']['other'] += ' -Wl,--start-group '
opts['ld'][
'other'] += ' '.join('-l' + s for s in self.system_libraries)
opts['ld']['other'] += ' -Wl,--end-group '
# Strip all 'other' flags, since they might have leading spaces.
opts['as']['other'] = opts['as']['other'].strip()
opts['c']['other'] = opts['c']['other'].strip()
opts['cpp']['other'] = opts['cpp']['other'].strip()
opts['ld']['other'] = opts['ld']['other'].strip()
if False:
print
print opts
print
print 'common_flags', flags['common_flags']
print 'asm_flags', flags['asm_flags']
print 'c_flags', flags['c_flags']
print 'cxx_flags', flags['cxx_flags']
print 'ld_flags', flags['ld_flags']
@staticmethod
def find_options(lst, option):
tmp = [str for str in lst if str.startswith(option)]
if len(tmp) > 0:
return tmp[0]
else:
return None
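    # NOTE: the definition below rebinds the name find_options, so it shadows
    # the single-match variant above (a class body keeps only the last
    # binding); every self.find_options() call therefore resolves to the
    # prefix-collecting version, which also removes the matched options
    # from the list.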
@staticmethod
def find_options(lst, prefix):
other = ''
opts = [str for str in lst if str.startswith(prefix)]
if len(opts) > 0:
for opt in opts:
other += ' ' + opt
GNUARMEclipse.remove_option(lst, opt)
return other.strip()
@staticmethod
def remove_option(lst, option):
if option in lst:
lst.remove(option)
# =============================================================================
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from oslo_concurrency import processutils
from cinder.brick import exception
from cinder.brick.local_dev import lvm as brick
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
return configuration
class BrickLvmTestCase(test.TestCase):
def setUp(self):
self._mox = mox.Mox()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.volume_group_name = 'fake-vg'
super(BrickLvmTestCase, self).setUp()
        # Stub processutils.execute for static methods
self.stubs.Set(processutils, 'execute',
self.fake_execute)
self.vg = brick.LVM(self.configuration.volume_group_name,
'sudo',
False, None,
'default',
self.fake_execute)
def failed_fake_execute(obj, *cmd, **kwargs):
return ("\n", "fake-error")
def fake_pretend_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.03.00 (2012-03-06)\n", "")
def fake_old_lvm_version(obj, *cmd, **kwargs):
# Does not support thin prov or snap activation
return (" LVM version: 2.02.65(2) (2012-03-06)\n", "")
def fake_customised_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "")
def fake_execute(obj, *cmd, **kwargs):
cmd_string = ', '.join(cmd)
data = "\n"
if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' ==
cmd_string):
data = " fake-vg\n"
data += " some-other-vg\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' ==
cmd_string):
data = " fake-vg\n"
elif 'env, LC_ALL=C, vgs, --version' in cmd_string:
data = " LVM version: 2.02.95(2) (2012-03-06)\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o uuid, fake-vg' in
cmd_string):
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
'-o, name,size,free,lv_count,uuid, ' \
'--separator, :, --nosuffix' in cmd_string:
data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-unit' in cmd_string:
return (data, "")
data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-no-unit' in cmd_string:
return (data, "")
data = " fake-vg:10.00:10.00:0:"\
"kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
if 'fake-vg' in cmd_string:
return (data, "")
data += " fake-vg-2:10.00:10.00:0:"\
"lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
data += " fake-vg-3:10.00:10.00:0:"\
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
elif ('env, LC_ALL=C, lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-nothere' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="One or more specified logical volume(s) not found.")
elif ('env, LC_ALL=C, lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size' in cmd_string):
if 'fake-unknown' in cmd_string:
raise processutils.ProcessExecutionError(
stderr="One or more volume(s) not found."
)
if 'test-prov-cap-vg-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
data += " fake-vg fake-volume-1 1.00g\n"
data += " fake-vg fake-volume-2 2.00g\n"
elif 'test-prov-cap-vg-no-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
data += " fake-vg fake-volume-1 1.00\n"
data += " fake-vg fake-volume-2 2.00\n"
elif 'test-found-lv-name' in cmd_string:
data = " fake-vg test-found-lv-name 9.50\n"
else:
data = " fake-vg fake-1 1.00g\n"
data += " fake-vg fake-2 1.00g\n"
elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data = ' wi-a-'
else:
data = ' owi-a-'
elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string:
data = " fake-vg|/dev/sda|10.00|1.00\n"
data += " fake-vg|/dev/sdb|10.00|1.00\n"
data += " fake-vg|/dev/sdc|10.00|8.99\n"
data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
if 'test-prov-cap-pool' in cmd_string:
data = " 9.5:20\n"
else:
data = " 9:12\n"
elif 'lvcreate, -T, -L, ' in cmd_string:
pass
elif 'lvcreate, -T, -V, ' in cmd_string:
pass
elif 'lvcreate, --name, ' in cmd_string:
pass
else:
raise AssertionError('unexpected command called: %s' % cmd_string)
return (data, "")
def test_create_lv_snapshot(self):
self.assertEqual(self.vg.create_lv_snapshot('snapshot-1', 'fake-1'),
None)
self._mox.StubOutWithMock(self.vg, 'get_volume')
self.vg.get_volume('fake-non-existent').AndReturn(None)
self._mox.ReplayAll()
try:
self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent')
except exception.VolumeDeviceNotFound as e:
self.assertEqual(e.kwargs['device'], 'fake-non-existent')
else:
self.fail("Exception not raised")
def test_vg_exists(self):
self.assertEqual(self.vg._vg_exists(), True)
def test_get_vg_uuid(self):
self.assertEqual(self.vg._get_vg_uuid()[0],
'kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1')
def test_get_all_volumes(self):
out = self.vg.get_volumes()
self.assertEqual(out[0]['name'], 'fake-1')
self.assertEqual(out[0]['size'], '1.00g')
self.assertEqual(out[0]['vg'], 'fake-vg')
def test_get_volume(self):
self.assertEqual(self.vg.get_volume('fake-1')['name'], 'fake-1')
def test_get_volume_none(self):
self.assertEqual(self.vg.get_volume('fake-unknown'), None)
def test_get_lv_info_notfound(self):
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-nothere')
)
def test_get_lv_info_found(self):
lv_info = [{'size': '9.50', 'name': 'test-found-lv-name',
'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg',
lv_name='test-found-lv-name')
)
def test_get_lv_info_no_lv_name(self):
lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
{'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg')
)
def test_get_all_physical_volumes(self):
# Filtered VG version
pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg')
self.assertEqual(len(pvs), 3)
# Non-Filtered, all VG's
pvs = self.vg.get_all_physical_volumes('sudo')
self.assertEqual(len(pvs), 4)
def test_get_physical_volumes(self):
pvs = self.vg.get_physical_volumes()
self.assertEqual(len(pvs), 3)
def test_get_volume_groups(self):
self.assertEqual(len(self.vg.get_all_volume_groups('sudo')), 3)
self.assertEqual(len(self.vg.get_all_volume_groups('sudo',
'fake-vg')), 1)
def test_thin_support(self):
# lvm.supports_thin() is a static method and doesn't
# use the self._executor fake we pass in on init
# so we need to stub processutils.execute appropriately
self.stubs.Set(processutils, 'execute', self.fake_execute)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils,
'execute',
self.fake_customised_lvm_version)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
def test_snapshot_lv_activate_support(self):
self.vg._supports_snapshot_lv_activation = None
self.stubs.Set(processutils, 'execute', self.fake_execute)
self.assertTrue(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
def test_lvchange_ignskipact_support_yes(self):
"""Tests if lvchange -K is available via a lvm2 version check."""
self.vg._supports_lvchange_ignoreskipactivation = None
self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version)
self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
def test_thin_pool_creation(self):
# The size of fake-vg volume group is 10g, so the calculated thin
# pool size should be 9.5g (95% of 10g).
self.assertEqual("9.5g", self.vg.create_thin_pool())
# Passing a size parameter should result in a thin pool of that exact
# size.
for size in ("1g", "1.2g", "1.75g"):
self.assertEqual(size, self.vg.create_thin_pool(size_str=size))
def test_thin_pool_provisioned_capacity(self):
self.vg.vg_thin_pool = "test-prov-cap-pool-unit"
self.vg.vg_name = 'test-prov-cap-vg-unit'
self.assertEqual(
"9.5g",
self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual("9.50", self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit"
self.vg.vg_name = 'test-prov-cap-vg-no-unit'
self.assertEqual(
"9.5g",
self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual("9.50", self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
def test_thin_pool_free_space(self):
# The size of fake-vg-pool is 9g and the allocated data sums up to
# 12% so the calculated free space should be 7.92
self.assertEqual(float("7.92"),
self.vg._get_thin_pool_free_space("fake-vg",
"fake-vg-pool"))
def test_volume_create_after_thin_creation(self):
"""Test self.vg.vg_thin_pool is set to pool_name
See bug #1220286 for more info.
"""
vg_name = "vg-name"
pool_name = vg_name + "-pool"
pool_path = "%s/%s" % (vg_name, pool_name)
def executor(obj, *cmd, **kwargs):
self.assertEqual(pool_path, cmd[-1])
self.vg._executor = executor
self.vg.create_thin_pool(pool_name, "1G")
self.vg.create_volume("test", "1G", lv_type='thin')
self.assertEqual(self.vg.vg_thin_pool, pool_name)
def test_lv_has_snapshot(self):
self.assertTrue(self.vg.lv_has_snapshot('fake-vg'))
self.assertFalse(self.vg.lv_has_snapshot('test-volumes'))
def test_activate_lv(self):
self._mox.StubOutWithMock(self.vg, '_execute')
self.vg._supports_lvchange_ignoreskipactivation = True
self.vg._execute('lvchange', '-a', 'y', '--yes', '-K',
'fake-vg/my-lv',
root_helper='sudo', run_as_root=True)
self._mox.ReplayAll()
self.vg.activate_lv('my-lv')
self._mox.VerifyAll()
def test_get_mirrored_available_capacity(self):
self.assertEqual(self.vg.vg_mirror_free_space(1), 2.0)
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'DataBaseGUIdesign.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_DataBaseGUI(object):
def setupUi(self, DataBaseGUI):
DataBaseGUI.setObjectName(_fromUtf8("DataBaseGUI"))
DataBaseGUI.resize(2265, 1523)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(DataBaseGUI.sizePolicy().hasHeightForWidth())
DataBaseGUI.setSizePolicy(sizePolicy)
self.centralwidget = QtGui.QWidget(DataBaseGUI)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.splitter = QtGui.QSplitter(self.centralwidget)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.widget = QtGui.QWidget(self.splitter)
self.widget.setObjectName(_fromUtf8("widget"))
self.verticalLayout_8 = QtGui.QVBoxLayout(self.widget)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.gridLayout_18 = QtGui.QGridLayout()
self.gridLayout_18.setObjectName(_fromUtf8("gridLayout_18"))
self.EndDateEdit = QtGui.QDateEdit(self.widget)
self.EndDateEdit.setDate(QtCore.QDate(2017, 12, 31))
self.EndDateEdit.setObjectName(_fromUtf8("EndDateEdit"))
self.gridLayout_18.addWidget(self.EndDateEdit, 0, 3, 1, 1)
self.StartDateLabel_8 = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartDateLabel_8.sizePolicy().hasHeightForWidth())
self.StartDateLabel_8.setSizePolicy(sizePolicy)
self.StartDateLabel_8.setObjectName(_fromUtf8("StartDateLabel_8"))
self.gridLayout_18.addWidget(self.StartDateLabel_8, 0, 0, 1, 1)
self.StartDateLabel_7 = QtGui.QLabel(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartDateLabel_7.sizePolicy().hasHeightForWidth())
self.StartDateLabel_7.setSizePolicy(sizePolicy)
self.StartDateLabel_7.setObjectName(_fromUtf8("StartDateLabel_7"))
self.gridLayout_18.addWidget(self.StartDateLabel_7, 0, 2, 1, 1)
self.StartDateEdit = QtGui.QDateEdit(self.widget)
self.StartDateEdit.setDate(QtCore.QDate(2017, 1, 1))
self.StartDateEdit.setObjectName(_fromUtf8("StartDateEdit"))
self.gridLayout_18.addWidget(self.StartDateEdit, 0, 1, 1, 1)
self.verticalLayout_8.addLayout(self.gridLayout_18)
self.FilterPlotDatabasePushButton = QtGui.QPushButton(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.FilterPlotDatabasePushButton.sizePolicy().hasHeightForWidth())
self.FilterPlotDatabasePushButton.setSizePolicy(sizePolicy)
self.FilterPlotDatabasePushButton.setObjectName(_fromUtf8("FilterPlotDatabasePushButton"))
self.verticalLayout_8.addWidget(self.FilterPlotDatabasePushButton)
self.line = QtGui.QFrame(self.widget)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout_8.addWidget(self.line)
self.tabWidget_8 = QtGui.QTabWidget(self.widget)
self.tabWidget_8.setObjectName(_fromUtf8("tabWidget_8"))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.verticalLayout_32 = QtGui.QVBoxLayout(self.tab_4)
self.verticalLayout_32.setObjectName(_fromUtf8("verticalLayout_32"))
self.gridLayout_9 = QtGui.QGridLayout()
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.ReadFilePathWidget = QtGui.QLineEdit(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
self.ReadFilePathWidget.setFont(font)
self.ReadFilePathWidget.setObjectName(_fromUtf8("ReadFilePathWidget"))
self.gridLayout_9.addWidget(self.ReadFilePathWidget, 0, 1, 1, 1)
self.NewFilePushButton = QtGui.QPushButton(self.tab_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.NewFilePushButton.sizePolicy().hasHeightForWidth())
self.NewFilePushButton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.NewFilePushButton.setFont(font)
self.NewFilePushButton.setObjectName(_fromUtf8("NewFilePushButton"))
self.gridLayout_9.addWidget(self.NewFilePushButton, 0, 0, 1, 1)
self.SaveFilePathWidget = QtGui.QLineEdit(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
self.SaveFilePathWidget.setFont(font)
self.SaveFilePathWidget.setObjectName(_fromUtf8("SaveFilePathWidget"))
self.gridLayout_9.addWidget(self.SaveFilePathWidget, 1, 1, 1, 1)
self.SaveDataPushButton = QtGui.QPushButton(self.tab_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SaveDataPushButton.sizePolicy().hasHeightForWidth())
self.SaveDataPushButton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.SaveDataPushButton.setFont(font)
self.SaveDataPushButton.setObjectName(_fromUtf8("SaveDataPushButton"))
self.gridLayout_9.addWidget(self.SaveDataPushButton, 1, 0, 1, 1)
self.verticalLayout_32.addLayout(self.gridLayout_9)
self.gridLayout_14 = QtGui.QGridLayout()
self.gridLayout_14.setObjectName(_fromUtf8("gridLayout_14"))
self.Label_10 = QtGui.QLabel(self.tab_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_10.sizePolicy().hasHeightForWidth())
self.Label_10.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.Label_10.setFont(font)
self.Label_10.setObjectName(_fromUtf8("Label_10"))
self.gridLayout_14.addWidget(self.Label_10, 1, 0, 1, 1)
self.Label_9 = QtGui.QLabel(self.tab_4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_9.sizePolicy().hasHeightForWidth())
self.Label_9.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.Label_9.setFont(font)
self.Label_9.setObjectName(_fromUtf8("Label_9"))
self.gridLayout_14.addWidget(self.Label_9, 0, 0, 1, 1)
self.DatabaseRowsSpinBox = QtGui.QSpinBox(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
self.DatabaseRowsSpinBox.setFont(font)
self.DatabaseRowsSpinBox.setMaximum(100000)
self.DatabaseRowsSpinBox.setProperty("value", 50)
self.DatabaseRowsSpinBox.setObjectName(_fromUtf8("DatabaseRowsSpinBox"))
self.gridLayout_14.addWidget(self.DatabaseRowsSpinBox, 1, 1, 1, 1)
self.DatabaseSizeSpinBox = QtGui.QSpinBox(self.tab_4)
font = QtGui.QFont()
font.setPointSize(8)
self.DatabaseSizeSpinBox.setFont(font)
self.DatabaseSizeSpinBox.setReadOnly(True)
self.DatabaseSizeSpinBox.setMaximum(100000)
self.DatabaseSizeSpinBox.setProperty("value", 50)
self.DatabaseSizeSpinBox.setObjectName(_fromUtf8("DatabaseSizeSpinBox"))
self.gridLayout_14.addWidget(self.DatabaseSizeSpinBox, 0, 1, 1, 1)
self.verticalLayout_32.addLayout(self.gridLayout_14)
self.Table1Widget = QtGui.QTableWidget(self.tab_4)
self.Table1Widget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.Table1Widget.setObjectName(_fromUtf8("Table1Widget"))
self.Table1Widget.setColumnCount(0)
self.Table1Widget.setRowCount(0)
self.Table1Widget.horizontalHeader().setSortIndicatorShown(False)
self.Table1Widget.verticalHeader().setSortIndicatorShown(False)
self.verticalLayout_32.addWidget(self.Table1Widget)
self.tabWidget_8.addTab(self.tab_4, _fromUtf8(""))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.verticalLayout_7 = QtGui.QVBoxLayout(self.tab_3)
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.SportsListWidget = QtGui.QListWidget(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SportsListWidget.sizePolicy().hasHeightForWidth())
self.SportsListWidget.setSizePolicy(sizePolicy)
self.SportsListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.SportsListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.SportsListWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.SportsListWidget.setObjectName(_fromUtf8("SportsListWidget"))
self.gridLayout_3.addWidget(self.SportsListWidget, 1, 0, 1, 1)
self.GearListWidget = QtGui.QListWidget(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.GearListWidget.sizePolicy().hasHeightForWidth())
self.GearListWidget.setSizePolicy(sizePolicy)
self.GearListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.GearListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.GearListWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.GearListWidget.setObjectName(_fromUtf8("GearListWidget"))
self.gridLayout_3.addWidget(self.GearListWidget, 1, 3, 1, 1)
self.ActivitiesListWidget = QtGui.QListWidget(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ActivitiesListWidget.sizePolicy().hasHeightForWidth())
self.ActivitiesListWidget.setSizePolicy(sizePolicy)
self.ActivitiesListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.ActivitiesListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.ActivitiesListWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.ActivitiesListWidget.setObjectName(_fromUtf8("ActivitiesListWidget"))
self.gridLayout_3.addWidget(self.ActivitiesListWidget, 1, 2, 1, 1)
self.ActivitiesLabel = QtGui.QLabel(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ActivitiesLabel.sizePolicy().hasHeightForWidth())
self.ActivitiesLabel.setSizePolicy(sizePolicy)
self.ActivitiesLabel.setAlignment(QtCore.Qt.AlignCenter)
self.ActivitiesLabel.setObjectName(_fromUtf8("ActivitiesLabel"))
self.gridLayout_3.addWidget(self.ActivitiesLabel, 0, 2, 1, 1)
self.SportsLabel = QtGui.QLabel(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SportsLabel.sizePolicy().hasHeightForWidth())
self.SportsLabel.setSizePolicy(sizePolicy)
self.SportsLabel.setAlignment(QtCore.Qt.AlignCenter)
self.SportsLabel.setObjectName(_fromUtf8("SportsLabel"))
self.gridLayout_3.addWidget(self.SportsLabel, 0, 0, 1, 1)
self.GearLabel = QtGui.QLabel(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.GearLabel.sizePolicy().hasHeightForWidth())
self.GearLabel.setSizePolicy(sizePolicy)
self.GearLabel.setAlignment(QtCore.Qt.AlignCenter)
self.GearLabel.setObjectName(_fromUtf8("GearLabel"))
self.gridLayout_3.addWidget(self.GearLabel, 0, 3, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_3)
self.line_2 = QtGui.QFrame(self.tab_3)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.verticalLayout_7.addWidget(self.line_2)
self.gridLayout_7 = QtGui.QGridLayout()
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.EndLocationLabel = QtGui.QLabel(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.EndLocationLabel.sizePolicy().hasHeightForWidth())
self.EndLocationLabel.setSizePolicy(sizePolicy)
self.EndLocationLabel.setAlignment(QtCore.Qt.AlignCenter)
self.EndLocationLabel.setObjectName(_fromUtf8("EndLocationLabel"))
self.gridLayout_7.addWidget(self.EndLocationLabel, 2, 2, 1, 1)
self.EndLocationListWidget = QtGui.QListWidget(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.EndLocationListWidget.sizePolicy().hasHeightForWidth())
self.EndLocationListWidget.setSizePolicy(sizePolicy)
self.EndLocationListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.EndLocationListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.EndLocationListWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.EndLocationListWidget.setObjectName(_fromUtf8("EndLocationListWidget"))
self.gridLayout_7.addWidget(self.EndLocationListWidget, 3, 2, 1, 1)
self.StartLocationLabel = QtGui.QLabel(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartLocationLabel.sizePolicy().hasHeightForWidth())
self.StartLocationLabel.setSizePolicy(sizePolicy)
self.StartLocationLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StartLocationLabel.setObjectName(_fromUtf8("StartLocationLabel"))
self.gridLayout_7.addWidget(self.StartLocationLabel, 2, 0, 1, 1)
self.StartLocationListWidget = QtGui.QListWidget(self.tab_3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartLocationListWidget.sizePolicy().hasHeightForWidth())
self.StartLocationListWidget.setSizePolicy(sizePolicy)
self.StartLocationListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.StartLocationListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.StartLocationListWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.StartLocationListWidget.setObjectName(_fromUtf8("StartLocationListWidget"))
self.gridLayout_7.addWidget(self.StartLocationListWidget, 3, 0, 1, 1)
self.verticalLayout_7.addLayout(self.gridLayout_7)
self.tabWidget_8.addTab(self.tab_3, _fromUtf8(""))
self.tab_5 = QtGui.QWidget()
self.tab_5.setObjectName(_fromUtf8("tab_5"))
self.verticalLayout = QtGui.QVBoxLayout(self.tab_5)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.SIUnitsPushButton = QtGui.QPushButton(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SIUnitsPushButton.sizePolicy().hasHeightForWidth())
self.SIUnitsPushButton.setSizePolicy(sizePolicy)
self.SIUnitsPushButton.setObjectName(_fromUtf8("SIUnitsPushButton"))
self.verticalLayout.addWidget(self.SIUnitsPushButton)
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.XLabel1_3 = QtGui.QLabel(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.XLabel1_3.sizePolicy().hasHeightForWidth())
self.XLabel1_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.XLabel1_3.setFont(font)
self.XLabel1_3.setObjectName(_fromUtf8("XLabel1_3"))
self.gridLayout_4.addWidget(self.XLabel1_3, 1, 0, 1, 1)
self.SpeedUnitsComboBox = QtGui.QComboBox(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SpeedUnitsComboBox.sizePolicy().hasHeightForWidth())
self.SpeedUnitsComboBox.setSizePolicy(sizePolicy)
self.SpeedUnitsComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.SpeedUnitsComboBox.setFont(font)
self.SpeedUnitsComboBox.setDuplicatesEnabled(False)
self.SpeedUnitsComboBox.setObjectName(_fromUtf8("SpeedUnitsComboBox"))
self.gridLayout_4.addWidget(self.SpeedUnitsComboBox, 4, 1, 1, 1)
self.DistanceUnitsComboBox = QtGui.QComboBox(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.DistanceUnitsComboBox.sizePolicy().hasHeightForWidth())
self.DistanceUnitsComboBox.setSizePolicy(sizePolicy)
self.DistanceUnitsComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.DistanceUnitsComboBox.setFont(font)
self.DistanceUnitsComboBox.setDuplicatesEnabled(False)
self.DistanceUnitsComboBox.setObjectName(_fromUtf8("DistanceUnitsComboBox"))
self.gridLayout_4.addWidget(self.DistanceUnitsComboBox, 3, 1, 1, 1)
self.SizeLabel_2 = QtGui.QLabel(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SizeLabel_2.sizePolicy().hasHeightForWidth())
self.SizeLabel_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.SizeLabel_2.setFont(font)
self.SizeLabel_2.setObjectName(_fromUtf8("SizeLabel_2"))
self.gridLayout_4.addWidget(self.SizeLabel_2, 3, 0, 1, 1)
self.PositionUnitsComboBox = QtGui.QComboBox(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PositionUnitsComboBox.sizePolicy().hasHeightForWidth())
self.PositionUnitsComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.PositionUnitsComboBox.setFont(font)
self.PositionUnitsComboBox.setObjectName(_fromUtf8("PositionUnitsComboBox"))
self.gridLayout_4.addWidget(self.PositionUnitsComboBox, 1, 1, 1, 1)
self.LegendLabel_13 = QtGui.QLabel(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.LegendLabel_13.sizePolicy().hasHeightForWidth())
self.LegendLabel_13.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.LegendLabel_13.setFont(font)
self.LegendLabel_13.setObjectName(_fromUtf8("LegendLabel_13"))
self.gridLayout_4.addWidget(self.LegendLabel_13, 4, 0, 1, 1)
self.TimeUnitsComboBox = QtGui.QComboBox(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TimeUnitsComboBox.sizePolicy().hasHeightForWidth())
self.TimeUnitsComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.TimeUnitsComboBox.setFont(font)
self.TimeUnitsComboBox.setObjectName(_fromUtf8("TimeUnitsComboBox"))
self.gridLayout_4.addWidget(self.TimeUnitsComboBox, 0, 1, 1, 1)
self.XLabel1_2 = QtGui.QLabel(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.XLabel1_2.sizePolicy().hasHeightForWidth())
self.XLabel1_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.XLabel1_2.setFont(font)
self.XLabel1_2.setObjectName(_fromUtf8("XLabel1_2"))
self.gridLayout_4.addWidget(self.XLabel1_2, 0, 0, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_4)
self.UpdateUnitsPushButton = QtGui.QPushButton(self.tab_5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.UpdateUnitsPushButton.sizePolicy().hasHeightForWidth())
self.UpdateUnitsPushButton.setSizePolicy(sizePolicy)
self.UpdateUnitsPushButton.setObjectName(_fromUtf8("UpdateUnitsPushButton"))
self.verticalLayout.addWidget(self.UpdateUnitsPushButton)
self.tabWidget_8.addTab(self.tab_5, _fromUtf8(""))
self.verticalLayout_8.addWidget(self.tabWidget_8)
self.tabWidget = QtGui.QTabWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tabWidget.sizePolicy().hasHeightForWidth())
self.tabWidget.setSizePolicy(sizePolicy)
self.tabWidget.setMinimumSize(QtCore.QSize(1100, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.tabWidget.setFont(font)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.tab_2)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.gridLayout_23 = QtGui.QGridLayout()
self.gridLayout_23.setObjectName(_fromUtf8("gridLayout_23"))
self.MapCheckBox = QtGui.QCheckBox(self.tab_2)
font = QtGui.QFont()
font.setPointSize(8)
self.MapCheckBox.setFont(font)
self.MapCheckBox.setObjectName(_fromUtf8("MapCheckBox"))
self.gridLayout_23.addWidget(self.MapCheckBox, 0, 1, 1, 1)
self.PlotMapPushButton = QtGui.QPushButton(self.tab_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PlotMapPushButton.sizePolicy().hasHeightForWidth())
self.PlotMapPushButton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.PlotMapPushButton.setFont(font)
self.PlotMapPushButton.setObjectName(_fromUtf8("PlotMapPushButton"))
self.gridLayout_23.addWidget(self.PlotMapPushButton, 0, 0, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_23)
self.gridLayout_38 = QtGui.QGridLayout()
self.gridLayout_38.setObjectName(_fromUtf8("gridLayout_38"))
self.Label_11 = QtGui.QLabel(self.tab_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Label_11.sizePolicy().hasHeightForWidth())
self.Label_11.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.Label_11.setFont(font)
self.Label_11.setObjectName(_fromUtf8("Label_11"))
self.gridLayout_38.addWidget(self.Label_11, 0, 0, 1, 1)
self.MapActivitiesSpinBox = QtGui.QSpinBox(self.tab_2)
font = QtGui.QFont()
font.setPointSize(8)
self.MapActivitiesSpinBox.setFont(font)
self.MapActivitiesSpinBox.setMaximum(100000)
self.MapActivitiesSpinBox.setProperty("value", 5)
self.MapActivitiesSpinBox.setObjectName(_fromUtf8("MapActivitiesSpinBox"))
self.gridLayout_38.addWidget(self.MapActivitiesSpinBox, 0, 1, 1, 1)
self.LegendLabel_17 = QtGui.QLabel(self.tab_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.LegendLabel_17.sizePolicy().hasHeightForWidth())
self.LegendLabel_17.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.LegendLabel_17.setFont(font)
self.LegendLabel_17.setObjectName(_fromUtf8("LegendLabel_17"))
self.gridLayout_38.addWidget(self.LegendLabel_17, 0, 2, 1, 1)
self.MapLegendComboBox = QtGui.QComboBox(self.tab_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MapLegendComboBox.sizePolicy().hasHeightForWidth())
self.MapLegendComboBox.setSizePolicy(sizePolicy)
self.MapLegendComboBox.setMinimumSize(QtCore.QSize(0, 0))
self.MapLegendComboBox.setDuplicatesEnabled(False)
self.MapLegendComboBox.setObjectName(_fromUtf8("MapLegendComboBox"))
self.gridLayout_38.addWidget(self.MapLegendComboBox, 0, 3, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout_38)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.SaveMapPushButton = QtGui.QPushButton(self.tab_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SaveMapPushButton.sizePolicy().hasHeightForWidth())
self.SaveMapPushButton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.SaveMapPushButton.setFont(font)
self.SaveMapPushButton.setObjectName(_fromUtf8("SaveMapPushButton"))
self.gridLayout.addWidget(self.SaveMapPushButton, 0, 1, 1, 1)
self.MapFilePathWidget = QtGui.QLineEdit(self.tab_2)
font = QtGui.QFont()
font.setPointSize(8)
self.MapFilePathWidget.setFont(font)
self.MapFilePathWidget.setObjectName(_fromUtf8("MapFilePathWidget"))
self.gridLayout.addWidget(self.MapFilePathWidget, 0, 2, 1, 1)
self.verticalLayout_6.addLayout(self.gridLayout)
self.MapWidgetContainer = QtGui.QVBoxLayout()
self.MapWidgetContainer.setObjectName(_fromUtf8("MapWidgetContainer"))
self.verticalLayout_6.addLayout(self.MapWidgetContainer)
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.Tab1 = QtGui.QWidget()
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.Tab1.sizePolicy().hasHeightForWidth())
self.Tab1.setSizePolicy(sizePolicy)
self.Tab1.setObjectName(_fromUtf8("Tab1"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.Tab1)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.ScatterXComboBox = QtGui.QComboBox(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ScatterXComboBox.sizePolicy().hasHeightForWidth())
self.ScatterXComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.ScatterXComboBox.setFont(font)
self.ScatterXComboBox.setObjectName(_fromUtf8("ScatterXComboBox"))
self.gridLayout_2.addWidget(self.ScatterXComboBox, 0, 1, 1, 1)
self.LegendLabel = QtGui.QLabel(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.LegendLabel.sizePolicy().hasHeightForWidth())
self.LegendLabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.LegendLabel.setFont(font)
self.LegendLabel.setObjectName(_fromUtf8("LegendLabel"))
self.gridLayout_2.addWidget(self.LegendLabel, 1, 2, 1, 1)
self.ScatterLegendComboBox = QtGui.QComboBox(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ScatterLegendComboBox.sizePolicy().hasHeightForWidth())
self.ScatterLegendComboBox.setSizePolicy(sizePolicy)
self.ScatterLegendComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.ScatterLegendComboBox.setFont(font)
self.ScatterLegendComboBox.setDuplicatesEnabled(False)
self.ScatterLegendComboBox.setObjectName(_fromUtf8("ScatterLegendComboBox"))
self.gridLayout_2.addWidget(self.ScatterLegendComboBox, 1, 3, 1, 1)
self.XLabel1 = QtGui.QLabel(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.XLabel1.sizePolicy().hasHeightForWidth())
self.XLabel1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.XLabel1.setFont(font)
self.XLabel1.setObjectName(_fromUtf8("XLabel1"))
self.gridLayout_2.addWidget(self.XLabel1, 0, 0, 1, 1)
self.SizeLabel = QtGui.QLabel(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.SizeLabel.sizePolicy().hasHeightForWidth())
self.SizeLabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.SizeLabel.setFont(font)
self.SizeLabel.setObjectName(_fromUtf8("SizeLabel"))
self.gridLayout_2.addWidget(self.SizeLabel, 1, 0, 1, 1)
self.ScatterSizeComboBox = QtGui.QComboBox(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ScatterSizeComboBox.sizePolicy().hasHeightForWidth())
self.ScatterSizeComboBox.setSizePolicy(sizePolicy)
self.ScatterSizeComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.ScatterSizeComboBox.setFont(font)
self.ScatterSizeComboBox.setDuplicatesEnabled(False)
self.ScatterSizeComboBox.setObjectName(_fromUtf8("ScatterSizeComboBox"))
self.gridLayout_2.addWidget(self.ScatterSizeComboBox, 1, 1, 1, 1)
self.ScatterYComboBox = QtGui.QComboBox(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ScatterYComboBox.sizePolicy().hasHeightForWidth())
self.ScatterYComboBox.setSizePolicy(sizePolicy)
self.ScatterYComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.ScatterYComboBox.setFont(font)
self.ScatterYComboBox.setDuplicatesEnabled(False)
self.ScatterYComboBox.setObjectName(_fromUtf8("ScatterYComboBox"))
self.gridLayout_2.addWidget(self.ScatterYComboBox, 0, 3, 1, 1)
self.YLabel1 = QtGui.QLabel(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.YLabel1.sizePolicy().hasHeightForWidth())
self.YLabel1.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.YLabel1.setFont(font)
self.YLabel1.setObjectName(_fromUtf8("YLabel1"))
self.gridLayout_2.addWidget(self.YLabel1, 0, 2, 1, 1)
self.LegendLabel_12 = QtGui.QLabel(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.LegendLabel_12.sizePolicy().hasHeightForWidth())
self.LegendLabel_12.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.LegendLabel_12.setFont(font)
self.LegendLabel_12.setObjectName(_fromUtf8("LegendLabel_12"))
self.gridLayout_2.addWidget(self.LegendLabel_12, 2, 0, 1, 1)
self.ScatterCMapComboBox = QtGui.QComboBox(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ScatterCMapComboBox.sizePolicy().hasHeightForWidth())
self.ScatterCMapComboBox.setSizePolicy(sizePolicy)
self.ScatterCMapComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.ScatterCMapComboBox.setFont(font)
self.ScatterCMapComboBox.setDuplicatesEnabled(False)
self.ScatterCMapComboBox.setObjectName(_fromUtf8("ScatterCMapComboBox"))
self.gridLayout_2.addWidget(self.ScatterCMapComboBox, 2, 1, 1, 1)
self.ScatterPushButton = QtGui.QPushButton(self.Tab1)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ScatterPushButton.sizePolicy().hasHeightForWidth())
self.ScatterPushButton.setSizePolicy(sizePolicy)
self.ScatterPushButton.setObjectName(_fromUtf8("ScatterPushButton"))
self.gridLayout_2.addWidget(self.ScatterPushButton, 2, 3, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_2)
self.PlotScatterWidgetContainer = QtGui.QVBoxLayout()
self.PlotScatterWidgetContainer.setObjectName(_fromUtf8("PlotScatterWidgetContainer"))
self.verticalLayout_3.addLayout(self.PlotScatterWidgetContainer)
self.tabWidget.addTab(self.Tab1, _fromUtf8(""))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.tab)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.gridLayout_20 = QtGui.QGridLayout()
self.gridLayout_20.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.gridLayout_20.setObjectName(_fromUtf8("gridLayout_20"))
self.XLabel1_5 = QtGui.QLabel(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.XLabel1_5.sizePolicy().hasHeightForWidth())
self.XLabel1_5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.XLabel1_5.setFont(font)
self.XLabel1_5.setObjectName(_fromUtf8("XLabel1_5"))
self.gridLayout_20.addWidget(self.XLabel1_5, 0, 0, 1, 1)
self.LegendLabel_5 = QtGui.QLabel(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.LegendLabel_5.sizePolicy().hasHeightForWidth())
self.LegendLabel_5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.LegendLabel_5.setFont(font)
self.LegendLabel_5.setObjectName(_fromUtf8("LegendLabel_5"))
self.gridLayout_20.addWidget(self.LegendLabel_5, 0, 2, 1, 1)
self.HistLegendComboBox = QtGui.QComboBox(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.HistLegendComboBox.sizePolicy().hasHeightForWidth())
self.HistLegendComboBox.setSizePolicy(sizePolicy)
self.HistLegendComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.HistLegendComboBox.setFont(font)
self.HistLegendComboBox.setDuplicatesEnabled(False)
self.HistLegendComboBox.setObjectName(_fromUtf8("HistLegendComboBox"))
self.gridLayout_20.addWidget(self.HistLegendComboBox, 0, 3, 1, 1)
self.HistXComboBox = QtGui.QComboBox(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.HistXComboBox.sizePolicy().hasHeightForWidth())
self.HistXComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.HistXComboBox.setFont(font)
self.HistXComboBox.setObjectName(_fromUtf8("HistXComboBox"))
self.gridLayout_20.addWidget(self.HistXComboBox, 0, 1, 1, 1)
self.LegendLabel_7 = QtGui.QLabel(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.LegendLabel_7.sizePolicy().hasHeightForWidth())
self.LegendLabel_7.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.LegendLabel_7.setFont(font)
self.LegendLabel_7.setObjectName(_fromUtf8("LegendLabel_7"))
self.gridLayout_20.addWidget(self.LegendLabel_7, 1, 0, 1, 1)
self.HistCMapComboBox = QtGui.QComboBox(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.HistCMapComboBox.sizePolicy().hasHeightForWidth())
self.HistCMapComboBox.setSizePolicy(sizePolicy)
self.HistCMapComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.HistCMapComboBox.setFont(font)
self.HistCMapComboBox.setDuplicatesEnabled(False)
self.HistCMapComboBox.setObjectName(_fromUtf8("HistCMapComboBox"))
self.gridLayout_20.addWidget(self.HistCMapComboBox, 1, 1, 1, 1)
self.HistogramPushButton = QtGui.QPushButton(self.tab)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.HistogramPushButton.sizePolicy().hasHeightForWidth())
self.HistogramPushButton.setSizePolicy(sizePolicy)
self.HistogramPushButton.setObjectName(_fromUtf8("HistogramPushButton"))
self.gridLayout_20.addWidget(self.HistogramPushButton, 1, 3, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout_20)
self.PlotHistWidgetContainer = QtGui.QVBoxLayout()
self.PlotHistWidgetContainer.setObjectName(_fromUtf8("PlotHistWidgetContainer"))
self.verticalLayout_5.addLayout(self.PlotHistWidgetContainer)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.Tab3 = QtGui.QWidget()
self.Tab3.setObjectName(_fromUtf8("Tab3"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.Tab3)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.StartTimeLabel_4 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel_4.sizePolicy().hasHeightForWidth())
self.StartTimeLabel_4.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel_4.setFont(font)
self.StartTimeLabel_4.setObjectName(_fromUtf8("StartTimeLabel_4"))
self.verticalLayout_2.addWidget(self.StartTimeLabel_4)
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.TraceStyleComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TraceStyleComboBox.sizePolicy().hasHeightForWidth())
self.TraceStyleComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.TraceStyleComboBox.setFont(font)
self.TraceStyleComboBox.setObjectName(_fromUtf8("TraceStyleComboBox"))
self.gridLayout_5.addWidget(self.TraceStyleComboBox, 1, 3, 1, 1)
self.StartTimeLabel_5 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel_5.sizePolicy().hasHeightForWidth())
self.StartTimeLabel_5.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel_5.setFont(font)
self.StartTimeLabel_5.setObjectName(_fromUtf8("StartTimeLabel_5"))
self.gridLayout_5.addWidget(self.StartTimeLabel_5, 2, 0, 1, 1)
self.StartTimeLabel_3 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel_3.sizePolicy().hasHeightForWidth())
self.StartTimeLabel_3.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel_3.setFont(font)
self.StartTimeLabel_3.setObjectName(_fromUtf8("StartTimeLabel_3"))
self.gridLayout_5.addWidget(self.StartTimeLabel_3, 1, 0, 1, 1)
self.XLabel2 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.XLabel2.sizePolicy().hasHeightForWidth())
self.XLabel2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.XLabel2.setFont(font)
self.XLabel2.setObjectName(_fromUtf8("XLabel2"))
self.gridLayout_5.addWidget(self.XLabel2, 0, 1, 1, 1)
self.YLabel2 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.YLabel2.sizePolicy().hasHeightForWidth())
self.YLabel2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.YLabel2.setFont(font)
self.YLabel2.setObjectName(_fromUtf8("YLabel2"))
self.gridLayout_5.addWidget(self.YLabel2, 0, 2, 1, 1)
self.TraceBottomXComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TraceBottomXComboBox.sizePolicy().hasHeightForWidth())
self.TraceBottomXComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.TraceBottomXComboBox.setFont(font)
self.TraceBottomXComboBox.setObjectName(_fromUtf8("TraceBottomXComboBox"))
self.gridLayout_5.addWidget(self.TraceBottomXComboBox, 2, 1, 1, 1)
self.TraceBottomYComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TraceBottomYComboBox.sizePolicy().hasHeightForWidth())
self.TraceBottomYComboBox.setSizePolicy(sizePolicy)
self.TraceBottomYComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.TraceBottomYComboBox.setFont(font)
self.TraceBottomYComboBox.setDuplicatesEnabled(False)
self.TraceBottomYComboBox.setObjectName(_fromUtf8("TraceBottomYComboBox"))
self.gridLayout_5.addWidget(self.TraceBottomYComboBox, 2, 2, 1, 1)
self.TraceTopYComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TraceTopYComboBox.sizePolicy().hasHeightForWidth())
self.TraceTopYComboBox.setSizePolicy(sizePolicy)
self.TraceTopYComboBox.setMinimumSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setPointSize(8)
self.TraceTopYComboBox.setFont(font)
self.TraceTopYComboBox.setDuplicatesEnabled(False)
self.TraceTopYComboBox.setObjectName(_fromUtf8("TraceTopYComboBox"))
self.gridLayout_5.addWidget(self.TraceTopYComboBox, 1, 2, 1, 1)
self.TraceTopXComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TraceTopXComboBox.sizePolicy().hasHeightForWidth())
self.TraceTopXComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.TraceTopXComboBox.setFont(font)
self.TraceTopXComboBox.setObjectName(_fromUtf8("TraceTopXComboBox"))
self.gridLayout_5.addWidget(self.TraceTopXComboBox, 1, 1, 1, 1)
self.StartTimeLabel_6 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel_6.sizePolicy().hasHeightForWidth())
self.StartTimeLabel_6.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel_6.setFont(font)
self.StartTimeLabel_6.setObjectName(_fromUtf8("StartTimeLabel_6"))
self.gridLayout_5.addWidget(self.StartTimeLabel_6, 0, 4, 1, 1)
self.TraceColourComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.TraceColourComboBox.sizePolicy().hasHeightForWidth())
self.TraceColourComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.TraceColourComboBox.setFont(font)
self.TraceColourComboBox.setObjectName(_fromUtf8("TraceColourComboBox"))
self.gridLayout_5.addWidget(self.TraceColourComboBox, 1, 4, 1, 1)
self.MapStyleComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MapStyleComboBox.sizePolicy().hasHeightForWidth())
self.MapStyleComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.MapStyleComboBox.setFont(font)
self.MapStyleComboBox.setObjectName(_fromUtf8("MapStyleComboBox"))
self.gridLayout_5.addWidget(self.MapStyleComboBox, 2, 3, 1, 1)
self.StartTimeLabel_2 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel_2.sizePolicy().hasHeightForWidth())
self.StartTimeLabel_2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel_2.setFont(font)
self.StartTimeLabel_2.setObjectName(_fromUtf8("StartTimeLabel_2"))
self.gridLayout_5.addWidget(self.StartTimeLabel_2, 0, 3, 1, 1)
self.StartTimeLabel_7 = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel_7.sizePolicy().hasHeightForWidth())
self.StartTimeLabel_7.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel_7.setFont(font)
self.StartTimeLabel_7.setObjectName(_fromUtf8("StartTimeLabel_7"))
self.gridLayout_5.addWidget(self.StartTimeLabel_7, 0, 5, 1, 1)
self.MapColourComboBox = QtGui.QComboBox(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.MapColourComboBox.sizePolicy().hasHeightForWidth())
self.MapColourComboBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.MapColourComboBox.setFont(font)
self.MapColourComboBox.setObjectName(_fromUtf8("MapColourComboBox"))
self.gridLayout_5.addWidget(self.MapColourComboBox, 2, 4, 1, 1)
self.MapLegendCheckBox = QtGui.QCheckBox(self.Tab3)
self.MapLegendCheckBox.setObjectName(_fromUtf8("MapLegendCheckBox"))
self.gridLayout_5.addWidget(self.MapLegendCheckBox, 2, 5, 1, 1)
self.TraceLegendCheckBox = QtGui.QCheckBox(self.Tab3)
self.TraceLegendCheckBox.setObjectName(_fromUtf8("TraceLegendCheckBox"))
self.gridLayout_5.addWidget(self.TraceLegendCheckBox, 1, 5, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_5)
self.line_3 = QtGui.QFrame(self.Tab3)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.verticalLayout_2.addWidget(self.line_3)
self.gridLayout_6 = QtGui.QGridLayout()
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.EndTimeLabel = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.EndTimeLabel.sizePolicy().hasHeightForWidth())
self.EndTimeLabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.EndTimeLabel.setFont(font)
self.EndTimeLabel.setObjectName(_fromUtf8("EndTimeLabel"))
self.gridLayout_6.addWidget(self.EndTimeLabel, 0, 2, 1, 1)
self.StartTimeDoubleSpinBox = QtGui.QDoubleSpinBox(self.Tab3)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeDoubleSpinBox.setFont(font)
self.StartTimeDoubleSpinBox.setMaximum(10000000.0)
self.StartTimeDoubleSpinBox.setSingleStep(0.05)
self.StartTimeDoubleSpinBox.setObjectName(_fromUtf8("StartTimeDoubleSpinBox"))
self.gridLayout_6.addWidget(self.StartTimeDoubleSpinBox, 0, 1, 1, 1)
self.EndTimeDoubleSpinBox = QtGui.QDoubleSpinBox(self.Tab3)
font = QtGui.QFont()
font.setPointSize(8)
self.EndTimeDoubleSpinBox.setFont(font)
self.EndTimeDoubleSpinBox.setMaximum(10000000.0)
self.EndTimeDoubleSpinBox.setSingleStep(0.05)
self.EndTimeDoubleSpinBox.setProperty("value", 1000.0)
self.EndTimeDoubleSpinBox.setObjectName(_fromUtf8("EndTimeDoubleSpinBox"))
self.gridLayout_6.addWidget(self.EndTimeDoubleSpinBox, 0, 3, 1, 1)
self.StartTimeLabel = QtGui.QLabel(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.StartTimeLabel.sizePolicy().hasHeightForWidth())
self.StartTimeLabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.StartTimeLabel.setFont(font)
self.StartTimeLabel.setObjectName(_fromUtf8("StartTimeLabel"))
self.gridLayout_6.addWidget(self.StartTimeLabel, 0, 0, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_6)
self.PlotTracePushButton = QtGui.QPushButton(self.Tab3)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.PlotTracePushButton.sizePolicy().hasHeightForWidth())
self.PlotTracePushButton.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.PlotTracePushButton.setFont(font)
self.PlotTracePushButton.setObjectName(_fromUtf8("PlotTracePushButton"))
self.verticalLayout_2.addWidget(self.PlotTracePushButton)
self.PlotTraceWidgetContainer = QtGui.QVBoxLayout()
self.PlotTraceWidgetContainer.setObjectName(_fromUtf8("PlotTraceWidgetContainer"))
self.verticalLayout_2.addLayout(self.PlotTraceWidgetContainer)
self.tabWidget.addTab(self.Tab3, _fromUtf8(""))
self.Tab4 = QtGui.QWidget()
self.Tab4.setObjectName(_fromUtf8("Tab4"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.Tab4)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.WarningLabel2 = QtGui.QLabel(self.Tab4)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.WarningLabel2.sizePolicy().hasHeightForWidth())
self.WarningLabel2.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.WarningLabel2.setFont(font)
self.WarningLabel2.setAlignment(QtCore.Qt.AlignCenter)
self.WarningLabel2.setObjectName(_fromUtf8("WarningLabel2"))
self.verticalLayout_4.addWidget(self.WarningLabel2)
self.Table2Widget = QtGui.QTableWidget(self.Tab4)
self.Table2Widget.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.Table2Widget.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.Table2Widget.setObjectName(_fromUtf8("Table2Widget"))
self.Table2Widget.setColumnCount(0)
self.Table2Widget.setRowCount(0)
self.verticalLayout_4.addWidget(self.Table2Widget)
self.tabWidget.addTab(self.Tab4, _fromUtf8(""))
self.horizontalLayout.addWidget(self.splitter)
DataBaseGUI.setCentralWidget(self.centralwidget)
self.retranslateUi(DataBaseGUI)
self.tabWidget_8.setCurrentIndex(0)
self.tabWidget.setCurrentIndex(3)
QtCore.QMetaObject.connectSlotsByName(DataBaseGUI)
def retranslateUi(self, DataBaseGUI):
DataBaseGUI.setWindowTitle(_translate("DataBaseGUI", "Ana\'s GPS DataBase", None))
self.StartDateLabel_8.setText(_translate("DataBaseGUI", "Start date: ", None))
self.StartDateLabel_7.setText(_translate("DataBaseGUI", "End date: ", None))
self.FilterPlotDatabasePushButton.setText(_translate("DataBaseGUI", "Filter database", None))
self.NewFilePushButton.setText(_translate("DataBaseGUI", " Select New Database File: ", None))
self.SaveDataPushButton.setText(_translate("DataBaseGUI", " Save Database to File: ", None))
self.Label_10.setText(_translate("DataBaseGUI", "Number of rows to display: ", None))
self.Label_9.setText(_translate("DataBaseGUI", "Number of rows in the filtered database: ", None))
self.Table1Widget.setSortingEnabled(False)
self.tabWidget_8.setTabText(self.tabWidget_8.indexOf(self.tab_4), _translate("DataBaseGUI", "Database", None))
self.ActivitiesLabel.setText(_translate("DataBaseGUI", "Activities:", None))
self.SportsLabel.setText(_translate("DataBaseGUI", "Sports:", None))
self.GearLabel.setText(_translate("DataBaseGUI", "Gear:", None))
self.EndLocationLabel.setText(_translate("DataBaseGUI", "End Location:", None))
self.StartLocationLabel.setText(_translate("DataBaseGUI", "Start Location:", None))
self.tabWidget_8.setTabText(self.tabWidget_8.indexOf(self.tab_3), _translate("DataBaseGUI", "Filters", None))
self.SIUnitsPushButton.setText(_translate("DataBaseGUI", "Set units to SI", None))
self.XLabel1_3.setText(_translate("DataBaseGUI", "Position: ", None))
self.SizeLabel_2.setText(_translate("DataBaseGUI", "Distance: ", None))
self.LegendLabel_13.setText(_translate("DataBaseGUI", "Speed: ", None))
self.XLabel1_2.setText(_translate("DataBaseGUI", "Time: ", None))
self.UpdateUnitsPushButton.setText(_translate("DataBaseGUI", "Update database table", None))
self.tabWidget_8.setTabText(self.tabWidget_8.indexOf(self.tab_5), _translate("DataBaseGUI", "Units", None))
self.MapCheckBox.setText(_translate("DataBaseGUI", "Generate map when filtering. ", None))
self.PlotMapPushButton.setText(_translate("DataBaseGUI", "Update map (it can be very slow). ", None))
self.Label_11.setText(_translate("DataBaseGUI", "Number of activities to display: ", None))
self.LegendLabel_17.setText(_translate("DataBaseGUI", "Legend: ", None))
self.SaveMapPushButton.setText(_translate("DataBaseGUI", " Save Map to File: ", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("DataBaseGUI", "Map", None))
self.LegendLabel.setText(_translate("DataBaseGUI", "Legend: ", None))
self.XLabel1.setText(_translate("DataBaseGUI", "X Axis: ", None))
self.SizeLabel.setText(_translate("DataBaseGUI", "Marker Size: ", None))
self.YLabel1.setText(_translate("DataBaseGUI", "Y Axis: ", None))
self.LegendLabel_12.setText(_translate("DataBaseGUI", "Color Map: ", None))
self.ScatterPushButton.setText(_translate("DataBaseGUI", "Update scatter plot", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Tab1), _translate("DataBaseGUI", "Scatter", None))
self.XLabel1_5.setText(_translate("DataBaseGUI", "X Axis: ", None))
self.LegendLabel_5.setText(_translate("DataBaseGUI", "Legend: ", None))
self.LegendLabel_7.setText(_translate("DataBaseGUI", "Color Map: ", None))
self.HistogramPushButton.setText(_translate("DataBaseGUI", "Update histogram plot", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("DataBaseGUI", "Histogram", None))
self.StartTimeLabel_4.setText(_translate("DataBaseGUI", "Select rows from the database table to plot their data below.", None))
self.StartTimeLabel_5.setText(_translate("DataBaseGUI", "Bottom plot:", None))
self.StartTimeLabel_3.setText(_translate("DataBaseGUI", "Top plot:", None))
self.XLabel2.setText(_translate("DataBaseGUI", "X Axis: ", None))
self.YLabel2.setText(_translate("DataBaseGUI", "Y Axis: ", None))
self.StartTimeLabel_6.setText(_translate("DataBaseGUI", "Plot colour:", None))
self.StartTimeLabel_2.setText(_translate("DataBaseGUI", "Plot kind: ", None))
self.StartTimeLabel_7.setText(_translate("DataBaseGUI", "Plot legend:", None))
self.MapLegendCheckBox.setText(_translate("DataBaseGUI", "Visible", None))
self.TraceLegendCheckBox.setText(_translate("DataBaseGUI", "Visible", None))
self.EndTimeLabel.setText(_translate("DataBaseGUI", "End time: ", None))
self.StartTimeLabel.setText(_translate("DataBaseGUI", "Start time: ", None))
self.PlotTracePushButton.setText(_translate("DataBaseGUI", "Read activities and update database", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Tab3), _translate("DataBaseGUI", "Trace", None))
self.WarningLabel2.setText(_translate("DataBaseGUI", "Displays 50 rows maximum.", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Tab4), _translate("DataBaseGUI", "Table", None))
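# Minimal usage sketch for the generated UI class above. The class name
# Ui_DataBaseGUI is an assumption (pyuic4 names the class after the form);
# adjust it to the actual class name emitted by pyuic4.
#
#   import sys
#   from PyQt4 import QtGui
#
#   app = QtGui.QApplication(sys.argv)
#   window = QtGui.QMainWindow()
#   ui = Ui_DataBaseGUI()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())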
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from gluon import *
import numpy as np
import random
from rank import Cost
from rank import Rank
import util
NUM_BINS = 2001
AVRG = NUM_BINS / 2
STDEV = NUM_BINS / 8
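# With NUM_BINS = 2001 and Python 2 integer division, AVRG = 1000 and
# STDEV = 250: the prior quality distribution assigned to submissions that
# have not been reviewed yet.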
def read_db_for_get_item(venue_id):
    """ Fills the containers used by the get_item function.
    - subm_list - list of submission ids.
    - qdistr_param - list of quality distribution parameters for the
      submissions, such that the i-th submission subm_list[i] has
      parameters qdistr_param[2*i] (quality mean) and
      qdistr_param[2*i + 1] (quality standard deviation).
    - subm_to_assigned - a dictionary: subm id -> number of times it was assigned.
    - subm_to_finished - a dictionary: subm id -> number of times it was
      completed + rejected.
    """
db = current.db
# List of all submissions id for given venue.
subm_list = []
subm_to_assigned = {}
subm_to_finished = {}
subm_records = db(db.submission.venue_id == venue_id).select()
    # Fetching the quality distribution parameters for each submission.
qdistr_param = []
for row in subm_records:
subm_list.append(row.id)
if row.quality is None or row.error is None:
qdistr_param.append(AVRG)
qdistr_param.append(STDEV)
else:
qdistr_param.append(row.quality)
qdistr_param.append(row.error)
subm_to_assigned[row.id] = row.n_assigned_reviews
subm_to_finished[row.id] = row.n_completed_reviews + row.n_rejected_reviews
return subm_list, qdistr_param, subm_to_assigned, subm_to_finished
def get_qdistr_param(venue_id, items_id):
db = current.db
    if items_id is None:
return None
qdistr_param = []
for x in items_id:
quality_row = db((db.submission.venue_id == venue_id) &
(db.submission.id == x)).select(db.submission.quality,
db.submission.error).first()
if (quality_row is None or quality_row.quality is None or
quality_row.error is None):
qdistr_param.append(AVRG)
qdistr_param.append(STDEV)
else:
qdistr_param.append(quality_row.quality)
qdistr_param.append(quality_row.error)
return qdistr_param
def get_init_average_stdev():
""" Method returns tuple with average and stdev for initializing
field in table quality.
"""
return AVRG, STDEV
def get_subm_assigned_to_user(venue_id, user):
""" Method return three list:
- old_items - submissions assigned but not rejected by the user.
- rejected_items - submissions rejected by the user.
- users_items - submissions authored by the user."""
db = current.db
old_items = []
rejected_items = []
users_items = []
old_tasks = db((db.task.venue_id == venue_id) & (db.task.user == user)).select()
for task in old_tasks:
if task.rejected:
rejected_items.append(task.submission_id)
else:
old_items.append(task.submission_id)
# Fetching submissions authored by the user.
rows = db((db.submission.venue_id == venue_id) &
(db.submission.user == user)).select(db.submission.id)
if rows is not None:
users_items = [r.id for r in rows]
return old_items, rejected_items, users_items
def none_as_zero(el):
if el is None:
return 0
else:
return el
def get_list_min_subm(subm_list, subm_to_assigned,
subm_to_finished, subm_to_recent):
"""Gets the list of submissions that have received the least number of reviwes,
counting as a review also recently assigned review tasks (that are still likely
to be completed)."""
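    # Illustrative example: a submission assigned 3 times, finished once, and
    # with one recent pending task gets count = min(3, 1 + 1 + 1) = 3; only
    # the ids achieving the minimum count over subm_list are returned.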
freq_list = []
for subm_id in subm_list:
count = min(subm_to_assigned[subm_id],
subm_to_finished[subm_id] + none_as_zero(subm_to_recent[subm_id]) + 1)
freq_list.append((subm_id, count))
m = min([x[1] for x in freq_list])
list_min_subm = [x[0] for x in freq_list if x[1] == m]
return list_min_subm
def has_min_count(subm_id, venue_id, subm_to_assigned, subm_to_finished,
subm_to_recent, time_window):
"""Checks that a submission indeed has minimum number of reviews that
have been done or recently assigned."""
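    # A pending task counts as "recent" when it was assigned less than
    # time_window hours ago (see the timedelta check below).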
db = current.db
if subm_to_assigned[subm_id] == subm_to_finished[subm_id]:
return True
    if subm_to_recent[subm_id] is not None:
        # The recent count for this submission was already computed.
        return True
    # We need to check how long ago the pending reviews were assigned.
rows = db(db.task.submission_id == subm_id).select()
counter = 0
t = datetime.utcnow()
for r in rows:
        if (not r.is_completed and not r.rejected
                and t - r.assigned_date < timedelta(hours=time_window)):
counter += 1
if subm_to_recent[subm_id] == counter:
return True
subm_to_recent[subm_id] = counter
return False
def get_item(venue_id, user, can_rank_own_submissions=False,
sample_always_randomly=False, time_window=2):
"""
    Description of the sampling method:
    We always sample an item that has minimum count, where
    count = min(times assigned, times completed + times rejected + recent + 1).
    If sample_always_randomly is True, we always sample uniformly at random.
    Otherwise, we sample uniformly at random in only half of the cases; in the
    other half we sample proportionally to the misrank error.
"""
db = current.db
# Reading the db.
subm_list, qdistr_param, subm_to_assigned, subm_to_finished = read_db_for_get_item(venue_id)
# Get submissions assigned to the user.
old_items, rejected_items, users_items = get_subm_assigned_to_user(venue_id, user)
if can_rank_own_submissions:
users_items = []
# Filter subm_list by deleting old_items, rejected_items, users_items.
subm_list_filtered = [x for x in subm_list if x not in old_items and
x not in rejected_items and
x not in users_items]
# Check whether we have items to sample from or not.
if len(subm_list_filtered) == 0:
return None
    # subm_to_recent is a dictionary mapping a submission id to the number of
    # review tasks that were assigned recently and are still pending.
    # Since this is expensive to compute for each submission, we initialize it
    # with None and fill in the real count later, per item, only when needed.
subm_to_recent = dict((subm_id, None) for subm_id in subm_list_filtered)
# List of submissions with min reviews, candidates for assignment.
list_min_subm = []
    # Now we try to sample a submission.
while True:
if len(list_min_subm) == 0:
list_min_subm = get_list_min_subm(subm_list_filtered,
subm_to_assigned,
subm_to_finished, subm_to_recent)
if random.random() < 0.5 or sample_always_randomly:
# Sample randomly.
new_subm = random.sample(list_min_subm, 1)[0]
else:
# Sample using quality distributions.
# Constructing pool of items.
pool_items = list_min_subm[:]
pool_items.extend(old_items)
# Fetching quality distribution parameters.
qdistr_param_pool = []
for subm_id in pool_items:
idx = subm_list.index(subm_id)
qdistr_param_pool.append(qdistr_param[2 * idx])
qdistr_param_pool.append(qdistr_param[2 * idx + 1])
rankobj = Rank.from_qdistr_param(pool_items, qdistr_param_pool)
new_subm = rankobj.sample_item(old_items)
# Okay, we have sampled a new submission, now let's check that it has
# minimum count indeed.
if has_min_count(new_subm, venue_id, subm_to_assigned,
subm_to_finished, subm_to_recent, time_window):
return new_subm
else:
list_min_subm.remove(new_subm)
def process_comparison(venue_id, user, sorted_items, new_item,
alpha_annealing=0.6):
""" Function updates quality distributions and rank of submissions (items).
Arguments:
- sorted_items is a list of submissions id sorted by user such that
rank(sorted_items[i]) > rank(sorted_items[j]) for i > j
- new_item is an id of a submission from sorted_items which was new
to the user. If sorted_items contains only two elements then
new_item is None.
"""
db = current.db
    if sorted_items is None or len(sorted_items) <= 1:
return None
qdistr_param = get_qdistr_param(venue_id, sorted_items)
# If qdistr_param is None then some submission does not have qualities yet,
# therefore we cannot process comparison.
    if qdistr_param is None:
return None
rankobj = Rank.from_qdistr_param(sorted_items, qdistr_param,
alpha=alpha_annealing)
result = rankobj.update(sorted_items, new_item)
# Updating the DB.
for x in sorted_items:
perc, avrg, stdev = result[x]
# Updating submission table with its quality and error.
db((db.submission.id == x) &
(db.submission.venue_id == venue_id)).update(quality=avrg, error=stdev)
def get_or_0(d, k):
r = d.get(k, None)
    if r is None:
return 0.0
else:
return r
def compute_final_grades_helper(list_of_users, user_to_subm_grade,
user_to_rev_grade, review_percentage=25):
"""This function computes the final grades. We assume that every user has only one submission.
Arguments:
- list_of_users contains all users who submitted or reviewed submissions
"""
# Review percentage as a [0, 1] float.
review_percentage_01 = review_percentage / 100.0
# Computes the final grade.
user_to_final_grade = {}
for u in list_of_users:
g = (get_or_0(user_to_subm_grade, u) * (1.0 - review_percentage_01) +
get_or_0(user_to_rev_grade, u) * review_percentage_01)
user_to_final_grade[u] = g
# Computes the final grade percentiles.
l = []
for u, g in user_to_final_grade.iteritems():
l.append((u, g))
sorted_l = sorted(l, key = lambda x: x[1], reverse=True)
user_to_perc = {}
n_users = float(len(sorted_l))
for i, el in enumerate(sorted_l):
user_to_perc[el[0]] = 100.0 * (n_users - float(i)) / n_users
return user_to_perc, user_to_final_grade
def read_db_for_rep_sys(venue_id):
db = current.db
logger = current.logger
# Containers to fill.
# Lists have l suffix, dictionaries user -> val have d suffix.
user_l = [] # This list contains submitters and reviewers.
subm_l = []
subm_d = {}
ordering_l = []
ordering_d = {}
qdist_param = []
# Reading submission table.
rows = db(db.submission.venue_id == venue_id).select()
for r in rows:
subm_l.append(r.id)
subm_d[r.user] = r.id
user_l.append(r.user)
qdist_param.append(r.quality)
qdist_param.append(r.error)
# Reading comparisons table.
rows = db((db.comparison.venue_id == venue_id) & (db.comparison.is_valid == True)).select()
for r in rows:
# Reverses the ordering.
sorted_items = util.get_list(r.ordering)[::-1]
if len(sorted_items) < 2:
continue
ordering_d[r.user] = sorted_items
        # Collecting each ordering together with its reviewer.
ordering_l.append((sorted_items, r.user))
# Adding reviewers to user_l.
for user in ordering_d.iterkeys():
if user not in user_l:
user_l.append(user)
return user_l, subm_l, ordering_l, subm_d, ordering_d, qdist_param
def read_reputations(venue_id, publish, run_id):
"""This returns rep_d."""
db = current.db
if publish:
rows = db(db.grades.venue_id == venue_id).select()
else:
rows = db((db.grades_exp.venue_id == venue_id ) &
(db.grades_exp.run_id == run_id)).select()
rep_d = {}
for r in rows:
rep_d[r.user] = r.reputation
return rep_d
def get_list_of_all_students(venue_id):
""" Gets the users that participate in the class."""
db = current.db
logger = current.logger
c = db.venue(venue_id)
ul = []
r = db.user_list(c.submit_constraint)
if r is not None:
ul = util.get_list(r.user_list)
if not c.raters_equal_submitters:
ulr = []
r = db.user_list(c.rate_constraint)
if r is not None:
ulr = util.get_list(r.user_list)
ul = util.union_list(ul, ulr)
return ul
def write_to_db_iteration(venue_id, rankobj_result, subm_l, user_l,
ordering_d, accuracy_d, rep_d, subm_d,
publish, run_id):
"""Writes to the db the result of an iteration."""
db = current.db
logger = current.logger
# Gets the users that participate in the class.
ul = get_list_of_all_students(venue_id)
if len(ul) == 0:
# Nothing to write.
if publish:
db(db.grades.venue_id == venue_id).delete()
db.commit()
return
else:
db((db.grades_exp.venue_id == venue_id) &
(db.grades_exp.run_id == run_id)).delete()
db.commit()
return
# Writes to the submission.
for u in ul:
subm_id = subm_d.get(u)
if subm_id is not None:
perc, avrg, stdev = rankobj_result[subm_id]
db(db.submission.id == subm_id).update(quality=avrg, error=stdev, percentile=perc)
if publish:
# Writes to db.grades table.
for u in ul:
if ordering_d.has_key(u):
n_ratings = len(ordering_d[u])
else:
n_ratings = 0
db.grades.update_or_insert((db.grades.venue_id == venue_id) &
(db.grades.user == u),
venue_id = venue_id,
user = u,
accuracy = accuracy_d.get(u),
reputation = rep_d.get(u),
)
else:
# Writes the grades for each user.
for u in ul:
db.grades_exp.update_or_insert((db.grades_exp.venue_id == venue_id) &
(db.grades_exp.user == u) &
(db.grades_exp.run_id == run_id),
venue_id = venue_id,
user = u,
run_id = run_id,
review_grade = accuracy_d.get(u),
reputation = rep_d.get(u),
)
db.commit()
def write_to_db_final_result(venue_id, rankobj_result, subm_l, user_l,
ordering_d, accuracy_d, rep_d, perc_final_d,
final_grade_d, subm_d, ranking_algo_description,
publish, run_id):
db = current.db
logger = current.logger
accuracy_perc_d = util.compute_percentile(accuracy_d)
# Gets the users that participate in the class.
ul = get_list_of_all_students(venue_id)
if len(ul) == 0:
# Nothing to write.
if publish:
db(db.grades.venue_id == venue_id).delete()
db.commit()
return
else:
db((db.grades_exp.venue_id == venue_id) &
(db.grades_exp.run_id == run_id)).delete()
db.commit()
return
# Writes to the submission.
user_to_subm_perc = {}
for u in ul:
subm_id = subm_d.get(u)
if subm_id is not None:
perc, avrg, stdev = rankobj_result[subm_id]
db(db.submission.id == subm_id).update(quality=avrg, error=stdev, percentile=perc)
submission_percentile = perc
else:
submission_percentile = None
user_to_subm_perc[u] = submission_percentile
if publish:
# Write grades to db.grades.
for u in ul:
if ordering_d.has_key(u):
n_ratings = len(ordering_d[u])
else:
n_ratings = 0
db.grades.update_or_insert((db.grades.venue_id == venue_id) &
(db.grades.user == u),
venue_id = venue_id,
user = u,
submission_percentile = user_to_subm_perc[u],
grade = None,
accuracy = accuracy_d.get(u),
accuracy_percentile = accuracy_perc_d.get(u),
reputation = rep_d.get(u),
n_ratings = n_ratings,
percentile = perc_final_d.get(u),
)
# Saving evaluation date.
t = datetime.utcnow()
db(db.venue.id == venue_id).update(latest_grades_date = t,
ranking_algo_description = ranking_algo_description)
else:
for u in ul:
# Write grades to db.grades_exp.
db.grades_exp.update_or_insert((db.grades_exp.venue_id == venue_id) &
(db.grades_exp.user == u) &
(db.grades_exp.run_id == run_id),
venue_id = venue_id,
user = u,
run_id = run_id,
subm_grade = None,
submission_percent = user_to_subm_perc[u],
review_grade = accuracy_d.get(u),
review_percent = accuracy_perc_d.get(u),
reputation = rep_d.get(u),
grade = perc_final_d.get(u),
)
db.commit()
def run_reputation_system(venue_id,
review_percentage=25,
alpha_annealing=0.5,
num_of_iterations=4,
num_small_iterations=14,
base_reputation=1.0,
startover=False,
publish=False,
run_id='default'):
""" Function calculates submission qualities, user's reputation, reviewer's
quality and final grades.
Arguments:
- num_small_iterations works as a switch between two types of reputation system
If the argument is None then we update using all comparisons one time in chronological order.
Otherwise we use "small alpha" approach, where num_small_iterations is
number of iterations.
"""
db = current.db
logger = current.logger
# Reading the DB to get submission and user information.
# Lists have l suffix, dictionaries user -> val have d suffix.
logger.info("Reading information for venue %d" % venue_id)
user_l, subm_l, ordering_l, subm_d, ordering_d, qdist_param = read_db_for_rep_sys(venue_id)
logger.info("Finished reading.")
logger.info("Starting iteration number %d" % num_of_iterations)
# Okay, now we are ready to run main iterations.
result = None
if startover:
logger.info("Starting the computation from defaults.")
# Initializing the rest of containers.
qdist_param_default = []
for subm in subm_l:
qdist_param_default.append(AVRG)
qdist_param_default.append(STDEV)
rep_d = {user: alpha_annealing for user in user_l}
rankobj = Rank.from_qdistr_param(subm_l, qdist_param_default,
alpha=alpha_annealing)
else:
logger.info("Using results from previous iteration.")
rep_d = read_reputations(venue_id, publish, run_id)
rankobj = Rank.from_qdistr_param(subm_l, qdist_param,
alpha=alpha_annealing)
logger.info("Doing small iterations")
for i in xrange(num_small_iterations):
# Generating a random permutation.
idxs = range(len(ordering_l))
random.shuffle(idxs)
for idx in idxs:
ordering, user = ordering_l[idx]
alpha = rep_d[user]
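# The per-iteration alpha below is chosen so that composing it over
# num_small_iterations steps recovers the user's full alpha:
# 1 - (1 - alpha_small) ** num_small_iterations == alpha.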
alpha = 1 - (1 - alpha) ** (1.0/(num_small_iterations))
# This processes one comparison.
result = rankobj.update(ordering, alpha_annealing=alpha,
annealing_type='after_normalization')
if result is None:
# Too few submissions; let's just say that they are all good.
result = {}
accuracy_d = {}
for u in subm_d:
result[subm_d[u]] = (100.0, 1.0, 1.0)
for u in rep_d:
accuracy_d[u] = 1.0
rep_d[u] = 1.0
else:
# Computing reputation.
logger.info("Computing user reputations")
accuracy_d = {}
rep_d = {}
for user in user_l:
if subm_d.has_key(user):
perc, avrg, stdev = result[subm_d[user]]
rank = perc / 100.0
else:
rank = 0.5
if ordering_d.has_key(user):
ordering = ordering_d[user]
accuracy = rankobj.evaluate_ordering_using_dirichlet(ordering)
else:
accuracy = 0.0
accuracy_d[user] = accuracy
# Compute the user's reputation.
rep_d[user] = 0.1 + 0.9 * (accuracy * (rank ** 0.5))
# rep_d[user] = ((rank + base_reputation) * (accuracy + base_reputation)) ** 0.5 - base_reputation
if num_of_iterations == 1:
# Computing submission grades.
subm_grade_d = {}
for user, subm in subm_d.iteritems():
perc, avrg, stdev = result[subm]
subm_grade_d[user] = perc / 100.0
# Computing final grades.
logger.info("Computing final grade")
perc_final_d, final_grade_d = compute_final_grades_helper(user_l, subm_grade_d, rep_d,
review_percentage=review_percentage)
if num_small_iterations is None:
description = "Reputation system on all comparisons in chronological order"
if num_of_iterations == 1:
description = "Ranking without reputation system. All comparisons are used in chronological order"
else:
description = "Reputation system with small alpha and only last comparisons"
if num_of_iterations == 1:
description = "No reputation system and small alpha !?!?"
# Writing to the DB.
logger.info("Writing grades to db")
write_to_db_final_result(venue_id, result, subm_l, user_l, ordering_d,
accuracy_d, rep_d, perc_final_d,
final_grade_d, subm_d,
description, publish, run_id)
logger.info("Written grades to db")
return None
else:
# Writes the results of the iteration.
write_to_db_iteration(venue_id, result, subm_l, user_l, ordering_d,
accuracy_d, rep_d, subm_d, publish, run_id)
# Spawns one more iteration.
return URL('queues', 'run_rep_sys', vars={
current.REPUTATION_SYSTEM_PARAM_NUM_ITERATIONS: num_of_iterations - 1,
current.REPUTATION_SYSTEM_PARAM_VENUE_ID: venue_id,
current.REPUTATION_SYSTEM_RUN_ID: run_id,
current.REPUTATION_SYSTEM_PARAM_REVIEW_PERCENTAGE: review_percentage,
current.REPUTATION_SYSTEM_STARTOVER: 'False',
current.REPUTATION_SYSTEM_PUBLISH: publish,
})
|
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import json
import time
import sys
import argparse
import hmac
import urllib
import fnmatch
import socket
import urllib.request
import subprocess
from http.client import HTTPConnection
from http.client import HTTPSConnection
"""
This tool builds a release from a given elasticsearch branch.
In order to execute it go in the top level directory and run:
$ python3 dev_tools/build_release.py --branch 0.90 --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done. The script takes over almost all
steps necessary for a release. From a high-level point of view it does the following things:
- run prerequisite checks, i.e. check that Java 1.7 is present and that S3 credentials are available as env variables
- detect the version to release from the specified branch (--branch) or the current branch
- creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
- builds the artifacts and runs smoke-tests on the build zip & tar.gz files
- commits the new version and merges the release branch into the source branch
- creates a tag and pushes the commit to the specified origin (--remote)
- publishes the releases to Sonatype and S3
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
- Boto for S3 Upload ($ apt-get install python-boto)
- RPM for RPM building ($ apt-get install rpm)
- S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
"""
env = os.environ
PLUGINS = [('bigdesk', 'lukas-vlcek/bigdesk'),
('paramedic', 'karmi/elasticsearch-paramedic'),
('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
('head', 'mobz/elasticsearch-head')]
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
def log(msg):
log_plain('\n%s' % msg)
def log_plain(msg):
f = open(LOG, mode='ab')
f.write(msg.encode('utf-8'))
f.close()
def run(command, quiet=False):
log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
if os.system('%s >> %s 2>&1' % (command, LOG)):
msg = ' FAILED: %s [see log %s]' % (command, LOG)
if not quiet:
print(msg)
raise RuntimeError(msg)
try:
JAVA_HOME = env['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
try:
JAVA_HOME = env['JAVA7_HOME']
except KeyError:
pass #no JAVA7_HOME - we rely on JAVA_HOME
try:
# make sure mvn3 is used if mvn3 is available
# some systems use maven 2 as default
subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
MVN = 'mvn3'
except subprocess.CalledProcessError:
MVN = 'mvn'
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
# Verifies the java version. We guarantee that we run with Java 1.7
# If 1.7 is not available fail the build!
def verify_mvn_java_version(version, mvn):
s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
if 'Java version: %s' % version not in s:
raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s))
# Returns the hash of the current git HEAD revision
def get_head_hash():
return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip()
# Returns the hash of the given tag revision
def get_tag_hash(tag):
return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()
# Returns the name of the current branch
def get_current_branch():
return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
verify_java_version('1.7') # we require to build with 1.7
verify_mvn_java_version('1.7', MVN)
# Utility that returns the name of the release branch for a given version
def release_branch(version):
return 'release_branch_%s' % version
# runs git fetch on the given remote
def fetch(remote):
run('git fetch %s' % remote)
# Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch
# doesn't exist on the provided remote.
def create_release_branch(remote, src_branch, release):
run('git checkout %s' % src_branch)
run('git pull --rebase %s %s' % (remote, src_branch))
run('git checkout -b %s' % (release_branch(release)))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
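# A minimal usage sketch for process_file (hypothetical file path and version
# strings, not part of the release flow): rewrite a version string in-place.
def example_bump_version(path='README.md'):
    def callback(line):
        return line.replace('0.90.6', '0.90.7')
    return process_file(path, callback)  # True iff the file was modified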
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
pattern = 'coming[%s' % (release_version)
replacement = 'added[%s' % (release_version)
pending_files = []
def callback(line):
return line.replace(pattern, replacement)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.asciidoc'):
full_path = os.path.join(root, file_name)
if process_file(full_path, callback):
pending_files.append(os.path.join(root, file_name))
return pending_files
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
pattern = '<version>%s-SNAPSHOT</version>' % (release)
replacement = '<version>%s</version>' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(pom, callback)
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(version_file, callback)
# Stages the given files for the next git commit
def add_pending_files(*files):
for file in files:
run('git add %s' % (file))
# Executes a git commit with 'release [version]' as the commit message
def commit_release(release):
run('git commit -m "release [%s]"' % release)
def commit_feature_flags(release):
run('git commit -m "Update Documentation Feature Flags [%s]"' % release)
def tag_release(release):
run('git tag -a v%s -m "Tag release version %s"' % (release, release))
def run_mvn(*cmd):
for c in cmd:
run('%s; %s %s' % (java_exe(), MVN, c))
def build_release(run_tests=False, dry_run=True, cpus=1, bwc_version=None):
target = 'deploy'
if dry_run:
target = 'package'
if run_tests:
run_mvn('clean',
'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
if bwc_version:
print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
run_mvn('clean %s -DskipTests' % (target))
success = False
try:
run_mvn('-DskipTests rpm:rpm')
success = True
finally:
if not success:
print("""
RPM building failed. Make sure the "rpm" tools are installed.
Use one of the following commands to install them:
$ brew install rpm # on OSX
$ apt-get install rpm # on Ubuntu et al.
""")
# Uses the github API to fetch open tickets for the given release version
# if it finds any tickets open for that version it will throw an exception
def ensure_no_open_tickets(version):
version = "v%s" % version
conn = HTTPSConnection('api.github.com')
try:
log('Checking for open tickets on Github for version %s' % version)
conn.request('GET', '/repos/elasticsearch/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
res = conn.getresponse()
if res.status == 200:
issues = json.loads(res.read().decode("utf-8"))
if issues:
urls = []
for issue in issues:
urls.append(issue['url'])
raise RuntimeError('Found open issues for release version %s see - %s' % (version, urls))
else:
log("No open issues found for version %s" % version)
else:
raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
except socket.error as e:
log("Failed to fetch issue list from Github for release version %s' % version - Exception: [%s]" % (version, e))
#that is ok it might not be there yet
finally:
conn.close()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
for _ in range(timeout):
conn = HTTPConnection(host, port, timeout)
try:
log('Waiting until node becomes available for 1 second')
time.sleep(1)
log('Check if node is available')
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
return True
except socket.error as e:
log("Failed while waiting for node - Exception: [%s]" % e)
#that is ok it might not be there yet
finally:
conn.close()
return False
# Ensures we are using a true Lucene release, not a snapshot build:
def verify_lucene_version():
s = open('pom.xml', encoding='utf-8').read()
if 'download.elasticsearch.org/lucenesnapshots' in s:
raise RuntimeError('pom.xml contains download.elasticsearch.org/lucenesnapshots repository: remove that before releasing')
m = re.search(r'<lucene.version>(.*?)</lucene.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.version in pom.xml')
lucene_version = m.group(1)
m = re.search(r'<lucene.maven.version>(.*?)</lucene.maven.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.maven.version in pom.xml')
lucene_maven_version = m.group(1)
if lucene_version != lucene_maven_version:
raise RuntimeError('pom.xml is still using a snapshot release of lucene (%s): cutover to a real lucene release before releasing' % lucene_maven_version)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already a release version we fail.
# Returns the next version string, i.e. 0.90.7
def find_release_version(src_branch):
run('git checkout %s' % src_branch)
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch %s' % src_branch)
def artifact_names(release, path=''):
return [os.path.join(path, 'elasticsearch-%s.%s' % (release, t)) for t in ['deb', 'tar.gz', 'zip']]
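# For example (hypothetical version), artifact_names('0.90.7', 'target/releases/')
# returns ['target/releases/elasticsearch-0.90.7.deb',
#          'target/releases/elasticsearch-0.90.7.tar.gz',
#          'target/releases/elasticsearch-0.90.7.zip'].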
def get_artifacts(release):
common_artifacts = artifact_names(release, 'target/releases/')
for f in common_artifacts:
if not os.path.isfile(f):
raise RuntimeError('Could not find required artifact at %s' % f)
rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s-1.noarch.rpm' % release)
if os.path.isfile(rpm):
log('RPM [%s] contains: ' % rpm)
run('rpm -pqli %s' % rpm)
# this is an oddity of RPM: it attaches '-1' to the file name, so we have to rename it
renamed_rpm = os.path.join('target/rpm/elasticsearch/RPMS/noarch/', 'elasticsearch-%s.noarch.rpm' % release)
shutil.move(rpm, renamed_rpm)
common_artifacts.append(renamed_rpm)
else:
raise RuntimeError('Could not find required artifact at %s' % rpm)
return common_artifacts
# Generates sha1 checksums for all files
# and returns the checksum files as well
# as the given files in a list
def generate_checksums(files):
res = []
for release_file in files:
directory = os.path.dirname(release_file)
file = os.path.basename(release_file)
checksum_file = '%s.sha1.txt' % file
if os.system('cd %s; shasum %s > %s' % (directory, file, checksum_file)):
raise RuntimeError('Failed to generate checksum for file %s' % release_file)
res = res + [os.path.join(directory, checksum_file), release_file]
return res
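# Sketch of the result shape (hypothetical file): for
# ['target/releases/elasticsearch-0.90.7.zip'] the returned list is
# ['target/releases/elasticsearch-0.90.7.zip.sha1.txt',
#  'target/releases/elasticsearch-0.90.7.zip'],
# i.e. each checksum file precedes the artifact it covers.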
def download_and_verify(release, files, plugins=None, base_url='https://download.elasticsearch.org/elasticsearch/elasticsearch'):
print('Downloading and verifying release %s from %s' % (release, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
for file in files:
name = os.path.basename(file)
url = '%s/%s' % (base_url, name)
abs_file_path = os.path.join(tmp_dir, name)
print(' Downloading %s' % (url))
downloaded_files.append(abs_file_path)
urllib.request.urlretrieve(url, abs_file_path)
url = ''.join([url, '.sha1.txt'])
checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1.txt']))
urllib.request.urlretrieve(url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoketest failed missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
log('Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
plugin_names = {}
for name, plugin in plugins:
print(' Install plugin [%s] from [%s]' % (name, plugin))
run('%s; %s %s %s' % (java_exe(), es_plugin_path, '-install', plugin))
plugin_names[name] = True
if release.startswith("0.90."):
background = '' # 0.90.x starts in background automatically
else:
background = '-d'
print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -Des.node.bench=true -Des.script.disable_dynamic=false %s'
% (java_exe(), es_run_path, background))
conn = HTTPConnection('127.0.0.1', 9200, 20)
wait_for_node_startup()
try:
try:
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
if version['build_hash'].strip() != expected_hash:
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Running REST Spec tests against package [%s]' % release_file)
run_mvn('test -Dtests.cluster=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true')
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'], False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.request('POST', '/_cluster/nodes/_local/_shutdown')
time.sleep(1) # give the node some time to shut down
if conn.getresponse().status != 200:
raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % res.status)
finally:
conn.close()
shutil.rmtree(tmp_dir)
def merge_tag_push(remote, src_branch, release_version, dry_run):
run('git checkout %s' % src_branch)
run('git merge %s' % release_branch(release_version))
run('git tag v%s' % release_version)
if not dry_run:
run('git push %s %s' % (remote, src_branch)) # push the commit
run('git push %s v%s' % (remote, release_version)) # push the tag
else:
print(' dryrun [True] -- skipping push to remote %s' % remote)
def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=True):
location = os.path.dirname(os.path.realpath(__file__))
for artifact in artifacts:
if dry_run:
print('Skip Uploading %s to Amazon S3' % artifact)
else:
print('Uploading %s to Amazon S3' % artifact)
# requires boto to be installed but it is not available on python3k yet so we use a dedicated tool
run('python %s/upload-s3.py --file %s ' % (location, os.path.abspath(artifact)))
def print_sonatype_notice():
settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
if os.path.isfile(settings):
with open(settings, encoding='utf-8') as settings_file:
for line in settings_file:
if line.strip() == '<id>sonatype-nexus-snapshots</id>':
# bail out - we found the indicator, no need to print the warning
return
print("""
NOTE: No sonatype settings detected, make sure you have configured
your sonatype credentials in '~/.m2/settings.xml':
<settings>
...
<servers>
<server>
<id>sonatype-nexus-snapshots</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
<server>
<id>sonatype-nexus-staging</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
</servers>
...
</settings>
""")
def check_s3_credentials():
if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None):
raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3')
VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
# we print a notice if we cannot find the relevant info in the ~/.m2/settings.xml
print_sonatype_notice()
# finds the highest available bwc version to test against
def find_bwc_version(release_version, bwc_dir='backwards'):
log(' Lookup bwc version in directory [%s]' % bwc_dir)
bwc_version = None
if os.path.exists(bwc_dir) and os.path.isdir(bwc_dir):
max_version = [int(x) for x in release_version.split('.')]
for dir in os.listdir(bwc_dir):
if os.path.isdir(os.path.join(bwc_dir, dir)) and dir.startswith('elasticsearch-'):
version = [int(x) for x in dir[len('elasticsearch-'):].split('.')]
if version < max_version: # bwc tests only against smaller versions
if (not bwc_version) or version > [int(x) for x in bwc_version.split('.')]:
bwc_version = dir[len('elasticsearch-'):]
log(' Using bwc version [%s]' % bwc_version)
else:
log(' bwc directory [%s] does not exist or is not a directory - skipping' % bwc_dir)
return bwc_version
def ensure_checkout_is_clean(branchName):
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we are on the right branch (NOTE: a bit weak, since we default to current branch):
if 'On branch %s' % branchName not in s:
raise RuntimeError('git status does not show branch %s: got:\n%s' % (branchName, s))
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin %s": got:\n%s' % (branchName, s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, s))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch Release')
parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),
help='The branch to release from. Defaults to the current branch.')
parser.add_argument('--cpus', '-c', metavar='1', default=1,
help='The number of cpus to use for running the test. Default is [1]')
parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
help='Skips tests before release. Tests are run by default.')
parser.set_defaults(tests=True)
parser.add_argument('--remote', '-r', metavar='origin', default='origin',
help='The remote to push the release commit and tag to. Default is [origin]')
parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
help='Publishes the release. Disabled by default.')
parser.add_argument('--smoke', '-s', dest='smoke', default='',
help='Smoke tests the given release')
parser.add_argument('--bwc', '-w', dest='bwc', metavar='backwards', default='backwards',
help='Backwards compatibility version path to use to run compatibility tests against')
parser.set_defaults(dryrun=True)
parser.set_defaults(smoke=None)
args = parser.parse_args()
bwc_path = args.bwc
src_branch = args.branch
remote = args.remote
run_tests = args.tests
dry_run = args.dryrun
cpus = args.cpus
build = not args.smoke
smoke_test_version = args.smoke
if os.path.exists(LOG):
raise RuntimeError('please remove old release log %s first' % LOG)
if not dry_run:
check_s3_credentials()
print('WARNING: dryrun is set to "false" - this will push and publish the release')
input('Press Enter to continue...')
print(''.join(['-' for _ in range(80)]))
print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
print(' JAVA_HOME is [%s]' % JAVA_HOME)
print(' Running with maven command: [%s] ' % (MVN))
if build:
ensure_checkout_is_clean(src_branch)
verify_lucene_version()
release_version = find_release_version(src_branch)
ensure_no_open_tickets(release_version)
if not dry_run:
smoke_test_version = release_version
head_hash = get_head_hash()
run_mvn('clean') # clean the env!
print(' Release version: [%s]' % release_version)
create_release_branch(remote, src_branch, release_version)
print(' Created release branch [%s]' % (release_branch(release_version)))
success = False
try:
pending_files = [POM_FILE, VERSION_FILE]
remove_maven_snapshot(POM_FILE, release_version)
remove_version_snapshot(VERSION_FILE, release_version)
print(' Done removing snapshot version')
add_pending_files(*pending_files) # expects var args use * to expand
commit_release(release_version)
pending_files = update_reference_docs(release_version)
version_head_hash = None
# split commits for docs and version to enable easy cherry-picking
if pending_files:
add_pending_files(*pending_files) # expects var args use * to expand
commit_feature_flags(release_version)
version_head_hash = get_head_hash()
print(' Committed release version [%s]' % release_version)
print(''.join(['-' for _ in range(80)]))
print('Building Release candidate')
input('Press Enter to continue...')
if not dry_run:
print(' Running maven builds now and publish to Sonatype - run-tests [%s]' % run_tests)
else:
print(' Running maven builds now run-tests [%s]' % run_tests)
build_release(run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
artifacts = get_artifacts(release_version)
artifacts_and_checksum = generate_checksums(artifacts)
smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
print(''.join(['-' for _ in range(80)]))
print('Finish Release -- dry_run: %s' % dry_run)
input('Press Enter to continue...')
print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
merge_tag_push(remote, src_branch, release_version, dry_run)
print(' publish artifacts to S3 -- dry_run: %s' % dry_run)
publish_artifacts(artifacts_and_checksum, dry_run=dry_run)
cherry_pick_command = '.'
if version_head_hash:
cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash)
pending_msg = """
Release successful pending steps:
* create a new vX.Y.Z label on github for the next release, with label color #dddddd (https://github.com/elasticsearch/elasticsearch/labels)
* publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
- here is a guide: https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide#SonatypeOSSMavenRepositoryUsageGuide-8a.ReleaseIt
* check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
* announce the release on the website / blog post
* tweet about the release
* announce the release in the google group/mailinglist
* Move to a Snapshot version to the current branch for the next point release%(cherry_pick)s
"""
print(pending_msg % { 'version' : release_version, 'cherry_pick' : cherry_pick_command} )
success = True
finally:
if not success:
run('git reset --hard HEAD')
run('git checkout %s' % src_branch)
elif dry_run:
run('git reset --hard %s' % head_hash)
run('git tag -d v%s' % release_version)
# we delete this one anyways
run('git branch -D %s' % (release_branch(release_version)))
else:
print("Skipping build - smoketest only against version %s" % smoke_test_version)
run_mvn('clean') # clean the env!
if smoke_test_version:
fetch(remote)
download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)
|
|
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Classes to handle image files.
Collection of classes to handle image upload/download to/from Image service
(like Glance image storage and retrieval service) from/to VMware server.
"""
import httplib
import urllib
import urllib2
import netaddr
import six.moves.urllib.parse as urlparse
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
LOG = logging.getLogger(__name__)
USER_AGENT = 'OpenStack-ESX-Adapter'
READ_CHUNKSIZE = 65536
class GlanceFileRead(object):
"""Glance file read handler class."""
def __init__(self, glance_read_iter):
self.glance_read_iter = glance_read_iter
self.iter = self.get_next()
def read(self, chunk_size):
"""Read an item from the queue.
The chunk size is ignored for the Client ImageBodyIterator
uses its own CHUNKSIZE.
"""
try:
return self.iter.next()
except StopIteration:
return ""
def get_next(self):
"""Get the next item from the image iterator."""
for data in self.glance_read_iter:
yield data
def close(self):
"""A dummy close just to maintain consistency."""
pass
class VMwareHTTPFile(object):
"""Base class for VMDK file access over HTTP."""
def __init__(self, file_handle):
self.eof = False
self.file_handle = file_handle
def close(self):
"""Close the file handle."""
try:
self.file_handle.close()
except Exception as exc:
LOG.exception(exc)
def __del__(self):
"""Close the file handle on garbage collection."""
self.close()
def _build_vim_cookie_headers(self, vim_cookies):
"""Build ESX host session cookie headers."""
cookie_header = ""
for vim_cookie in vim_cookies:
cookie_header = vim_cookie.name + '=' + vim_cookie.value
break
return cookie_header
def write(self, data):
"""Write data to the file."""
raise NotImplementedError()
def read(self, chunk_size):
"""Read a chunk of data."""
raise NotImplementedError()
def get_size(self):
"""Get size of the file to be read."""
raise NotImplementedError()
def _is_valid_ipv6(self, address):
"""Whether given host address is a valid IPv6 address."""
try:
return netaddr.valid_ipv6(address)
except Exception:
return False
def get_soap_url(self, scheme, host):
"""return IPv4/v6 compatible url constructed for host."""
if self._is_valid_ipv6(host):
return '%s://[%s]' % (scheme, host)
return '%s://%s' % (scheme, host)
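# For example (hypothetical hosts):
# get_soap_url('https', '10.0.0.1') -> 'https://10.0.0.1'
# get_soap_url('https', 'fd00::10') -> 'https://[fd00::10]'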
def _fix_esx_url(self, url, host):
"""Fix netloc if it is a ESX host.
For a ESX host the netloc is set to '*' in the url returned in
HttpNfcLeaseInfo. The netloc is right IP when talking to a VC.
"""
urlp = urlparse.urlparse(url)
if urlp.netloc == '*':
scheme, _, path, params, query, fragment = urlp
url = urlparse.urlunparse((scheme, host, path, params,
query, fragment))
return url
def find_vmdk_url(self, lease_info, host):
"""Find the URL corresponding to a vmdk disk in lease info."""
url = None
for deviceUrl in lease_info.deviceUrl:
if deviceUrl.disk:
url = self._fix_esx_url(deviceUrl.url, host)
break
return url
class VMwareHTTPWriteFile(VMwareHTTPFile):
"""VMware file write handler class."""
def __init__(self, host, data_center_name, datastore_name, cookies,
file_path, file_size, scheme='https'):
soap_url = self.get_soap_url(scheme, host)
base_url = '%s/folder/%s' % (soap_url, file_path)
param_list = {'dcPath': data_center_name, 'dsName': datastore_name}
base_url = base_url + '?' + urllib.urlencode(param_list)
_urlparse = urlparse.urlparse(base_url)
scheme, netloc, path, params, query, fragment = _urlparse
if scheme == 'http':
conn = httplib.HTTPConnection(netloc)
elif scheme == 'https':
conn = httplib.HTTPSConnection(netloc)
conn.putrequest('PUT', path + '?' + query)
conn.putheader('User-Agent', USER_AGENT)
conn.putheader('Content-Length', file_size)
conn.putheader('Cookie', self._build_vim_cookie_headers(cookies))
conn.endheaders()
self.conn = conn
VMwareHTTPFile.__init__(self, conn)
def write(self, data):
"""Write to the file."""
self.file_handle.send(data)
def close(self):
"""Get the response and close the connection."""
try:
self.conn.getresponse()
except Exception as excep:
LOG.debug("Exception during HTTP connection close in "
"VMwareHTTPWrite. Exception is %s." % excep)
super(VMwareHTTPWriteFile, self).close()
class VMwareHTTPWriteVmdk(VMwareHTTPFile):
"""Write VMDK over HTTP using VMware HttpNfcLease."""
def __init__(self, session, host, rp_ref, vm_folder_ref, vm_create_spec,
vmdk_size):
"""Initialize a writer for vmdk file.
:param session: a valid api session to ESX/VC server
:param host: the ESX or VC host IP
:param rp_ref: resource pool into which backing VM is imported
:param vm_folder_ref: VM folder in ESX/VC inventory to use as parent
of backing VM
:param vm_create_spec: backing VM created using this create spec
:param vmdk_size: VMDK size to be imported into backing VM
"""
self._session = session
self._vmdk_size = vmdk_size
self._progress = 0
lease = session.invoke_api(session.vim, 'ImportVApp', rp_ref,
spec=vm_create_spec, folder=vm_folder_ref)
session.wait_for_lease_ready(lease)
self._lease = lease
lease_info = session.invoke_api(vim_util, 'get_object_property',
session.vim, lease, 'info')
self._vm_ref = lease_info.entity
# Find the url for vmdk device
url = self.find_vmdk_url(lease_info, host)
if not url:
msg = _("Could not retrieve URL from lease.")
LOG.exception(msg)
raise error_util.VimException(msg)
LOG.info(_("Opening vmdk url: %s for write.") % url)
# Prepare the http connection to the vmdk url
cookies = session.vim.client.options.transport.cookiejar
_urlparse = urlparse.urlparse(url)
scheme, netloc, path, params, query, fragment = _urlparse
if scheme == 'http':
conn = httplib.HTTPConnection(netloc)
elif scheme == 'https':
conn = httplib.HTTPSConnection(netloc)
if query:
path = path + '?' + query
conn.putrequest('PUT', path)
conn.putheader('User-Agent', USER_AGENT)
conn.putheader('Content-Length', str(vmdk_size))
conn.putheader('Overwrite', 't')
conn.putheader('Cookie', self._build_vim_cookie_headers(cookies))
conn.putheader('Content-Type', 'binary/octet-stream')
conn.endheaders()
self.conn = conn
VMwareHTTPFile.__init__(self, conn)
def write(self, data):
"""Write to the file."""
self._progress += len(data)
LOG.debug("Written %s bytes to vmdk." % self._progress)
self.file_handle.send(data)
def update_progress(self):
"""Updates progress to lease.
This call back to the lease is essential to keep the lease alive
across long running write operations.
"""
percent = int(float(self._progress) / self._vmdk_size * 100)
try:
LOG.debug("Updating progress to %s percent." % percent)
self._session.invoke_api(self._session.vim,
'HttpNfcLeaseProgress',
self._lease, percent=percent)
except error_util.VimException as ex:
LOG.exception(ex)
raise ex
def close(self):
"""End the lease and close the connection."""
state = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim,
self._lease, 'state')
if state == 'ready':
self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete',
self._lease)
LOG.debug("Lease released.")
else:
LOG.debug("Lease is already in state: %s." % state)
super(VMwareHTTPWriteVmdk, self).close()
def get_imported_vm(self):
""""Get managed object reference of the VM created for import."""
return self._vm_ref
class VMwareHTTPReadVmdk(VMwareHTTPFile):
"""read VMDK over HTTP using VMware HttpNfcLease."""
def __init__(self, session, host, vm_ref, vmdk_path, vmdk_size):
"""Initialize a writer for vmdk file.
During an export operation the vmdk disk is converted to a
stream-optimized sparse disk format. So the size of the VMDK
after export may be smaller than the current vmdk disk size.
:param session: a valid api session to ESX/VC server
:param host: the ESX or VC host IP
:param vm_ref: backing VM whose vmdk is to be exported
:param vmdk_path: datastore relative path to vmdk file to be exported
:param vmdk_size: current disk size of vmdk file to be exported
"""
self._session = session
self._vmdk_size = vmdk_size
self._progress = 0
lease = session.invoke_api(session.vim, 'ExportVm', vm_ref)
session.wait_for_lease_ready(lease)
self._lease = lease
lease_info = session.invoke_api(vim_util, 'get_object_property',
session.vim, lease, 'info')
# find the right disk url corresponding to given vmdk_path
url = self.find_vmdk_url(lease_info, host)
if not url:
msg = _("Could not retrieve URL from lease.")
LOG.exception(msg)
raise error_util.VimException(msg)
LOG.info(_("Opening vmdk url: %s for read.") % url)
cookies = session.vim.client.options.transport.cookiejar
headers = {'User-Agent': USER_AGENT,
'Cookie': self._build_vim_cookie_headers(cookies)}
request = urllib2.Request(url, None, headers)
conn = urllib2.urlopen(request)
VMwareHTTPFile.__init__(self, conn)
def read(self, chunk_size):
"""Read a chunk from file."""
data = self.file_handle.read(READ_CHUNKSIZE)
self._progress += len(data)
LOG.debug("Read %s bytes from vmdk." % self._progress)
return data
def update_progress(self):
"""Updates progress to lease.
This call back to the lease is essential to keep the lease alive
across long running read operations.
"""
percent = int(float(self._progress) / self._vmdk_size * 100)
try:
LOG.debug("Updating progress to %s percent." % percent)
self._session.invoke_api(self._session.vim,
'HttpNfcLeaseProgress',
self._lease, percent=percent)
except error_util.VimException as ex:
LOG.exception(ex)
raise ex
def close(self):
"""End the lease and close the connection."""
state = self._session.invoke_api(vim_util, 'get_object_property',
self._session.vim,
self._lease, 'state')
if state == 'ready':
self._session.invoke_api(self._session.vim, 'HttpNfcLeaseComplete',
self._lease)
LOG.debug("Lease released.")
else:
LOG.debug("Lease is already in state: %s." % state)
super(VMwareHTTPReadVmdk, self).close()
|
|
# -*- coding: utf-8 -*-
'''
Aggregation plug-in to copy all microscopy files for a given experiment to the user folder.
@author: Aaron Ponti
'''
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchSubCriteria
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClause
from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClauseAttribute
from ch.systemsx.cisd.base.utilities import OSUtilities
import os
import subprocess
import sys
import re
import zipfile
import java.io.File
import logging
from ch.ethz.scu.obit.common.server.longrunning import LRCache
import uuid
from threading import Thread
def touch(full_file):
"""Touches a file.
"""
f = open(full_file, 'w')
f.close()
def c_unique(seq):
"""Implements 'unique' of a list.
"""
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x))]
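# For example, c_unique([3, 1, 3, 2, 1]) returns [3, 1, 2]: order is
# preserved and later duplicates are dropped.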
def zip_folder(folder_path, output_path):
"""Zip the contents of an entire folder recursively. Please notice that
empty sub-folders will NOT be included in the archive.
"""
# Note: os.path.relpath() does not exist in Jython.
# target = os.path.relpath(folder_path, start=os.path.dirname(folder_path))
target = folder_path[folder_path.rfind(os.sep) + 1:]
# Simple trick to build relative paths
root_len = folder_path.find(target)
# Open zip file (no compression) before the try block, so that the
# finally clause below can always close it.
zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_STORED, allowZip64=True)
try:
# Now recurse into the folder
for root, folders, files in os.walk(folder_path):
# We do not process folders. This is only useful to store empty
# folders to the archive, but 1) jython's zipfile implementation
# throws:
#
# Exception: [Errno 21] Is a directory <directory_name>
#
# when trying to write a directory to a zip file (in contrast to
# Python's implementation) and 2) oBIT does not export empty
# folders in the first place.
# Build the relative directory path (current root)
relative_dir_path = os.path.abspath(root)[root_len:]
# If a folder contains no files (e.g. only sub-folders), its part of the
# hierarchy would be lost in the archive, so we add a placeholder file.
if len(files) == 0:
touch(os.path.join(root, '~'))
files.append('~')
# Include all files
for file_name in files:
# Full file path to add
full_file_path = os.path.join(root, file_name)
relative_file_path = os.path.join(relative_dir_path, file_name)
# Workaround problem with file name encoding
full_file_path = full_file_path.encode('latin-1')
relative_file_path = relative_file_path.encode('latin-1')
# Write to zip
zip_file.write(full_file_path, relative_file_path, \
zipfile.ZIP_STORED)
except IOError, message:
raise Exception(message)
except OSError, message:
raise Exception(message)
except zipfile.BadZipfile, message:
raise Exception(message)
finally:
zip_file.close()
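# A minimal usage sketch (hypothetical paths; Mover.compressIfNeeded below
# does the equivalent for real exports): archive a folder next to itself.
def _example_zip_export(export_path='/tmp/EXP1'):
    zip_folder(export_path, export_path + '.zip')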
class Mover():
"""
Takes care of organizing the files to be copied to the user folder and
performs the actual copying.
"""
def __init__(self, experimentId, sampleId, mode, userId, properties, logger):
"""Constructor
experimentId: id of the experiment (must be specified)
sampleId: id of the sample (optional, if specified, the sample id
will be used in the search criteria; if set to "" only
the experiment id will be used as filter).
mode: "normal", "zip", or "hrm". If mode is "normal", the files
will be copied to the user folder; if mode is "zip", the
files will be packaged into a zip files and served for
download via the browser; if mode is "hrm", the files
will be copied to the HRM source folder.
userId: user id.
properties: plug-in properties.
logger: logger.
"""
# Logger
self._logger = logger
# Store the valid file extensions
self._validExtensions = self._getValidExtensions()
# Store properties
self._properties = properties
# Experiment identifier
self._experimentId = experimentId
# Get the experiment
self._experiment = searchService.getExperiment(self._experimentId)
# Sample identifier
self._sampleId = sampleId
# Get the sample
self._sample = None
if not self._sampleId == "":
self._sample = searchService.getSample(self._sampleId)
# Experiment code (alias)
# If no / is found, _experimentCode will be the same as _experimentId
self._experimentCode = self._experimentId[self._experimentId.rfind("/") + 1:]
# User folder: depending on the 'mode' settings, the user folder changes
if mode =="normal":
# Standard user folder
self._userFolder = os.path.join(self._properties['base_dir'], \
userId, self._properties['export_dir'])
elif mode == "zip":
# Get the path to the user's Session Workspace
sessionWorkspace = sessionWorkspaceProvider.getSessionWorkspace()
# The user folder now will point to the Session Workspace
self._userFolder = sessionWorkspace.absolutePath
elif mode == "hrm":
# Standard user folder
self._userFolder = os.path.join(self._properties['hrm_base_dir'], \
userId, self._properties['hrm_src_subdir'])
else:
raise Exception("Bad value for argument 'mode' (" + mode +")")
# Store the mode
self._mode = mode
# Make sure the user folder (with export subfolder) exists and has
# the correct permissions
if not os.path.isdir(self._userFolder):
self._createDir(self._userFolder)
# Export full path in user/tmp folder
self._rootExportPath = os.path.join(self._userFolder,
self._experimentCode)
# Get the experiment name
self._experimentName = self._experiment.getPropertyValue("MICROSCOPY_EXPERIMENT_NAME")
# Experiment full path within the export path
self._experimentPath = os.path.join(self._rootExportPath,
self._experimentName)
# Info
self._logger.info("Export experiment with code " + \
self._experimentCode + " to " + \
str(self._userFolder))
self._logger.info("Export mode is " + self._mode)
# Message (in case of error)
self._message = ""
# Keep track of the number of copied files
self._numCopiedFiles = 0
# Public methods
# =========================================================================
def process(self):
"""
Finds the dataset that belongs to the experiment with stored id
and copies it to the user folder. If the processing was successful,
the method returns True. Otherwise, it returns False.
"""
# Check that the experiment could be retrieved
if self._experiment is None:
self._message = "Could not retrieve experiment with " \
"identifier " + self._experimentId + "!"
self._logger.error(self._message)
return False
# At this stage we can create the experiment folder in the user dir
# (and export root)
if not self._createRootAndExperimentFolder():
self._message = "Could not create experiment folder " + \
self._rootExportPath
return False
# Now point the current path to the newly created experiment folder
# And we copy the files contained in the Experiment
return (self._copyFilesForExperiment() and
self._copyAccessoryFilesForExperiment())
def compressIfNeeded(self):
"""Compresses the exported experiment folder to a zip archive
but only if the mode was "zip".
"""
if self._mode == "zip":
zip_folder(self._rootExportPath, self.getZipArchiveFullPath())
def getZipArchiveFullPath(self):
"""Return the full path of the zip archive (or "" if mode was "normal").
"""
if self._mode == "zip":
return self._rootExportPath + ".zip"
return ""
def getZipArchiveFileName(self):
"""Return the file name of the zip archive without path."""
if self._mode == "zip":
fullFile = java.io.File(self.getZipArchiveFullPath())
return fullFile.getName()
return ""
def getErrorMessage(self):
"""
Return the error message (in case process() returned failure)
"""
return self._message
def getNumberOfCopiedFiles(self):
"""
Return the number of copied files.
"""
return self._numCopiedFiles
def getRelativeRootExperimentPath(self):
"""
Return the experiment path relative to the user folder.
"""
return userId + "/" + \
self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]
# Private methods
# =========================================================================
def _getValidExtensions(self):
"""Build an array with all valid microscopy file extensions."""
ext = [ "nd2", "czi", "zvi", "lsm", "stk", "tif", "tiff", "lif",
"liff", "ics", "ids", "ims", "oib", "oif", "ome", "r3d",
"dicom", "dm3", "lei", "png", "jp2", "jpg", "1sc", "2",
"2fl", "3", "4", "5", "acff", "afm", "aim", "al3d", "am",
"amiramesh", "apl", "arf", "avi", "bip", "bmp", "c01",
"cfg", "cr2", "crw", "cxd", "dat", "dcm", "dm2", "dti",
"dv", "eps", "epsi", "exp", "fdf", "fff", "ffr", "fits",
"flex", "fli", "frm", "gel", "gif", "grey", "hdr", "hed",
"his", "htd", "html", "hx", "img", "inr", "ipl", "ipm",
"ipw", "jpk", "jpx", "l2d", "labels", "lim", "mdb", "mea",
"mnc", "mng", "mod", "mov", "mrc", "mrw", "mtb", "mvd2",
"naf", "nd", "ndpi", "nef", "nhdr", "nrrd", "obsep", "par",
"pcx", "pds", "pgm", "pic", "pict", "pnl", "pr3", "ps",
"psd", "raw", "res", "scn", "sdt", "seq", "sld", "sm2",
"sm3", "spi", "stp", "svs", "sxm", "tfr", "tga", "tnb",
"top", "txt", "v", "vms", "vsi", "vws", "wat", "xdce",
"xml", "xqd", "xqf", "xv", "xys", "zfp", "zfr" ]
return ext
def _copyFilesForExperiment(self):
"""
Copies the microscopy files in the experiment to the user directory.
Folders are copied recursively.
Returns True for success. In case of error, returns False and sets
the error message in self._message -- to be retrieved with the
getErrorMessage() method.
"""
# Get the datasets for the experiment
dataSets = self._getDataSetsForExperiment()
if len(dataSets) == 0:
self._logger.error("Experiment does not contain datasets.")
return False
# Get the files for the datasets
dataSetFiles = self._getFilesForDataSets(dataSets)
if len(dataSetFiles) == 0:
self._logger.error("Datasets do not contain files.")
return False
# Since sub-series reference the same file, we make sure to keep
# a unique version of the file list
dataSetFiles = c_unique(dataSetFiles)
# Copy the files to the experiment folder
for micrFile in dataSetFiles:
if os.path.isdir(micrFile):
self._copyDir(micrFile, self._experimentPath)
else:
self._copyFile(micrFile, self._experimentPath)
# Return success
return True
def _copyAccessoryFilesForExperiment(self):
"""
Copies the accessory files in the experiment to the user directory.
Folders are copied recursively.
Returns True for success. In case of error, returns False and sets
the error message in self._message -- to be retrieved with the
getErrorMessage() method.
"""
# Get the datasets for the experiment
dataSets = self._getAccessoryDataSetsForExperiment()
if len(dataSets) == 0:
return True
# Get the files for the datasets
dataSetFiles = self._getFilesForAccessoryDataSets(dataSets)
if len(dataSetFiles) == 0:
self._logger.error("Accessory datasets do not contain files.")
return False
# Since sub-series reference the same file, we make sure to keep
# a unique version of the file list
dataSetFiles = c_unique(dataSetFiles)
# Copy the files to the experiment folder
for micrFile in dataSetFiles:
if os.path.isdir(micrFile):
self._copyDir(micrFile, self._experimentPath)
else:
self._copyFile(micrFile, self._experimentPath)
# Return success
return True
def _getDataSetsForExperiment(self):
"""
Return a list of datasets belonging to the experiment and optionally
to the sample. If the sample ID is empty, only the experiment is used
in the search criteria.
If none are found, return [].
"""
# Set search criteria to retrieve all datasets of type MICROSCOPY_IMG_CONTAINER
# for the experiment. If the sample code is set, we also filter by it.
searchCriteria = SearchCriteria()
searchCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.TYPE, "MICROSCOPY_IMG_CONTAINER"))
expCriteria = SearchCriteria()
expCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, self._experiment.permId))
searchCriteria.addSubCriteria(SearchSubCriteria.createExperimentCriteria(expCriteria))
if self._sample is not None:
self._logger.info("Filter by sample " + self._sampleId)
sampleCriteria = SearchCriteria()
sampleCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, self._sample.permId))
searchCriteria.addSubCriteria(SearchSubCriteria.createSampleCriteria(sampleCriteria))
dataSets = searchService.searchForDataSets(searchCriteria)
if len(dataSets) == 0:
dataSets = []
self._message = "Could not retrieve datasets for experiment " \
"with id " + self._experimentId
if self._sampleId != "":
self._message = self._message + " and sample with id " + \
self._sampleId
self._logger.error(self._message)
# Return
return dataSets
def _getAccessoryDataSetsForExperiment(self):
"""
Return a list of datasets belonging to the experiment and optionally
to the sample. If the sample ID is empty, only the experiment is used
in the search criteria.
If none are found, return [].
"""
        # Set search criteria to retrieve all datasets of type MICROSCOPY_ACCESSORY_FILE for the experiment.
# If the sample code is set, we also filter by it.
searchCriteria = SearchCriteria()
searchCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.TYPE, "MICROSCOPY_ACCESSORY_FILE"))
expCriteria = SearchCriteria()
expCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, self._experiment.permId))
searchCriteria.addSubCriteria(SearchSubCriteria.createExperimentCriteria(expCriteria))
if self._sample is not None:
self._logger.info("Filter by sample " + self._sampleId)
sampleCriteria = SearchCriteria()
sampleCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, self._sample.permId))
searchCriteria.addSubCriteria(SearchSubCriteria.createSampleCriteria(sampleCriteria))
accessoryDataSets = searchService.searchForDataSets(searchCriteria)
        # Log the number of accessory datasets found
if len(accessoryDataSets) != 0:
self._message = "Found " + str(len(accessoryDataSets)) + \
" accessory datasets for experiment " \
"with id " + self._experimentId
if self._sampleId != "":
self._message = self._message + " and sample with id " + \
self._sampleId
self._logger.info(self._message)
# Return
return accessoryDataSets
def _getFilesForDataSets(self, dataSets):
"""
Get the list of microscopy file paths that correspond to the input list
of datasets. If no files are found, returns [].
"""
if dataSets == []:
return []
dataSetFiles = []
for dataSet in dataSets:
content = contentProvider.getContent(dataSet.getDataSetCode())
nodes = content.listMatchingNodes("original", ".*")
if nodes is not None:
for node in nodes:
fileName = node.tryGetFile()
if fileName is not None:
fileName = str(fileName)
if os.path.isdir(str(fileName)):
dataSetFiles.append(fileName)
elif self._isValidMicroscopyFile(fileName):
dataSetFiles.append(fileName)
else:
raise("Unexpected file!")
if len(dataSetFiles) == 0:
self._message = "Could not retrieve dataset files!"
self._logger.error(self._message)
# Return the files
return dataSetFiles
def _getFilesForAccessoryDataSets(self, dataSets):
"""
Get the list of file paths that correspond to the input list
of accessory datasets. If no files are found, returns [].
"""
if dataSets == []:
return []
dataSetFiles = []
for dataSet in dataSets:
content = contentProvider.getContent(dataSet.getDataSetCode())
nodes = content.listMatchingNodes("original", ".*")
# All file types are allowed
if nodes is not None:
for node in nodes:
fileName = node.tryGetFile()
if fileName is not None:
fileName = str(fileName)
if os.path.isdir(str(fileName)):
dataSetFiles.append(fileName)
else:
dataSetFiles.append(fileName)
if len(dataSetFiles) == 0:
self._message = "Could not retrieve accessory dataset files!"
self._logger.error(self._message)
# Return the files
return dataSetFiles
def _isValidMicroscopyFile(self, fileName):
"""Checks whether the file has a compatible extension."""
        for validExt in self._validExtensions:
            if fileName.lower().endswith("." + validExt):
                return True
self._logger.error("File " + fileName + " is not a valid microscopy file.")
return False
def _copyFile(self, source, dstDir):
"""Copies the source file (with full path) to directory dstDir.
We use a trick to preserve the NFSv4 ACLs: since copying the file
loses them, we first touch the destination file to create it, and
then we overwrite it.
"""
dstFile = os.path.join(dstDir, os.path.basename(source))
touch = "/usr/bin/touch" if OSUtilities.isMacOS() else "/bin/touch"
subprocess.call([touch, dstFile])
subprocess.call(["/bin/cp", source, dstDir])
self._logger.info("Copying file " + source + " to " + dstDir)
self._numCopiedFiles += 1
def _copyDir(self, source, dstDir):
"""Copies the source directory (with full path) recursively to directory dstDir.
"""
dstSubDir = os.path.join(dstDir, os.path.basename(source))
self._logger.info("Creating directory " + dstDir)
self._createDir(dstSubDir)
# Info
self._logger.info("Copying directory " + source + " to " + dstDir)
# Now copy recursively (by preserving NFSv4 ACLs)
files = os.listdir(source)
for f in files:
fullPath = os.path.join(source, f)
if os.path.isdir(fullPath):
self._copyDir(fullPath, dstSubDir)
else:
self._copyFile(fullPath, dstSubDir)
def _createDir(self, dirFullPath):
"""Creates the passed directory (with full path).
"""
# Inform
self._logger.info("Creating directory " + dirFullPath)
# Create dir
if not os.path.exists(dirFullPath):
os.makedirs(dirFullPath)
def _createRootAndExperimentFolder(self):
"""
Create the experiment folder. Notice that it uses information already
stored in the object, but this info is filled in in the constructor, so
it is safe to assume it is there if nothing major went wrong. In this
case, the method will return False and no folder will be created.
Otherwise, the method returns True.
Please notice that if the experiment folder already exists, _{digit}
will be appended to the folder name, to ensure that the folder is
unique. The updated folder name will be stored in the _rootExportPath
property.
"""
# This should not happen
if self._rootExportPath == "":
self._logger.info("Root path is " + self._rootExportPath)
return False
# Make sure that the experiment folder does not already exist
expPath = self._rootExportPath
# Does the folder already exist?
if os.path.exists(expPath):
counter = 1
ok = False
while not ok:
tmpPath = expPath + "_" + str(counter)
if not os.path.exists(tmpPath):
expPath = tmpPath
ok = True
else:
counter += 1
# Update the root and experiment paths
self._rootExportPath = expPath
self._experimentPath = os.path.join(self._rootExportPath,
self._experimentName)
# Create the root folder
self._createDir(self._rootExportPath)
# And now create the experiment folder (in the root folder)
self._createDir(self._experimentPath)
# Return success
return True
# Parse properties file for custom settings
def parsePropertiesFile():
"""Parse properties file for custom plug-in settings."""
filename = "../core-plugins/microscopy/2/dss/reporting-plugins/export_microscopy_datasets/plugin.properties"
var_names = ['base_dir', 'export_dir', 'hrm_base_dir', 'hrm_src_subdir']
properties = {}
try:
fp = open(filename, "r")
except:
return properties
try:
for line in fp:
line = re.sub('[ \'\"\n]', '', line)
parts = line.split("=")
if len(parts) == 2:
if parts[0] in var_names:
properties[parts[0]] = parts[1]
finally:
fp.close()
# Check that all variables were found
if len(properties.keys()) == 0:
return None
found_vars = properties.keys()
for var_name in var_names:
if var_name not in found_vars:
return None
# Make sure that there are no Windows line endings
for var_name in var_names:
properties[var_name] = properties[var_name].replace('\r', '')
# Everything found
return properties
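# Hedged illustration (hypothetical paths, not shipped defaults): given a
# plugin.properties containing
#
#     base_dir = "/data/openbis"
#     export_dir = "export"
#     hrm_base_dir = "/data/hrm"
#     hrm_src_subdir = "src"
#
# parsePropertiesFile() strips spaces and quotes and returns
#     {'base_dir': '/data/openbis', 'export_dir': 'export',
#      'hrm_base_dir': '/data/hrm', 'hrm_src_subdir': 'src'}
# It returns None if any of the four expected keys is missing.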
# Plug-in entry point
#
# Input parameters:
#
# uid : job unique identifier (see below)
# experimentId: experiment identifier
# sampleId : sample identifier
# mode : requested mode of operation: one of 'normal', 'hrm', 'zip'.
#
# This plug-in returns a table to the client with a different set of columns
# depending on whether the plug-in is called for the first time and the process
# is just started, or if it is queried for completeness at a later time.
#
# At the end of the first call, a table with following columns is returned:
#
# uid : unique identifier of the running plug-in
# completed: indicates whether the plug-in has finished. This is set to False in the
# first call.
#
# Later calls return a table with the following columns:
#
# uid : unique identifier of the running plug-in. This was returned to
# the client in the first call and was passed on again as a parameter.
# Here it is returned again to make sure that client and server
# always know which task they are talking about.
# completed: True if the process has completed in the meanwhile, False if it
# is still running.
# success : True if the process completed successfully, False otherwise.
# message : error message in case success was False.
# nCopiedFiles: total number of copied files.
# relativeExpFolder: path of the copied experiment folder relative to the root of the
# export folder.
# zipArchiveFileName: file name of the zip in case compression was requested.
# mode : requested mode of operation.
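# Hedged sketch (illustration only, not part of the plug-in): one way a client
# could drive the two-call protocol described above. "call_service" is a
# hypothetical stand-in for whatever mechanism the client uses to invoke this
# aggregation service and read the first returned table row back as a dict; it
# is not an openBIS API.
def _example_poll_export(call_service, experimentId, sampleId, mode, interval=2.0):
    import time
    # First call: no uid yet, so the plug-in starts the export in a background
    # thread and returns a row with a fresh uid and completed == False.
    row = call_service({"uid": "", "experimentId": experimentId,
                        "sampleId": sampleId, "mode": mode})
    uid = row["uid"]
    # Later calls: pass the uid back until the row reports completed == True,
    # then inspect success, message, nCopiedFiles, etc.
    while not row["completed"]:
        time.sleep(interval)
        row = call_service({"uid": uid, "experimentId": experimentId,
                            "sampleId": sampleId, "mode": mode})
    return row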
def aggregate(parameters, tableBuilder):
# Get the ID of the call if it already exists
uid = parameters.get("uid");
if uid is None or uid == "":
# Create a unique id
uid = str(uuid.uuid4())
# Add the table headers
tableBuilder.addHeader("uid")
tableBuilder.addHeader("completed")
# Fill in relevant information
row = tableBuilder.addRow()
row.setCell("uid", uid)
row.setCell("completed", False)
# Launch the actual process in a separate thread
thread = Thread(target = aggregateProcess,
args = (parameters, tableBuilder, uid))
thread.start()
# Return immediately
return
# The process is already running in a separate thread. We get current
# results and return them
    resultToSend = LRCache.get(uid)
if resultToSend is None:
# This should not happen
raise Exception("Could not retrieve results from result cache!")
# Add the table headers
tableBuilder.addHeader("uid")
tableBuilder.addHeader("completed")
tableBuilder.addHeader("success")
tableBuilder.addHeader("message")
tableBuilder.addHeader("nCopiedFiles")
tableBuilder.addHeader("relativeExpFolder")
tableBuilder.addHeader("zipArchiveFileName")
tableBuilder.addHeader("mode")
# Store current results in the table
row = tableBuilder.addRow()
row.setCell("uid", resultToSend["uid"])
row.setCell("completed", resultToSend["completed"])
row.setCell("success", resultToSend["success"])
row.setCell("message", resultToSend["message"])
row.setCell("nCopiedFiles", resultToSend["nCopiedFiles"])
row.setCell("relativeExpFolder", resultToSend["relativeExpFolder"])
row.setCell("zipArchiveFileName", resultToSend["zipArchiveFileName"])
row.setCell("mode", resultToSend["mode"])
# Actual work process
def aggregateProcess(parameters, tableBuilder, uid):
# Make sure to initialize and store the results. We need to have them since
# most likely the client will try to retrieve them again before the process
# is finished.
resultToStore = {}
resultToStore["uid"] = uid
resultToStore["success"] = True
resultToStore["completed"] = False
resultToStore["message"] = ""
resultToStore["nCopiedFiles"] = ""
resultToStore["relativeExpFolder"] = ""
resultToStore["zipArchiveFileName"] = ""
resultToStore["mode"] = ""
LRCache.set(uid, resultToStore)
# Get path to containing folder
# __file__ does not work (reliably) in Jython
dbPath = "../core-plugins/microscopy/2/dss/reporting-plugins/export_microscopy_datasets"
# Path to the logs subfolder
logPath = os.path.join(dbPath, "logs")
    # Make sure the logs subfolder exists
if not os.path.exists(logPath):
os.makedirs(logPath)
# Path for the log file
logFile = os.path.join(logPath, "log.txt")
# Set up logging
logging.basicConfig(filename=logFile, level=logging.DEBUG,
format='%(asctime)-15s %(levelname)s: %(message)s')
logger = logging.getLogger()
# Get parameters from plugin.properties
properties = parsePropertiesFile()
if properties is None:
raise Exception("Could not process plugin.properties")
# Get the experiment identifier
experimentId = parameters.get("experimentId")
# Get the sample identifier
sampleId = parameters.get("sampleId")
# Get the mode
mode = parameters.get("mode")
# Info
logger.info("Aggregation plug-in called with following parameters:")
logger.info("experimentId = " + experimentId)
logger.info("sampleId = " + sampleId)
logger.info("mode = " + mode)
logger.info("userId = " + userId)
logger.info("Aggregation plugin properties:")
logger.info("properties = " + str(properties))
# Instantiate the Mover object - userId is a global variable
# made available to the aggregation plug-in
mover = Mover(experimentId, sampleId, mode, userId, properties, logger)
# Process
success = mover.process()
# Compress
if mode == "zip":
mover.compressIfNeeded()
# Get some results info
nCopiedFiles = mover.getNumberOfCopiedFiles()
errorMessage = mover.getErrorMessage()
relativeExpFolder = mover.getRelativeRootExperimentPath()
zipFileName = mover.getZipArchiveFileName()
# Update results and store them
resultToStore["uid"] = uid
resultToStore["completed"] = True
resultToStore["success"] = success
resultToStore["message"] = errorMessage
resultToStore["nCopiedFiles"] = nCopiedFiles
resultToStore["relativeExpFolder"] = relativeExpFolder
resultToStore["zipArchiveFileName"] = zipFileName
resultToStore["mode"] = mode
LRCache.set(uid, resultToStore)
# Email result to the user
    if success:
subject = "Microscopy: successfully processed requested data"
if nCopiedFiles == 1:
snip = "One file was "
else:
snip = str(nCopiedFiles) + " files were "
if mode == "normal":
body = snip + "successfully exported to {...}/" + relativeExpFolder + "."
elif mode == "hrm":
body = snip + "successfully exported to your HRM source folder."
else:
body = snip + "successfully packaged for download: " + zipFileName
else:
subject = "Microscopy: error processing request!"
body = "Sorry, there was an error processing your request. " + \
"Please send your administrator the following report:\n\n" + \
"\"" + errorMessage + "\"\n"
# Send
try:
mailService.createEmailSender().withSubject(subject).withBody(body).send()
except:
sys.stderr.write("export_microscopy_datasets: Failure sending email to user!")
|
|
#!/usr/bin/env python
"""Message registry for apitools."""
import collections
import contextlib
import json
from protorpc import descriptor
from protorpc import messages
import six
from apitools.gen import extended_descriptor
from apitools.gen import util
TypeInfo = collections.namedtuple('TypeInfo', ('type_name', 'variant'))
class MessageRegistry(object):
"""Registry for message types.
This closely mirrors a messages.FileDescriptor, but adds additional
attributes (such as message and field descriptions) and some extra
code for validation and cycle detection.
"""
# Type information from these two maps comes from here:
# https://developers.google.com/discovery/v1/type-format
PRIMITIVE_TYPE_INFO_MAP = {
'string': TypeInfo(type_name='string',
variant=messages.StringField.DEFAULT_VARIANT),
'integer': TypeInfo(type_name='integer',
variant=messages.IntegerField.DEFAULT_VARIANT),
'boolean': TypeInfo(type_name='boolean',
variant=messages.BooleanField.DEFAULT_VARIANT),
'number': TypeInfo(type_name='number',
variant=messages.FloatField.DEFAULT_VARIANT),
'any': TypeInfo(type_name='extra_types.JsonValue',
variant=messages.Variant.MESSAGE),
}
PRIMITIVE_FORMAT_MAP = {
'int32': TypeInfo(type_name='integer',
variant=messages.Variant.INT32),
'uint32': TypeInfo(type_name='integer',
variant=messages.Variant.UINT32),
'int64': TypeInfo(type_name='string',
variant=messages.Variant.INT64),
'uint64': TypeInfo(type_name='string',
variant=messages.Variant.UINT64),
'double': TypeInfo(type_name='number',
variant=messages.Variant.DOUBLE),
'float': TypeInfo(type_name='number',
variant=messages.Variant.FLOAT),
'byte': TypeInfo(type_name='byte',
variant=messages.BytesField.DEFAULT_VARIANT),
'date': TypeInfo(type_name='extra_types.DateField',
variant=messages.Variant.STRING),
'date-time': TypeInfo(
type_name='protorpc.message_types.DateTimeMessage',
variant=messages.Variant.MESSAGE),
}
def __init__(self, client_info, names, description,
root_package_dir, base_files_package):
self.__names = names
self.__client_info = client_info
self.__package = client_info.package
self.__description = util.CleanDescription(description)
self.__root_package_dir = root_package_dir
self.__base_files_package = base_files_package
self.__file_descriptor = extended_descriptor.ExtendedFileDescriptor(
package=self.__package, description=self.__description)
# Add required imports
self.__file_descriptor.additional_imports = [
'from protorpc import messages',
]
# Map from scoped names (i.e. Foo.Bar) to MessageDescriptors.
self.__message_registry = collections.OrderedDict()
# A set of types that we're currently adding (for cycle detection).
self.__nascent_types = set()
# A set of types for which we've seen a reference but no
# definition; if this set is nonempty, validation fails.
self.__unknown_types = set()
# Used for tracking paths during message creation
self.__current_path = []
# Where to register created messages
self.__current_env = self.__file_descriptor
# TODO(craigcitro): Add a `Finalize` method.
@property
def file_descriptor(self):
self.Validate()
return self.__file_descriptor
def WriteProtoFile(self, printer):
"""Write the messages file to out as proto."""
self.Validate()
extended_descriptor.WriteMessagesFile(
self.__file_descriptor, self.__package, self.__client_info.version,
printer)
def WriteFile(self, printer):
"""Write the messages file to out."""
self.Validate()
extended_descriptor.WritePythonFile(
self.__file_descriptor, self.__package, self.__client_info.version,
printer)
def Validate(self):
mysteries = self.__nascent_types or self.__unknown_types
if mysteries:
raise ValueError('Malformed MessageRegistry: %s' % mysteries)
def __ComputeFullName(self, name):
return '.'.join(map(six.text_type, self.__current_path[:] + [name]))
def __AddImport(self, new_import):
if new_import not in self.__file_descriptor.additional_imports:
self.__file_descriptor.additional_imports.append(new_import)
def __DeclareDescriptor(self, name):
self.__nascent_types.add(self.__ComputeFullName(name))
def __RegisterDescriptor(self, new_descriptor):
"""Register the given descriptor in this registry."""
if not isinstance(new_descriptor, (
extended_descriptor.ExtendedMessageDescriptor,
extended_descriptor.ExtendedEnumDescriptor)):
raise ValueError('Cannot add descriptor of type %s' % (
type(new_descriptor),))
full_name = self.__ComputeFullName(new_descriptor.name)
if full_name in self.__message_registry:
raise ValueError(
'Attempt to re-register descriptor %s' % full_name)
if full_name not in self.__nascent_types:
raise ValueError('Directly adding types is not supported')
new_descriptor.full_name = full_name
self.__message_registry[full_name] = new_descriptor
if isinstance(new_descriptor,
extended_descriptor.ExtendedMessageDescriptor):
self.__current_env.message_types.append(new_descriptor)
elif isinstance(new_descriptor,
extended_descriptor.ExtendedEnumDescriptor):
self.__current_env.enum_types.append(new_descriptor)
self.__unknown_types.discard(full_name)
self.__nascent_types.remove(full_name)
def LookupDescriptor(self, name):
return self.__GetDescriptorByName(name)
def LookupDescriptorOrDie(self, name):
message_descriptor = self.LookupDescriptor(name)
if message_descriptor is None:
            raise ValueError('No message descriptor named "%s"' % name)
return message_descriptor
def __GetDescriptor(self, name):
return self.__GetDescriptorByName(self.__ComputeFullName(name))
def __GetDescriptorByName(self, name):
if name in self.__message_registry:
return self.__message_registry[name]
if name in self.__nascent_types:
raise ValueError(
'Cannot retrieve type currently being created: %s' % name)
return None
@contextlib.contextmanager
def __DescriptorEnv(self, message_descriptor):
# TODO(craigcitro): Typecheck?
previous_env = self.__current_env
self.__current_path.append(message_descriptor.name)
self.__current_env = message_descriptor
yield
self.__current_path.pop()
self.__current_env = previous_env
def AddEnumDescriptor(self, name, description,
enum_values, enum_descriptions):
"""Add a new EnumDescriptor named name with the given enum values."""
message = extended_descriptor.ExtendedEnumDescriptor()
message.name = self.__names.ClassName(name)
message.description = util.CleanDescription(description)
self.__DeclareDescriptor(message.name)
for index, (enum_name, enum_description) in enumerate(
zip(enum_values, enum_descriptions)):
enum_value = extended_descriptor.ExtendedEnumValueDescriptor()
enum_value.name = self.__names.NormalizeEnumName(enum_name)
if enum_value.name != enum_name:
message.enum_mappings.append(
extended_descriptor.ExtendedEnumDescriptor.JsonEnumMapping(
python_name=enum_value.name, json_name=enum_name))
self.__AddImport('from %s import encoding' %
self.__base_files_package)
enum_value.number = index
enum_value.description = util.CleanDescription(
enum_description or '<no description>')
message.values.append(enum_value)
self.__RegisterDescriptor(message)
def __DeclareMessageAlias(self, schema, alias_for):
"""Declare schema as an alias for alias_for."""
# TODO(craigcitro): This is a hack. Remove it.
message = extended_descriptor.ExtendedMessageDescriptor()
message.name = self.__names.ClassName(schema['id'])
message.alias_for = alias_for
self.__DeclareDescriptor(message.name)
self.__AddImport('from %s import extra_types' %
self.__base_files_package)
self.__RegisterDescriptor(message)
def __AddAdditionalProperties(self, message, schema, properties):
"""Add an additionalProperties field to message."""
additional_properties_info = schema['additionalProperties']
entries_type_name = self.__AddAdditionalPropertyType(
message.name, additional_properties_info)
description = util.CleanDescription(
additional_properties_info.get('description'))
if description is None:
description = 'Additional properties of type %s' % message.name
attrs = {
'items': {
'$ref': entries_type_name,
},
'description': description,
'type': 'array',
}
field_name = 'additionalProperties'
message.fields.append(self.__FieldDescriptorFromProperties(
field_name, len(properties) + 1, attrs))
self.__AddImport('from %s import encoding' % self.__base_files_package)
message.decorators.append(
'encoding.MapUnrecognizedFields(%r)' % field_name)
def AddDescriptorFromSchema(self, schema_name, schema):
"""Add a new MessageDescriptor named schema_name based on schema."""
# TODO(craigcitro): Is schema_name redundant?
if self.__GetDescriptor(schema_name):
return
if schema.get('enum'):
self.__DeclareEnum(schema_name, schema)
return
if schema.get('type') == 'any':
self.__DeclareMessageAlias(schema, 'extra_types.JsonValue')
return
if schema.get('type') != 'object':
            raise ValueError('Cannot create message descriptors for type %s' %
                             schema.get('type'))
message = extended_descriptor.ExtendedMessageDescriptor()
message.name = self.__names.ClassName(schema['id'])
message.description = util.CleanDescription(schema.get(
'description', 'A %s object.' % message.name))
self.__DeclareDescriptor(message.name)
with self.__DescriptorEnv(message):
properties = schema.get('properties', {})
for index, (name, attrs) in enumerate(sorted(properties.items())):
field = self.__FieldDescriptorFromProperties(
name, index + 1, attrs)
message.fields.append(field)
if field.name != name:
message.field_mappings.append(
type(message).JsonFieldMapping(
python_name=field.name, json_name=name))
self.__AddImport(
'from %s import encoding' % self.__base_files_package)
if 'additionalProperties' in schema:
self.__AddAdditionalProperties(message, schema, properties)
self.__RegisterDescriptor(message)
def __AddAdditionalPropertyType(self, name, property_schema):
"""Add a new nested AdditionalProperty message."""
new_type_name = 'AdditionalProperty'
property_schema = dict(property_schema)
# We drop the description here on purpose, so the resulting
# messages are less repetitive.
property_schema.pop('description', None)
description = 'An additional property for a %s object.' % name
schema = {
'id': new_type_name,
'type': 'object',
'description': description,
'properties': {
'key': {
'type': 'string',
'description': 'Name of the additional property.',
},
'value': property_schema,
},
}
self.AddDescriptorFromSchema(new_type_name, schema)
return new_type_name
def __AddEntryType(self, entry_type_name, entry_schema, parent_name):
"""Add a type for a list entry."""
entry_schema.pop('description', None)
description = 'Single entry in a %s.' % parent_name
schema = {
'id': entry_type_name,
'type': 'object',
'description': description,
'properties': {
'entry': {
'type': 'array',
'items': entry_schema,
},
},
}
self.AddDescriptorFromSchema(entry_type_name, schema)
return entry_type_name
def __FieldDescriptorFromProperties(self, name, index, attrs):
"""Create a field descriptor for these attrs."""
field = descriptor.FieldDescriptor()
field.name = self.__names.CleanName(name)
field.number = index
field.label = self.__ComputeLabel(attrs)
new_type_name_hint = self.__names.ClassName(
'%sValue' % self.__names.ClassName(name))
type_info = self.__GetTypeInfo(attrs, new_type_name_hint)
field.type_name = type_info.type_name
field.variant = type_info.variant
if 'default' in attrs:
# TODO(craigcitro): Correctly handle non-primitive default values.
default = attrs['default']
if not (field.type_name == 'string' or
field.variant == messages.Variant.ENUM):
default = str(json.loads(default))
if field.variant == messages.Variant.ENUM:
default = self.__names.NormalizeEnumName(default)
field.default_value = default
extended_field = extended_descriptor.ExtendedFieldDescriptor()
extended_field.name = field.name
extended_field.description = util.CleanDescription(
attrs.get('description', 'A %s attribute.' % field.type_name))
extended_field.field_descriptor = field
return extended_field
@staticmethod
def __ComputeLabel(attrs):
if attrs.get('required', False):
return descriptor.FieldDescriptor.Label.REQUIRED
elif attrs.get('type') == 'array':
return descriptor.FieldDescriptor.Label.REPEATED
elif attrs.get('repeated'):
return descriptor.FieldDescriptor.Label.REPEATED
return descriptor.FieldDescriptor.Label.OPTIONAL
def __DeclareEnum(self, enum_name, attrs):
description = util.CleanDescription(attrs.get('description', ''))
enum_values = attrs['enum']
enum_descriptions = attrs.get(
'enumDescriptions', [''] * len(enum_values))
self.AddEnumDescriptor(enum_name, description,
enum_values, enum_descriptions)
self.__AddIfUnknown(enum_name)
return TypeInfo(type_name=enum_name, variant=messages.Variant.ENUM)
def __AddIfUnknown(self, type_name):
type_name = self.__names.ClassName(type_name)
full_type_name = self.__ComputeFullName(type_name)
if (full_type_name not in self.__message_registry.keys() and
type_name not in self.__message_registry.keys()):
self.__unknown_types.add(type_name)
def __GetTypeInfo(self, attrs, name_hint):
"""Return a TypeInfo object for attrs, creating one if needed."""
type_ref = self.__names.ClassName(attrs.get('$ref'))
type_name = attrs.get('type')
if not (type_ref or type_name):
raise ValueError('No type found for %s' % attrs)
if type_ref:
self.__AddIfUnknown(type_ref)
# We don't actually know this is a message -- it might be an
# enum. However, we can't check that until we've created all the
# types, so we come back and fix this up later.
return TypeInfo(
type_name=type_ref, variant=messages.Variant.MESSAGE)
if 'enum' in attrs:
enum_name = '%sValuesEnum' % name_hint
return self.__DeclareEnum(enum_name, attrs)
if 'format' in attrs:
type_info = self.PRIMITIVE_FORMAT_MAP.get(attrs['format'])
if type_info is None:
# If we don't recognize the format, the spec says we fall back
# to just using the type name.
if type_name in self.PRIMITIVE_TYPE_INFO_MAP:
return self.PRIMITIVE_TYPE_INFO_MAP[type_name]
raise ValueError('Unknown type/format "%s"/"%s"' % (
attrs['format'], type_name))
if (type_info.type_name.startswith('protorpc.message_types.') or
type_info.type_name.startswith('message_types.')):
self.__AddImport('from protorpc import message_types')
if type_info.type_name.startswith('extra_types.'):
self.__AddImport(
'from %s import extra_types' % self.__base_files_package)
return type_info
if type_name in self.PRIMITIVE_TYPE_INFO_MAP:
type_info = self.PRIMITIVE_TYPE_INFO_MAP[type_name]
return type_info
if type_name == 'array':
items = attrs.get('items')
if not items:
raise ValueError('Array type with no item type: %s' % attrs)
entry_name_hint = self.__names.ClassName(
items.get('title') or '%sListEntry' % name_hint)
entry_label = self.__ComputeLabel(items)
if entry_label == descriptor.FieldDescriptor.Label.REPEATED:
parent_name = self.__names.ClassName(
items.get('title') or name_hint)
entry_type_name = self.__AddEntryType(
entry_name_hint, items.get('items'), parent_name)
return TypeInfo(type_name=entry_type_name,
variant=messages.Variant.MESSAGE)
else:
return self.__GetTypeInfo(items, entry_name_hint)
elif type_name == 'any':
self.__AddImport('from %s import extra_types' %
self.__base_files_package)
return self.PRIMITIVE_TYPE_INFO_MAP['any']
elif type_name == 'object':
# TODO(craigcitro): Think of a better way to come up with names.
if not name_hint:
raise ValueError(
'Cannot create subtype without some name hint')
schema = dict(attrs)
schema['id'] = name_hint
self.AddDescriptorFromSchema(name_hint, schema)
self.__AddIfUnknown(name_hint)
return TypeInfo(
type_name=name_hint, variant=messages.Variant.MESSAGE)
raise ValueError('Unknown type: %s' % type_name)
def FixupMessageFields(self):
for message_type in self.file_descriptor.message_types:
self._FixupMessage(message_type)
def _FixupMessage(self, message_type):
with self.__DescriptorEnv(message_type):
for field in message_type.fields:
if field.field_descriptor.variant == messages.Variant.MESSAGE:
field_type_name = field.field_descriptor.type_name
field_type = self.LookupDescriptor(field_type_name)
if isinstance(field_type,
extended_descriptor.ExtendedEnumDescriptor):
field.field_descriptor.variant = messages.Variant.ENUM
for submessage_type in message_type.message_types:
self._FixupMessage(submessage_type)
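# Hedged illustration (hypothetical schema, not taken from a real discovery
# document): AddDescriptorFromSchema() consumes discovery-style schema dicts
# such as
#
#     registry.AddDescriptorFromSchema('Book', {
#         'id': 'Book',
#         'type': 'object',
#         'description': 'A book resource.',
#         'properties': {
#             'title': {'type': 'string', 'description': 'Book title.'},
#             'pageCount': {'type': 'integer', 'format': 'int32'},
#             'status': {'type': 'string', 'enum': ['DRAFT', 'PUBLISHED']},
#         },
#     })
#
# which registers a Book message with string and int32 fields plus a nested
# enum for the 'status' values on the registry's file descriptor.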
|
|
#!/usr/bin/env python
# Copyright 2013 AlchemyAPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import requests
try:
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import urlencode
except ImportError:
from urlparse import urlparse
from urllib2 import urlopen
from urllib import urlencode
try:
import json
except ImportError:
# Older versions of Python (i.e. 2.4) require simplejson instead of json
import simplejson as json
if __name__ == '__main__':
"""
    Writes the API key to the api_key.txt file. It will create the file if it doesn't exist.
    This function is intended to be called from the Python command line using: python alchemyapi.py YOUR_API_KEY
If you don't have an API key yet, register for one at: http://www.alchemyapi.com/api/register.html
INPUT:
argv[1] -> Your API key from AlchemyAPI. Should be 40 hex characters
OUTPUT:
none
"""
import sys
if len(sys.argv) == 2 and sys.argv[1]:
if len(sys.argv[1]) == 40:
# write the key to the file
f = open('api_key.txt', 'w')
f.write(sys.argv[1])
f.close()
print('Key: ' + sys.argv[1] + ' was written to api_key.txt')
print(
'You are now ready to start using AlchemyAPI. For an example, run: python example.py')
else:
print(
                'The key appears to be invalid. Please make sure to use the 40 character key assigned by AlchemyAPI')
class AlchemyAPI:
# Setup the endpoints
ENDPOINTS = {}
ENDPOINTS['sentiment'] = {}
ENDPOINTS['sentiment']['url'] = '/url/URLGetTextSentiment'
ENDPOINTS['sentiment']['text'] = '/text/TextGetTextSentiment'
ENDPOINTS['sentiment']['html'] = '/html/HTMLGetTextSentiment'
ENDPOINTS['sentiment_targeted'] = {}
ENDPOINTS['sentiment_targeted']['url'] = '/url/URLGetTargetedSentiment'
ENDPOINTS['sentiment_targeted']['text'] = '/text/TextGetTargetedSentiment'
ENDPOINTS['sentiment_targeted']['html'] = '/html/HTMLGetTargetedSentiment'
ENDPOINTS['author'] = {}
ENDPOINTS['author']['url'] = '/url/URLGetAuthor'
ENDPOINTS['author']['html'] = '/html/HTMLGetAuthor'
ENDPOINTS['keywords'] = {}
ENDPOINTS['keywords']['url'] = '/url/URLGetRankedKeywords'
ENDPOINTS['keywords']['text'] = '/text/TextGetRankedKeywords'
ENDPOINTS['keywords']['html'] = '/html/HTMLGetRankedKeywords'
ENDPOINTS['concepts'] = {}
ENDPOINTS['concepts']['url'] = '/url/URLGetRankedConcepts'
ENDPOINTS['concepts']['text'] = '/text/TextGetRankedConcepts'
ENDPOINTS['concepts']['html'] = '/html/HTMLGetRankedConcepts'
ENDPOINTS['entities'] = {}
ENDPOINTS['entities']['url'] = '/url/URLGetRankedNamedEntities'
ENDPOINTS['entities']['text'] = '/text/TextGetRankedNamedEntities'
ENDPOINTS['entities']['html'] = '/html/HTMLGetRankedNamedEntities'
ENDPOINTS['category'] = {}
ENDPOINTS['category']['url'] = '/url/URLGetCategory'
ENDPOINTS['category']['text'] = '/text/TextGetCategory'
ENDPOINTS['category']['html'] = '/html/HTMLGetCategory'
ENDPOINTS['relations'] = {}
ENDPOINTS['relations']['url'] = '/url/URLGetRelations'
ENDPOINTS['relations']['text'] = '/text/TextGetRelations'
ENDPOINTS['relations']['html'] = '/html/HTMLGetRelations'
ENDPOINTS['language'] = {}
ENDPOINTS['language']['url'] = '/url/URLGetLanguage'
ENDPOINTS['language']['text'] = '/text/TextGetLanguage'
ENDPOINTS['language']['html'] = '/html/HTMLGetLanguage'
ENDPOINTS['text'] = {}
ENDPOINTS['text']['url'] = '/url/URLGetText'
ENDPOINTS['text']['html'] = '/html/HTMLGetText'
ENDPOINTS['text_raw'] = {}
ENDPOINTS['text_raw']['url'] = '/url/URLGetRawText'
ENDPOINTS['text_raw']['html'] = '/html/HTMLGetRawText'
ENDPOINTS['title'] = {}
ENDPOINTS['title']['url'] = '/url/URLGetTitle'
ENDPOINTS['title']['html'] = '/html/HTMLGetTitle'
ENDPOINTS['feeds'] = {}
ENDPOINTS['feeds']['url'] = '/url/URLGetFeedLinks'
ENDPOINTS['feeds']['html'] = '/html/HTMLGetFeedLinks'
ENDPOINTS['microformats'] = {}
ENDPOINTS['microformats']['url'] = '/url/URLGetMicroformatData'
ENDPOINTS['microformats']['html'] = '/html/HTMLGetMicroformatData'
ENDPOINTS['combined'] = {}
ENDPOINTS['combined']['url'] = '/url/URLGetCombinedData'
ENDPOINTS['combined']['text'] = '/text/TextGetCombinedData'
ENDPOINTS['image'] = {}
ENDPOINTS['image']['url'] = '/url/URLGetImage'
ENDPOINTS['imagetagging'] = {}
ENDPOINTS['imagetagging']['url'] = '/url/URLGetRankedImageKeywords'
ENDPOINTS['imagetagging']['image'] = '/image/ImageGetRankedImageKeywords'
ENDPOINTS['facetagging'] = {}
ENDPOINTS['facetagging']['url'] = '/url/URLGetRankedImageFaceTags'
ENDPOINTS['facetagging']['image'] = '/image/ImageGetRankedImageFaceTags'
ENDPOINTS['taxonomy'] = {}
ENDPOINTS['taxonomy']['url'] = '/url/URLGetRankedTaxonomy'
ENDPOINTS['taxonomy']['html'] = '/html/HTMLGetRankedTaxonomy'
ENDPOINTS['taxonomy']['text'] = '/text/TextGetRankedTaxonomy'
# The base URL for all endpoints
BASE_URL = 'https://access.alchemyapi.com/calls'
s = requests.Session()
def __init__(self, apikey):
"""
Initializes the SDK so it can send requests to AlchemyAPI for analysis.
"""
self.apikey = apikey
def entities(self, flavor, data, options={}):
"""
Extracts the entities for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/entity-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/entity-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
disambiguate -> disambiguate entities (i.e. Apple the company vs. apple the fruit). 0: disabled, 1: enabled (default)
linkedData -> include linked data on disambiguated entities. 0: disabled, 1: enabled (default)
coreference -> resolve coreferences (i.e. the pronouns that correspond to named entities). 0: disabled, 1: enabled (default)
quotations -> extract quotations by entities. 0: disabled (default), 1: enabled.
            sentiment -> analyze sentiment for each entity. 0: disabled (default), 1: enabled. Requires 1 additional API transaction if enabled.
showSourceText -> 0: disabled (default), 1: enabled
maxRetrieve -> the maximum number of entities to retrieve (default: 50)
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['entities']:
return {'status': 'ERROR', 'statusInfo': 'entity extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['entities'][flavor], {}, options)
def keywords(self, flavor, data, options={}):
"""
Extracts the keywords from text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/keyword-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/keyword-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
keywordExtractMode -> normal (default), strict
sentiment -> analyze sentiment for each keyword. 0: disabled (default), 1: enabled. Requires 1 additional API transaction if enabled.
showSourceText -> 0: disabled (default), 1: enabled.
maxRetrieve -> the max number of keywords returned (default: 50)
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['keywords']:
return {'status': 'ERROR', 'statusInfo': 'keyword extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['keywords'][flavor], {}, options)
def concepts(self, flavor, data, options={}):
"""
Tags the concepts for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/concept-tagging/
For the docs, please refer to: http://www.alchemyapi.com/api/concept-tagging/
Available Options:
maxRetrieve -> the maximum number of concepts to retrieve (default: 8)
linkedData -> include linked data, 0: disabled, 1: enabled (default)
showSourceText -> 0:disabled (default), 1: enabled
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['concepts']:
return {'status': 'ERROR', 'statusInfo': 'concept tagging for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['concepts'][flavor], {}, options)
def sentiment(self, flavor, data, options={}):
"""
Calculates the sentiment for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/sentiment-analysis/
For the docs, please refer to: http://www.alchemyapi.com/api/sentiment-analysis/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
showSourceText -> 0: disabled (default), 1: enabled
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['sentiment']:
return {'status': 'ERROR', 'statusInfo': 'sentiment analysis for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['sentiment'][flavor], {}, options)
def sentiment_targeted(self, flavor, data, target, options={}):
"""
Calculates the targeted sentiment for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/sentiment-analysis/
For the docs, please refer to: http://www.alchemyapi.com/api/sentiment-analysis/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
target -> the word or phrase to run sentiment analysis on.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
showSourceText -> 0: disabled, 1: enabled
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure the target is valid
if target is None or target == '':
return {'status': 'ERROR', 'statusInfo': 'targeted sentiment requires a non-null target'}
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['sentiment_targeted']:
return {'status': 'ERROR', 'statusInfo': 'targeted sentiment analysis for ' + flavor + ' not available'}
# add the URL encoded data and target to the options and analyze
options[flavor] = data
options['target'] = target
return self.__analyze(AlchemyAPI.ENDPOINTS['sentiment_targeted'][flavor], {}, options)
def text(self, flavor, data, options={}):
"""
Extracts the cleaned text (removes ads, navigation, etc.) for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/text-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/text-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
useMetadata -> utilize meta description data, 0: disabled, 1: enabled (default)
extractLinks -> include links, 0: disabled (default), 1: enabled.
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['text']:
return {'status': 'ERROR', 'statusInfo': 'clean text extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
        return self.__analyze(AlchemyAPI.ENDPOINTS['text'][flavor], {}, options)
def text_raw(self, flavor, data, options={}):
"""
Extracts the raw text (includes ads, navigation, etc.) for a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/text-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/text-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
none
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['text_raw']:
return {'status': 'ERROR', 'statusInfo': 'raw text extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['text_raw'][flavor], {}, options)
def author(self, flavor, data, options={}):
"""
Extracts the author from a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/author-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/author-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
        Available Options:
none
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['author']:
return {'status': 'ERROR', 'statusInfo': 'author extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['author'][flavor], {}, options)
def language(self, flavor, data, options={}):
"""
Detects the language for text, a URL or HTML.
        For an overview, please refer to: http://www.alchemyapi.com/products/features/language-detection/
        For the docs, please refer to: http://www.alchemyapi.com/api/language-detection/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
none
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['language']:
return {'status': 'ERROR', 'statusInfo': 'language detection for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['language'][flavor], {}, options)
def title(self, flavor, data, options={}):
"""
Extracts the title for a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/text-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/text-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
useMetadata -> utilize title info embedded in meta data, 0: disabled, 1: enabled (default)
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['title']:
return {'status': 'ERROR', 'statusInfo': 'title extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['title'][flavor], {}, options)
def relations(self, flavor, data, options={}):
"""
Extracts the relations for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/relation-extraction/
For the docs, please refer to: http://www.alchemyapi.com/api/relation-extraction/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
sentiment -> 0: disabled (default), 1: enabled. Requires one additional API transaction if enabled.
keywords -> extract keywords from the subject and object. 0: disabled (default), 1: enabled. Requires one additional API transaction if enabled.
entities -> extract entities from the subject and object. 0: disabled (default), 1: enabled. Requires one additional API transaction if enabled.
requireEntities -> only extract relations that have entities. 0: disabled (default), 1: enabled.
sentimentExcludeEntities -> exclude full entity name in sentiment analysis. 0: disabled, 1: enabled (default)
disambiguate -> disambiguate entities (i.e. Apple the company vs. apple the fruit). 0: disabled, 1: enabled (default)
linkedData -> include linked data with disambiguated entities. 0: disabled, 1: enabled (default).
coreference -> resolve entity coreferences. 0: disabled, 1: enabled (default)
showSourceText -> 0: disabled (default), 1: enabled.
maxRetrieve -> the maximum number of relations to extract (default: 50, max: 100)
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['relations']:
return {'status': 'ERROR', 'statusInfo': 'relation extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['relations'][flavor], {}, options)
def category(self, flavor, data, options={}):
"""
Categorizes the text for text, a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/text-categorization/
For the docs, please refer to: http://www.alchemyapi.com/api/text-categorization/
INPUT:
flavor -> which version of the call, i.e. text, url or html.
data -> the data to analyze, either the text, the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
showSourceText -> 0: disabled (default), 1: enabled
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['category']:
return {'status': 'ERROR', 'statusInfo': 'text categorization for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['category'][flavor], {}, options)
def feeds(self, flavor, data, options={}):
"""
Detects the RSS/ATOM feeds for a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/feed-detection/
For the docs, please refer to: http://www.alchemyapi.com/api/feed-detection/
INPUT:
flavor -> which version of the call, i.e. url or html.
            data -> the data to analyze, either the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
none
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['feeds']:
return {'status': 'ERROR', 'statusInfo': 'feed detection for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['feeds'][flavor], {}, options)
def microformats(self, flavor, data, options={}):
"""
Parses the microformats for a URL or HTML.
For an overview, please refer to: http://www.alchemyapi.com/products/features/microformats-parsing/
For the docs, please refer to: http://www.alchemyapi.com/api/microformats-parsing/
INPUT:
flavor -> which version of the call, i.e. url or html.
            data -> the data to analyze, either the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
none
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Make sure this request supports this flavor
if flavor not in AlchemyAPI.ENDPOINTS['microformats']:
return {'status': 'ERROR', 'statusInfo': 'microformat extraction for ' + flavor + ' not available'}
# add the data to the options and analyze
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['microformats'][flavor], {}, options)
def imageExtraction(self, flavor, data, options={}):
"""
Extracts main image from a URL
INPUT:
flavor -> which version of the call (url only currently).
data -> URL to analyze
options -> various parameters that can be used to adjust how the API works,
see below for more info on the available options.
Available Options:
extractMode ->
trust-metadata : (less CPU intensive, less accurate)
always-infer : (more CPU intensive, more accurate)
OUTPUT:
The response, already converted from JSON to a Python object.
"""
if flavor not in AlchemyAPI.ENDPOINTS['image']:
return {'status': 'ERROR', 'statusInfo': 'image extraction for ' + flavor + ' not available'}
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['image'][flavor], {}, options)
def taxonomy(self, flavor, data, options={}):
"""
Taxonomy classification operations.
INPUT:
flavor -> which version of the call, i.e. url or html.
            data -> the data to analyze, either the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
showSourceText ->
include the original 'source text' the taxonomy categories were extracted from within the API response
Possible values:
1 - enabled
0 - disabled (default)
sourceText ->
where to obtain the text that will be processed by this API call.
AlchemyAPI supports multiple modes of text extraction:
web page cleaning (removes ads, navigation links, etc.), raw text extraction
(processes all web page text, including ads / nav links), visual constraint queries, and XPath queries.
Possible values:
cleaned_or_raw : cleaning enabled, fallback to raw when cleaning produces no text (default)
cleaned : operate on 'cleaned' web page text (web page cleaning enabled)
raw : operate on raw web page text (web page cleaning disabled)
cquery : operate on the results of a visual constraints query
Note: The 'cquery' http argument must also be set to a valid visual constraints query.
xpath : operate on the results of an XPath query
Note: The 'xpath' http argument must also be set to a valid XPath query.
cquery ->
a visual constraints query to apply to the web page.
xpath ->
an XPath query to apply to the web page.
baseUrl ->
rel-tag output base http url (must be uri-argument encoded)
OUTPUT:
The response, already converted from JSON to a Python object.
"""
if flavor not in AlchemyAPI.ENDPOINTS['taxonomy']:
return {'status': 'ERROR', 'statusInfo': 'taxonomy for ' + flavor + ' not available'}
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['taxonomy'][flavor], {}, options)
def combined(self, flavor, data, options={}):
"""
Combined call for page-image, entity, keyword, title, author, taxonomy, concept.
INPUT:
flavor -> which version of the call, i.e. url or html.
            data -> the data to analyze, either the url or html code.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
Available Options:
extract ->
Possible values: page-image, entity, keyword, title, author, taxonomy, concept
default : entity, keyword, taxonomy, concept
disambiguate ->
disambiguate detected entities
Possible values:
1 : enabled (default)
0 : disabled
linkedData ->
include Linked Data content links with disambiguated entities
Possible values :
1 : enabled (default)
0 : disabled
coreference ->
resolve he/she/etc coreferences into detected entities
Possible values:
1 : enabled (default)
0 : disabled
quotations ->
enable quotations extraction
Possible values:
1 : enabled
0 : disabled (default)
sentiment ->
enable entity-level sentiment analysis
Possible values:
1 : enabled
0 : disabled (default)
showSourceText ->
include the original 'source text' the entities were extracted from within the API response
Possible values:
1 : enabled
0 : disabled (default)
maxRetrieve ->
maximum number of named entities to extract
default : 50
baseUrl ->
rel-tag output base http url
OUTPUT:
The response, already converted from JSON to a Python object.
"""
if flavor not in AlchemyAPI.ENDPOINTS['combined']:
return {'status': 'ERROR', 'statusInfo': 'combined for ' + flavor + ' not available'}
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['combined'][flavor], {}, options)
def imageTagging(self, flavor, data, options={}):
"""
INPUT:
flavor -> which version of the call only url or image.
            data -> the data to analyze, either the url or path to image.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
"""
if flavor not in AlchemyAPI.ENDPOINTS['imagetagging']:
return {'status': 'ERROR', 'statusInfo': 'imagetagging for ' + flavor + ' not available'}
elif 'image' == flavor:
image = open(data, 'rb').read()
options['imagePostMode'] = 'raw'
return self.__analyze(AlchemyAPI.ENDPOINTS['imagetagging'][flavor], options, image)
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['imagetagging'][flavor], {}, options)
def faceTagging(self, flavor, data, options={}):
"""
INPUT:
flavor -> which version of the call only url or image.
            data -> the data to analyze, either the url or path to image.
options -> various parameters that can be used to adjust how the API works, see below for more info on the available options.
"""
if flavor not in AlchemyAPI.ENDPOINTS['facetagging']:
return {'status': 'ERROR', 'statusInfo': 'facetagging for ' + flavor + ' not available'}
elif 'image' == flavor:
image = open(data, 'rb').read()
options['imagePostMode'] = 'raw'
return self.__analyze(AlchemyAPI.ENDPOINTS['facetagging'][flavor], options, image)
options[flavor] = data
return self.__analyze(AlchemyAPI.ENDPOINTS['facetagging'][flavor], {}, options)
def __analyze(self, endpoint, params, post_data=bytearray()):
"""
HTTP Request wrapper that is called by the endpoint functions. This function is not intended to be called through an external interface.
It makes the call, then converts the returned JSON string into a Python object.
INPUT:
endpoint -> the API endpoint to call.
params -> dict of query-string parameters; the API key and JSON output mode are added here.
post_data -> the raw data to POST (e.g. image bytes), empty by default.
OUTPUT:
The response, already converted from JSON to a Python object.
"""
# Add the API Key and set the output mode to JSON
params['apikey'] = self.apikey
params['outputMode'] = 'json'
# Insert the base url
post_url = ""
try:
post_url = AlchemyAPI.BASE_URL + endpoint + \
'?' + urlencode(params).encode('utf-8')
except TypeError:
post_url = AlchemyAPI.BASE_URL + endpoint + '?' + urlencode(params)
results = ""
try:
results = self.s.post(url=post_url, data=post_data)
except Exception as e:
print(e)
return {'status': 'ERROR', 'statusInfo': 'network-error'}
try:
return results.json()
except Exception as e:
if results != "":
print(results)
print(e)
return {'status': 'ERROR', 'statusInfo': 'parse-error'}
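# Usage sketch (illustrative only): this assumes the AlchemyAPI() constructor
# defined earlier in this SDK and a valid API key; the exact response fields
# depend on the endpoint that is called.
#
#   alchemyapi = AlchemyAPI()
#   response = alchemyapi.combined('url', 'http://www.example.com/',
#                                  {'extract': 'entity,keyword,taxonomy'})
#   if response.get('status') == 'OK':
#       for entity in response.get('entities', []):
#           print(entity['text'], entity['relevance'])
#   else:
#       print(response.get('statusInfo'))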
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DebuggerProxy stands in for the debugger on the GUI machine; it connects to
a debugger running on a remote machine.
@author: Nicu Tofan <nicu.tofan@gmail.com>
"""
import cPickle
import logging
logger = logging.getLogger(__name__)
import zmq
import time
from PyQt4 import QtCore
from learn_spot.gui import debugger
ERR_SIGNAL = QtCore.SIGNAL("error(QString)")
class Runnable(QtCore.QRunnable):
"""
The worker thread for Debugger.
"""
def __init__(self, debugger, socket):
"""
Constructor
"""
super(Runnable, self).__init__()
self.debugger = debugger
self.setAutoDelete(True)
self.stop = False
self.socket = socket
def run(self):
"""
Entry point of the worker thread (QRunnable.run override): receive and dispatch messages until stopped.
"""
while not self.stop:
message = self.socket.recv()
try:
# interpret the result
response = cPickle.loads(message)
if not isinstance(response, dict):
self.debugger.forward_error(
"Remote sent unexpected message type: " + str(response.__class__))
elif not response.has_key('type'):
self.debugger.forward_error(
"Remote sent unexpected message: " + str(response))
else:
self.debugger.forward_message(response)
except Exception:
msg = 'Failed to process broadcasted message'
logger.debug(msg, exc_info=True)
self.debugger.forward_error(msg)
class DebuggerProxy(QtCore.QObject):
"""
The class represents a debugger running on a remote machine.
Parameters
----------
address : string
The IP address on which a DebuggerPublisher process is listening.
req_port : int
The port number on which a DebuggerPublisher process is listening.
The port is used to send requests and commands to the remote instance.
pub_port : int
The port number on which a DebuggerPublisher process is listening.
The port is used to monitor the remote instance.
Signals
-------
alive(bool)
Tell if alive or not.
debug_end(yaml_file)
Returning to ground state.
debug_start(yaml_file)
A file was successfully loaded into the debugger.
debug_run()
Entering paused state.
debug_paused()
Entering paused state.
debug_stopped()
Entering stopped state.
error(message)
An error happened.
"""
def __init__(self, address='127.0.0.1', req_port=5955, pub_port=5956):
"""
Constructor
"""
super(DebuggerProxy, self).__init__()
self.last_known_status = None
self.address = 'tcp://%s' % address
assert req_port != pub_port
assert req_port > 1024 and req_port < 65536
self.req_port = req_port
assert pub_port > 1024 and pub_port < 65536
self.pub_port = pub_port
# prepare the network
self.context = zmq.Context()
self.reconnect()
# heartbeat
self.timer_hart_beat = self.startTimer(1)
self.last_hartbeat_ok = False
def reconnect(self):
address_template = self.address + ':%d'
self.req_sock = self.context.socket(zmq.REQ)
self.req_sock.connect(address_template % self.req_port)
self.req_sock.setsockopt(zmq.LINGER, 100)
self.req_sock.setsockopt(zmq.RCVTIMEO, 100)
self.req_sock.setsockopt(zmq.SNDTIMEO, 100)
self.pub_sock = self.context.socket(zmq.SUB)
self.pub_sock.setsockopt(zmq.SUBSCRIBE, "")
self.pub_sock.connect(address_template % self.pub_port)
def disconnect(self):
address_template = self.address + ':%d'
if self.req_sock:
self.req_sock.disconnect(address_template % self.req_port)
if self.pub_sock:
self.pub_sock.disconnect(address_template % self.pub_port)
def timerEvent(self, event):
"""
"""
self.killTimer(self.timer_hart_beat)
if self.context is None:
self.context = zmq.Context()
self.reconnect()
if self.send_basic_command('status'):
self.emit(QtCore.SIGNAL("alive"), True)
next_after = 2000
if not self.last_hartbeat_ok:
self.req_sock.setsockopt(zmq.LINGER, 5000)
self.req_sock.setsockopt(zmq.RCVTIMEO, 5000)
self.req_sock.setsockopt(zmq.SNDTIMEO, 5000)
self.last_hartbeat_ok = True
else:
print 'not alive'
self.disconnect()
self.reconnect()
self.last_hartbeat_ok = False
next_after = 100
self.timer_hart_beat = self.startTimer(next_after)
def is_running(self):
"""
Tell whether the debugger is currently in the running state.
Sends the `status` command to update the local status.
"""
self.send_basic_command('status')
return self.last_known_status == debugger.STS_RUNNING
def unload_file(self):
"""
Terminates debugging session.
Sends the `unload_file` command.
"""
self.send_basic_command('unload_file')
def load_file(self, fname):
"""
Starts debugging given file.
Parameters
----------
fname : str
The path to the file to load on the remote machine.
Top level object must be a Train object.
The file is NOT uploaded from local machine.
Returns
-------
init_ok : bool
True if the file was successfully loaded, False otherwise
"""
self.send_basic_command('load_file', {'file': fname})
return self.last_known_status != debugger.STS_GROUND
def dbg_run(self, num_steps=None):
"""
Slot that instructs the debugger to run a number of steps.
Parameters
----------
num_steps : int
Number of epochs to perform. Pass None to run without interruption.
"""
self.send_basic_command('run', {'steps': num_steps})
def dbg_stop(self):
"""
Slot that instructs the debugger to terminate after current epoch.
"""
self.send_basic_command('stop')
def dbg_pause(self):
"""
Slot that instructs the debugger to pause after current epoch.
"""
self.send_basic_command('pause')
def dbg_continue(self):
"""
Slot that instructs the debugger to run without interruption.
"""
self.dbg_run(None)
def dbg_run_one(self):
"""
Slot that instructs the debugger to run one epoch then stop.
"""
self.dbg_run(1)
def forward_error(self, message):
"""
"""
self.emit(ERR_SIGNAL, message)
def forward_message(self, response):
"""
"""
resp_type = response['type']
if resp_type == 'debug_start':
self.last_known_status = response['state']
self.emit(QtCore.SIGNAL("debug_start"), response['message'])
elif resp_type == 'debug_end':
self.last_known_status = response['state']
self.emit(QtCore.SIGNAL("debug_end"), response['message'])
elif resp_type == 'debug_paused':
self.last_known_status = response['state']
self.emit(QtCore.SIGNAL("debug_paused"))
elif resp_type == 'debug_run':
self.last_known_status = response['state']
self.emit(QtCore.SIGNAL("debug_run"))
elif resp_type == 'debug_stopped':
self.last_known_status = response['state']
self.emit(QtCore.SIGNAL("debug_stopped"))
elif resp_type == 'state':
self.last_known_status = response['state']
self.emit(QtCore.SIGNAL("debug_state_change(int,int)"),
response['oldstate'],
self.last_known_status)
elif resp_type == 'error':
self.emit(ERR_SIGNAL, response['message'])
else:
self.emit(ERR_SIGNAL, 'Unknown message type: ' + resp_type)
self.updated_state(response['state'])
def updated_state(self, new_state):
"""
"""
if new_state != self.last_known_status:
self.last_known_status = new_state
if new_state == debugger.STS_STOPPED:
self.emit(QtCore.SIGNAL("debug_stopped()"))
elif new_state == debugger.STS_RUNNING:
self.emit(QtCore.SIGNAL("debug_run()"))
elif new_state == debugger.STS_PAUSED:
self.emit(QtCore.SIGNAL("debug_paused()"))
elif new_state == debugger.STS_GROUND:
self.emit(QtCore.SIGNAL("debug_end()"), "")
logger.debug('State is now %s', debugger.state_name(new_state))
def process_reply(self, command, response):
"""
Reply messages have `type: reply`, a `state` representing the
state of the debugger after the command and an optional
`warning` that contains errors produced while executing the command.
"""
if not response.has_key('state'):
self.emit(ERR_SIGNAL,
"Remote send `reply` message type without `state`: " +
str(response))
if response.has_key('warning'):
self.emit(ERR_SIGNAL,
"Remote error: " + response['warning'])
self.updated_state(response['state'])
def send_basic_command(self, command, args=None):
"""
All responses should have a `type` key to identify the kind of
response.
"""
b_finalized = False
try:
while self.context is None:
time.sleep(0.5)
# prepare the request
command = {'request': command}
if args:
command.update(args)
message = cPickle.dumps(command)
# send it and get a reply back
self.req_sock.send(message)
response = self.req_sock.recv()
# interpret the result
response = cPickle.loads(response)
if not isinstance(response, dict):
self.emit(ERR_SIGNAL,
"Remote send unexpected message type: " + str(response.__class__))
elif not response.has_key('type'):
self.emit(ERR_SIGNAL,
"Remote send unexpected message: " + str(response))
else:
resp_type = response['type']
if resp_type == 'reply':
self.process_reply(command, response)
b_finalized = True
except zmq.ZMQError:
self.emit(QtCore.SIGNAL("alive"), False)
except Exception:
msg = 'Failed to send basic command'
self.emit(ERR_SIGNAL, msg)
logger.debug(msg, exc_info=True)
return b_finalized
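# Minimal wiring sketch (illustrative only): it assumes a DebuggerPublisher is
# listening on the default ports of a remote machine and that a Qt event loop
# is running, since the heartbeat uses QObject timers and old-style signals.
#
#   from PyQt4 import QtGui
#   app = QtGui.QApplication([])
#   proxy = DebuggerProxy(address='127.0.0.1')
#   QtCore.QObject.connect(proxy, ERR_SIGNAL,
#                          lambda msg: logger.error("remote error: %s", msg))
#   if proxy.load_file('/remote/path/train.yaml'):
#       proxy.dbg_run_one()
#   app.exec_()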
|
|
"""
This module tracks the result of a count, and provides a base class to be
implemented by callers for reporting or analysis.
The exposed class, BaseResults, is an abstract base class.
Your implementation should inherit from BaseResults.
"""
import datetime
import json
import abc
import os
from .common import logger
class ExclusionDistributionPerformed:
"""
Information on any exclusion distribution which is performed during a
counting round.
"""
def __init__(self, candidates, transfer_value):
"""
candidates: a List of candidate_ids
transfer_value: transfer value, as a Fraction
"""
self.candidates = candidates
self.transfer_value = transfer_value
class ElectionDistributionPerformed:
"""
Information on any election distribution which is performed during a
counting round.
"""
def __init__(self, candidate_id, transfer_value):
"""
transfer_value is a Fraction instance
"""
self.candidate_id = candidate_id
self.transfer_value = transfer_value
class CandidateElected:
"""
Information on the election of a candidate.
"""
def __init__(self, candidate_id, order, excess_votes, paper_count, transfer_value):
"""
candidate_id: the candidate elected
order: the number of the spot the candidate was elected to [1..N_vacancies]
excess_votes: the number of excess votes the candidate received
paper_count: the number of papers the candidate held at the time of election
transfer_value: the transfer value for the excess papers (0 if no excess papers)
"""
self.candidate_id = candidate_id
self.order = order
self.excess_votes = excess_votes
self.paper_count = paper_count
self.transfer_value = transfer_value
class CandidatesExcluded:
"""
Information on the exclusion of one or more candidates.
"""
def __init__(self, candidates, transfer_values, reason):
"""
candidates: list of candidate_ids of the candidates excluded
transfer_values: the transfer values of papers to be distributed (a list of Fraction instances)
reason: an instance of ExclusionReason
"""
self.candidates = candidates
self.transfer_values = transfer_values
self.reason = reason
class ExclusionReason:
"""
Information of interest about an exclusion of one or more candidates.
This exists only to make the information available to users of the
counter; the data is not used to make any decisions which affect count
results.
"""
def __init__(self, reason, info):
"""
reason: a string identifying the reason
info: additional information
"""
self.reason, self.info = reason, info
class ActProvision:
"""
Note that a provision of the Act has been used.
"""
def __init__(self, text):
"""
text: textual description of the provision used.
"""
self.text = text
class BaseResults(metaclass=abc.ABCMeta):
"""
Base class, with callback hooks for each event type which may occur.
Each callback represents an event as the senate election progresses.
The concrete implementation is responsible for tracking events.
"""
@abc.abstractmethod
def round_begin(self, round_number):
"""
Called by the counter at the commencement of a counting round.
"""
pass
@abc.abstractmethod
def round_complete(self):
"""
Called by the counter at the conclusion of a counting round.
"""
self.round_info = None
@abc.abstractmethod
def exclusion_distribution_performed(self, obj):
"""
Called by the counter any time a distribution of papers is performed.
``obj`` is an instance of ExclusionDistributionPerformed.
"""
pass
@abc.abstractmethod
def election_distribution_performed(self, obj):
"""
Called by the counter any time a distribution of papers is performed.
``obj`` is an instance of ElectionDistributionPerformed.
"""
pass
@abc.abstractmethod
def candidate_aggregates(self, obj):
"""
Called by the counter after all papers have been distributed for the
round. ``obj`` is an instance of CandidateAggregates.
"""
pass
@abc.abstractmethod
def candidate_elected(self, obj):
"""
Called by the counter when a candidate is elected.
``obj`` is an instance of CandidateElected.
"""
pass
@abc.abstractmethod
def candidates_excluded(self, obj):
"""
Called by the counter when candidate(s) are excluded.
``obj`` is an instance of CandidatesExcluded
"""
pass
@abc.abstractmethod
def provision_used(self, obj):
"""
Called by the counter when a provision of the act
is used - for example, to resolve a tie, or to
terminate the count.
``obj`` is an instance of ActProvision.
"""
pass
@abc.abstractmethod
def started(self, vacancies, total_papers, quota):
"""
Called by the counter when the election count begins.
vacancies: the number of vacancies to be filled.
total_papers: the total number of formal papers in the count.
quota: the quota of votes required for election.
"""
pass
@abc.abstractmethod
def finished(self):
"""
Called by the counter when the election count has finished.
"""
pass
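# A minimal concrete implementation sketch (illustrative only): it satisfies
# the abstract interface by simply logging each event, in contrast to
# JSONResults below, which accumulates the events into a full report.
#
#   class LoggingResults(BaseResults):
#       def started(self, vacancies, total_papers, quota):
#           logger.info("count started: %s vacancies, quota %s", vacancies, quota)
#       def round_begin(self, round_number):
#           logger.info("round %s begins", round_number)
#       def round_complete(self):
#           logger.info("round complete")
#       def election_distribution_performed(self, obj):
#           logger.info("election distribution, value %s", obj.transfer_value)
#       def exclusion_distribution_performed(self, obj):
#           logger.info("exclusion distribution, value %s", obj.transfer_value)
#       def candidate_aggregates(self, obj):
#           pass
#       def candidate_elected(self, obj):
#           logger.info("elected %s in spot %s", obj.candidate_id, obj.order)
#       def candidates_excluded(self, obj):
#           logger.info("excluded %s (%s)", obj.candidates, obj.reason.reason)
#       def provision_used(self, obj):
#           logger.info("provision used: %s", obj.text)
#       def finished(self):
#           logger.info("count finished")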
class JSONResults(BaseResults):
def __init__(self, filename, test_log_dir, candidate_ids, parties, candidate_order_fn, get_candidate_title, get_candidate_party, **kwargs):
self.filename = filename
self.test_log_dir = test_log_dir
self.candidate_ids = candidate_ids
self.parties = parties
self.candidate_order_fn = candidate_order_fn
self.get_candidate_title = get_candidate_title
self.get_candidate_party = get_candidate_party
self.template_variables = kwargs
self.aggregates = []
self.vacancies = None
self.candidates_affected_by_round = None
self.rounds = []
self._number_excluded = 0
self._start_time = datetime.datetime.now()
# track events by candidate_id
self._candidates_elected = {}
self._candidates_excluded = {}
def started(self, vacancies, total_papers, quota):
self.vacancies = vacancies
self.total_papers = total_papers
self.quota = quota
def round_begin(self, round_number):
self.current_round = round_number
self.candidates_affected_by_round = set()
self.round_info = {
'number': round_number,
'note': '',
'elected': [],
'exclusion': None,
'distribution': None
}
def election_distribution_performed(self, obj):
self.candidates_affected_by_round.add(obj.candidate_id)
self.round_info['distribution'] = {
'type': 'election',
'distributed_candidates': [obj.candidate_id],
'transfer_value': float(obj.transfer_value)
}
def exclusion_distribution_performed(self, obj):
for candidate_id in obj.candidates:
self.candidates_affected_by_round.add(candidate_id)
self.round_info['distribution'] = {
'type': 'exclusion',
'distributed_candidates': obj.candidates,
'transfer_value': float(obj.transfer_value)
}
def candidate_aggregates(self, obj):
self.aggregates.append(obj)
self.json_log(obj)
def candidate_elected(self, obj):
self._candidates_elected[obj.candidate_id] = (self.current_round, obj)
self.candidates_affected_by_round.add(obj.candidate_id)
info = {
'id': obj.candidate_id,
'pos': obj.order,
}
if obj.excess_votes is not None and obj.paper_count is not None:
info['transfer'] = {
'excess': obj.excess_votes,
'paper_count': obj.paper_count,
'value': float(obj.transfer_value)
}
self.round_info['elected'].append(info)
def candidates_excluded(self, obj):
for candidate_id in obj.candidates:
self._number_excluded += 1
self._candidates_excluded[candidate_id] = (self.current_round, self._number_excluded, obj)
self.candidates_affected_by_round.add(candidate_id)
info = {
'candidates': obj.candidates,
'reason': obj.reason.reason,
'transfers': [float(t) for t in obj.transfer_values],
}
info.update(obj.reason.info)
self.round_info['exclusion'] = info
def provision_used(self, obj):
self.round_info['note'] += obj.text
def candidate_ids_display(self, candidate_aggregates):
return sorted(self.candidate_ids, key=self.candidate_order_fn)
def candidate_election_order(self, candidate_id):
if candidate_id not in self._candidates_elected:
return self.vacancies + 1
_, obj = self._candidates_elected[candidate_id]
return obj.order
def round_count(self):
def exloss(a):
return {
'exhausted_papers': a.get_exhausted_papers(),
'exhausted_votes': a.get_exhausted_votes(),
'gain_loss_papers': a.get_gain_loss_papers(),
'gain_loss_votes': a.get_gain_loss_votes(),
}
def agg(a):
return {
'votes': a.get_vote_count(candidate_id),
'papers': a.get_paper_count(candidate_id)
}
last_candidate_aggregates = None
if len(self.aggregates) > 1:
last_candidate_aggregates = self.aggregates[-2]
candidate_aggregates = None
if len(self.aggregates) > 0:
candidate_aggregates = self.aggregates[-1]
r = {
'candidates': [],
'after': exloss(candidate_aggregates)
}
if last_candidate_aggregates is not None:
before = exloss(last_candidate_aggregates)
r['delta'] = dict((t, r['after'][t] - before[t]) for t in before)
for candidate_id in reversed(sorted(
self.candidate_ids,
key=lambda x: (candidate_aggregates.get_vote_count(x), self.vacancies - self.candidate_election_order(x)))):
entry = {
'id': candidate_id,
'after': agg(candidate_aggregates),
}
if candidate_id in self._candidates_elected:
_, obj = self._candidates_elected[candidate_id]
entry['elected'] = obj.order
if candidate_id in self._candidates_excluded:
_, entry['excluded'], _ = self._candidates_excluded[candidate_id]
done = False
if entry.get('excluded'):
if candidate_id not in self.candidates_affected_by_round and \
not candidate_aggregates.get_candidate_has_papers(candidate_id):
done = True
if last_candidate_aggregates is not None:
before = agg(last_candidate_aggregates)
entry['delta'] = dict((t, entry['after'][t] - before[t]) for t in before)
if not done:
r['candidates'].append(entry)
r['total'] = {
'papers': sum(t['after']['papers'] for t in r['candidates']) + r['after']['exhausted_papers'] + r['after']['gain_loss_papers'],
'votes': sum(t['after']['votes'] for t in r['candidates']) + r['after']['exhausted_votes'] + r['after']['gain_loss_votes'],
}
return r
def round_complete(self):
self.round_info['count'] = self.round_count()
self.rounds.append(self.round_info)
def finished(self):
self._end_time = datetime.datetime.now()
self.write_json()
def summary(self):
r = {
'elected': [],
'excluded': []
}
def in_order(l):
return enumerate(sorted(l, key=lambda k: l[k]['order']))
def elected_json(candidate_id):
round_number, obj = self._candidates_elected[candidate_id]
r = {
'id': candidate_id,
'round': round_number,
'order': obj.order,
'excess_votes': obj.excess_votes,
'paper_count': obj.paper_count,
'transfer_value': float(obj.transfer_value)
}
return r
def excluded_json(candidate_id):
round_number, order, obj = self._candidates_excluded[candidate_id]
r = {
'id': candidate_id,
'round': round_number,
'order': order,
'transfer_values': [float(t) for t in obj.transfer_values],
'reason': obj.reason.reason
}
r.update(obj.reason.info)
return r
for candidate_id in sorted(self._candidates_elected, key=lambda x: self._candidates_elected[x][1].order):
r['elected'].append(elected_json(candidate_id))
for candidate_id in sorted(self._candidates_excluded, key=lambda x: self._candidates_excluded[x][1]):
r['excluded'].append(excluded_json(candidate_id))
return r
def party_json(self):
return dict((party, {
'name': self.parties[party],
}) for party in self.parties)
def candidate_json(self):
return dict((candidate_id, {
'title': self.get_candidate_title(candidate_id),
'party': self.get_candidate_party(candidate_id),
'id': candidate_id
}) for candidate_id in self.candidate_ids)
def json_log(self, candidate_aggregates):
if self.test_log_dir is None:
return
log = []
for candidate_id in self.candidate_ids_display(candidate_aggregates):
log.append((self.get_candidate_title(candidate_id), candidate_aggregates.get_vote_count(candidate_id)))
with open(os.path.join(self.test_log_dir, 'round_%d.json' % (self.current_round)), 'w') as fd:
json.dump(log, fd)
def write_json(self):
params = {
'total_papers': self.total_papers,
'quota': self.quota,
'vacancies': self.vacancies,
'started': self._start_time.strftime("%Y-%m-%d %H:%M"),
'finished': self._end_time.strftime("%Y-%m-%d %H:%M")
}
params.update(self.template_variables)
obj = {
'candidates': self.candidate_json(),
'parties': self.party_json(),
'parameters': params,
'rounds': self.rounds,
'summary': self.summary(),
}
with open(self.filename, 'w') as fd:
try:
json.dump(obj, fd)
except TypeError:
logger.error("failed to serialise data")
logger.error("%s" % (repr(obj)))
raise
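# Output sketch (illustrative): write_json() serialises a single object whose
# top-level structure, built from the methods above, looks like:
#
#   {
#     "candidates": {candidate_id: {"title": ..., "party": ..., "id": ...}},
#     "parties":    {party: {"name": ...}},
#     "parameters": {"total_papers": ..., "quota": ..., "vacancies": ...,
#                    "started": ..., "finished": ..., ...},
#     "rounds":     [{"number": 1, "note": "", "elected": [...],
#                     "exclusion": ..., "distribution": ..., "count": {...}},
#                    ...],
#     "summary":    {"elected": [...], "excluded": [...]}
#   }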
|
|
#!/usr/bin/env python
#===- lib/asan/scripts/asan_symbolize.py -----------------------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
import bisect
import os
import re
import subprocess
import sys
llvm_symbolizer = None
symbolizers = {}
filetypes = {}
vmaddrs = {}
DEBUG = False
# FIXME: merge the code that calls fix_filename().
def fix_filename(file_name):
for path_to_cut in sys.argv[1:]:
file_name = re.sub('.*' + path_to_cut, '', file_name)
file_name = re.sub('.*asan_[a-z_]*.cc:[0-9]*', '_asan_rtl_', file_name)
file_name = re.sub('.*crtstuff.c:0', '???:0', file_name)
return file_name
class Symbolizer(object):
def __init__(self):
pass
def symbolize(self, addr, binary, offset):
"""Symbolize the given address (pair of binary and offset).
Overridden in subclasses.
Args:
addr: virtual address of an instruction.
binary: path to executable/shared object containing this instruction.
offset: instruction offset in the @binary.
Returns:
list of strings (one string for each inlined frame) describing
the code locations for this instruction (that is, function name, file
name, line and column numbers).
"""
return None
class LLVMSymbolizer(Symbolizer):
def __init__(self, symbolizer_path):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
if not os.path.exists(self.symbolizer_path):
return None
cmd = [self.symbolizer_path,
'--use-symbol-table=true',
'--demangle=false',
'--functions=true',
'--inlining=true']
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if not self.pipe:
return None
result = []
try:
symbolizer_input = '%s %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
while True:
function_name = self.pipe.stdout.readline().rstrip()
if not function_name:
break
file_name = self.pipe.stdout.readline().rstrip()
file_name = fix_filename(file_name)
if (not function_name.startswith('??') and
not file_name.startswith('??')):
# Append only valid frames.
result.append('%s in %s %s' % (addr, function_name,
file_name))
except Exception:
result = []
if not result:
result = None
return result
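# Protocol sketch (illustrative): llvm-symbolizer reads "binary offset" lines
# on stdin and, with --functions and --inlining enabled, prints a function
# name line and a "file:line:column" line per (possibly inlined) frame,
# terminated by a blank line -- which is what the readline() loop above
# relies on to detect the end of a response.
#
#   sym = LLVMSymbolizer('/path/to/llvm-symbolizer')  # hypothetical path
#   frames = sym.symbolize('0x7f6e35cf2e45', '/blah/foo.so', '0x11fe45')
#   # frames is None on failure, otherwise a list such as
#   # ['0x7f6e35cf2e45 in my_function /src/foo.cc:42']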
def LLVMSymbolizerFactory(system):
if system == 'Linux':
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
return LLVMSymbolizer(symbolizer_path)
return None
class Addr2LineSymbolizer(Symbolizer):
def __init__(self, binary):
super(Addr2LineSymbolizer, self).__init__()
self.binary = binary
self.pipe = self.open_addr2line()
def open_addr2line(self):
cmd = ['addr2line', '-f', '-e', self.binary]
if DEBUG:
print ' '.join(cmd)
return subprocess.Popen(cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
try:
print >> self.pipe.stdin, offset
function_name = self.pipe.stdout.readline().rstrip()
file_name = self.pipe.stdout.readline().rstrip()
except Exception:
function_name = ''
file_name = ''
file_name = fix_filename(file_name)
return ['%s in %s %s' % (addr, function_name, file_name)]
class DarwinSymbolizer(Symbolizer):
def __init__(self, addr, binary):
super(DarwinSymbolizer, self).__init__()
self.binary = binary
# Guess which arch we're running. 10 = len('0x') + 8 hex digits.
if len(addr) > 10:
self.arch = 'x86_64'
else:
self.arch = 'i386'
self.vmaddr = None
self.pipe = None
def get_binary_vmaddr(self):
"""Get the slide value to be added to the address.
We're looking for the following piece in otool -l output:
Load command 0
cmd LC_SEGMENT
cmdsize 736
segname __TEXT
vmaddr 0x00000000
"""
if self.vmaddr:
return self.vmaddr
cmdline = ['otool', '-l', self.binary]
pipe = subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
is_text = False
vmaddr = 0
for line in pipe.stdout:
line = line.strip()
if line.startswith('segname'):
is_text = (line == 'segname __TEXT')
continue
if line.startswith('vmaddr') and is_text:
sv = line.split(' ')
vmaddr = int(sv[-1], 16)
break
self.vmaddr = vmaddr
return self.vmaddr
def write_addr_to_pipe(self, offset):
slide = self.get_binary_vmaddr()
print >> self.pipe.stdin, '0x%x' % (int(offset, 16) + slide)
def open_atos(self):
if DEBUG:
print 'atos -o %s -arch %s' % (self.binary, self.arch)
cmdline = ['atos', '-o', self.binary, '-arch', self.arch]
self.pipe = subprocess.Popen(cmdline,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
if self.binary != binary:
return None
self.open_atos()
self.write_addr_to_pipe(offset)
self.pipe.stdin.close()
atos_line = self.pipe.stdout.readline().rstrip()
# A well-formed atos response looks like this:
# foo(type1, type2) (in object.name) (filename.cc:80)
match = re.match('^(.*) \(in (.*)\) \((.*:\d*)\)$', atos_line)
if DEBUG:
print 'atos_line: ', atos_line
if match:
function_name = match.group(1)
function_name = re.sub('\(.*?\)', '', function_name)
file_name = fix_filename(match.group(3))
return ['%s in %s %s' % (addr, function_name, file_name)]
else:
return ['%s in %s' % (addr, atos_line)]
# Chain several symbolizers so that if one symbolizer fails, we fall back
# to the next symbolizer in chain.
class ChainSymbolizer(Symbolizer):
def __init__(self, symbolizer_list):
super(ChainSymbolizer, self).__init__()
self.symbolizer_list = symbolizer_list
def symbolize(self, addr, binary, offset):
"""Overrides Symbolizer.symbolize."""
for symbolizer in self.symbolizer_list:
if symbolizer:
result = symbolizer.symbolize(addr, binary, offset)
if result:
return result
return None
def append_symbolizer(self, symbolizer):
self.symbolizer_list.append(symbolizer)
def BreakpadSymbolizerFactory(binary):
suffix = os.getenv('BREAKPAD_SUFFIX')
if suffix:
filename = binary + suffix
if os.access(filename, os.F_OK):
return BreakpadSymbolizer(filename)
return None
def SystemSymbolizerFactory(system, addr, binary):
if system == 'Darwin':
return DarwinSymbolizer(addr, binary)
elif system == 'Linux':
return Addr2LineSymbolizer(binary)
class BreakpadSymbolizer(Symbolizer):
def __init__(self, filename):
super(BreakpadSymbolizer, self).__init__()
self.filename = filename
lines = file(filename).readlines()
self.files = []
self.symbols = {}
self.address_list = []
self.addresses = {}
# MODULE mac x86_64 A7001116478B33F18FF9BEDE9F615F190 t
fragments = lines[0].rstrip().split()
self.arch = fragments[2]
self.debug_id = fragments[3]
self.binary = ' '.join(fragments[4:])
self.parse_lines(lines[1:])
def parse_lines(self, lines):
cur_function_addr = ''
for line in lines:
fragments = line.split()
if fragments[0] == 'FILE':
assert int(fragments[1]) == len(self.files)
self.files.append(' '.join(fragments[2:]))
elif fragments[0] == 'PUBLIC':
self.symbols[int(fragments[1], 16)] = ' '.join(fragments[3:])
elif fragments[0] in ['CFI', 'STACK']:
pass
elif fragments[0] == 'FUNC':
cur_function_addr = int(fragments[1], 16)
if cur_function_addr not in self.symbols:
self.symbols[cur_function_addr] = ' '.join(fragments[4:])
else:
# Line starting with an address.
addr = int(fragments[0], 16)
self.address_list.append(addr)
# Tuple of symbol address, size, line, file number.
self.addresses[addr] = (cur_function_addr,
int(fragments[1], 16),
int(fragments[2]),
int(fragments[3]))
self.address_list.sort()
def get_sym_file_line(self, addr):
key = None
if addr in self.addresses:
key = addr
else:
index = bisect.bisect_left(self.address_list, addr)
if index == 0:
return None
else:
key = self.address_list[index - 1]
sym_id, size, line_no, file_no = self.addresses[key]
symbol = self.symbols[sym_id]
filename = self.files[file_no]
if addr < key + size:
return symbol, filename, line_no
else:
return None
def symbolize(self, addr, binary, offset):
if self.binary != binary:
return None
res = self.get_sym_file_line(int(offset, 16))
if res:
function_name, file_name, line_no = res
result = ['%s in %s %s:%d' % (
addr, function_name, file_name, line_no)]
print result
return result
else:
return None
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None):
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.system = os.uname()[0]
if self.system in ['Linux', 'Darwin']:
self.llvm_symbolizer = LLVMSymbolizerFactory(self.system)
else:
raise Exception('Unknown system')
def symbolize_address(self, addr, binary, offset):
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
[BreakpadSymbolizerFactory(binary), self.llvm_symbolizer])
result = symbolizers[binary].symbolize(addr, binary, offset)
if result is None:
# Initialize system symbolizer only if other symbolizers failed.
symbolizers[binary].append_symbolizer(
SystemSymbolizerFactory(self.system, addr, binary))
result = symbolizers[binary].symbolize(addr, binary, offset)
# The system symbolizer must produce some result.
assert result
return result
def print_symbolized_lines(self, symbolized_lines):
if not symbolized_lines:
print self.current_line
else:
for symbolized_frame in symbolized_lines:
print ' #' + str(self.frame_no) + ' ' + symbolized_frame.rstrip()
self.frame_no += 1
def process_stdin(self):
self.frame_no = 0
for line in sys.stdin:
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
'^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)')
match = re.match(stack_trace_line_format, line)
if not match:
print self.current_line
continue
if DEBUG:
print line
_, frameno_str, addr, binary, offset = match.groups()
if frameno_str == '0':
# Assume that frame #0 is the first frame of new stack trace.
self.frame_no = 0
original_binary = binary
if self.binary_name_filter:
binary = self.binary_name_filter(binary)
symbolized_line = self.symbolize_address(addr, binary, offset)
if not symbolized_line:
if original_binary != binary:
symbolized_line = self.symbolize_address(addr, binary, offset)
self.print_symbolized_lines(symbolized_line)
if __name__ == '__main__':
loop = SymbolizationLoop()
loop.process_stdin()
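# Illustrative check of the frame format accepted by process_stdin(); the
# regex above matches ASan report lines such as the commented example:
#
#   import re
#   fmt = '^( *#([0-9]+) *)(0x[0-9a-f]+) *\((.*)\+(0x[0-9a-f]+)\)'
#   m = re.match(fmt, '    #0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)')
#   _, frameno, addr, binary, offset = m.groups()
#   # frameno == '0', addr == '0x7f6e35cf2e45',
#   # binary == '/blah/foo.so', offset == '0x11fe45'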
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os.path as op
import sys
import logging
import string
from collections import defaultdict
from itertools import product, combinations
from jcvi.formats.blast import BlastLine
from jcvi.formats.fasta import Fasta
from jcvi.formats.bed import Bed
from jcvi.formats.base import must_open, BaseFile
from jcvi.utils.grouper import Grouper
from jcvi.utils.cbook import gene_name
from jcvi.compara.synteny import AnchorFile, check_beds
from jcvi.apps.base import OptionParser, glob, ActionDispatcher, \
need_update, sh, mkdir
class OMGFile (BaseFile):
def __init__(self, filename):
super(OMGFile, self).__init__(filename)
fp = open(filename)
inblock = False
components = []
component = []
for row in fp:
if inblock:
atoms = row.split()
natoms = len(atoms)
assert natoms in (0, 7)
if natoms:
gene, taxa = atoms[0], atoms[5]
component.append((gene, taxa))
else:
inblock = False
components.append(tuple(component))
if row.strip().startswith("---"):
inblock = True
component = []
if inblock:
components.append(tuple(component))
self.components = components
def best(self):
bb = set()
for component in self.components:
size = len(component)
if size > 1:
bb.add(component)
return bb
def main():
actions = (
('tandem', 'identify tandem gene groups within certain distance'),
('ortholog', 'run a combined synteny and RBH pipeline to call orthologs'),
('group', 'cluster the anchors into ortho-groups'),
('omgprepare', 'prepare weights file to run Sankoff OMG algorithm'),
('omg', 'generate a series of Sankoff OMG algorithm inputs'),
('omgparse', 'parse the OMG outputs to get gene lists'),
('enrich', 'enrich OMG output by pulling genes missed by OMG'),
('layout', 'layout the gene lists'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def get_weights(weightsfiles=None):
if weightsfiles is None:
weightsfiles = glob("*.weights")
weights = defaultdict(list)
for row in must_open(weightsfiles):
a, b, c = row.split()
weights[a].append((a, b, c))
return weights
def get_edges(weightsfiles=None):
if weightsfiles is None:
weightsfiles = glob("*.weights")
edges = {}
for row in must_open(weightsfiles):
a, b, c = row.split()
c = int(c)
edges[(a, b)] = c
edges[(b, a)] = c
return edges
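# Input format sketch (illustrative gene names): each row of a "*.weights"
# file is "geneA geneB score", so a row "Bra000001 Bo1g000010 85" gives
#   get_weights()["Bra000001"] == [("Bra000001", "Bo1g000010", "85")]
#   get_edges()[("Bra000001", "Bo1g000010")] == 85   # and the reverse key too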
def get_info():
infofiles = glob("*.info")
info = {}
for row in must_open(infofiles):
a = row.split()[0]
info[a] = row.rstrip()
return info
def enrich(args):
"""
%prog enrich omgfile groups ntaxa > enriched.omg
Enrich OMG output by pulling in genes missed by OMG.
"""
p = OptionParser(enrich.__doc__)
p.add_option("--ghost", default=False, action="store_true",
help="Add ghost homologs already used [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
omgfile, groupsfile, ntaxa = args
ntaxa = int(ntaxa)
ghost = opts.ghost
# Get gene pair => weight mapping
weights = get_edges()
info = get_info()
# Get gene => taxon mapping
info = dict((k, v.split()[5]) for k, v in info.items())
groups = Grouper()
fp = open(groupsfile)
for row in fp:
members = row.strip().split(",")
groups.join(*members)
logging.debug("Imported {0} families with {1} members.".\
format(len(groups), groups.num_members))
seen = set()
omggroups = Grouper()
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
genes = genes.split(",")
seen.update(genes)
omggroups.join(*genes)
nmembers = omggroups.num_members
logging.debug("Imported {0} OMG families with {1} members.".\
format(len(omggroups), nmembers))
assert nmembers == len(seen)
alltaxa = set(str(x) for x in range(ntaxa))
recruited = []
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
genes = genes.split(",")
a = genes[0]
idxs = set(idxs.split(","))
missing_taxa = alltaxa - idxs
if not missing_taxa:
print row.rstrip()
continue
leftover = groups[a]
if not ghost:
leftover = set(leftover) - seen
if not leftover:
print row.rstrip()
continue
leftover_sorted_by_taxa = dict((k, \
[x for x in leftover if info[x] == k]) \
for k in missing_taxa)
#print genes, leftover
#print leftover_sorted_by_taxa
solutions = []
for solution in product(*leftover_sorted_by_taxa.values()):
score = sum(weights.get((a, b), 0) for a in solution for b in genes)
if score == 0:
continue
score += sum(weights.get((a, b), 0) for a, b in combinations(solution, 2))
solutions.append((score, solution))
#print solution, score
best_solution = max(solutions) if solutions else None
if best_solution is None:
print row.rstrip()
continue
#print "best ==>", best_solution
best_score, best_addition = best_solution
genes.extend(best_addition)
recruited.extend(best_addition)
genes = sorted([(info[x], x) for x in genes])
idxs, genes = zip(*genes)
if ghost: # decorate additions so it's clear that they were added
pgenes = []
for g in genes:
if g in recruited and g in seen:
pgenes.append("|{0}|".format(g))
else:
pgenes.append(g)
genes = pgenes
print "\t".join((",".join(genes), ",".join(idxs)))
if not ghost:
seen.update(best_addition)
logging.debug("Recruited {0} new genes.".format(len(recruited)))
def pairwise_distance(a, b, threadorder):
d = 0
for x, y in zip(a, b)[:-1]: # Last column not used
x, y = x.strip("|"), y.strip("|")
if "." in (x, y):
dd = 50
else:
xi, x = threadorder[x]
yi, y = threadorder[y]
dd = min(abs(xi - yi), 50)
d += dd
return d
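# Illustrative example (hypothetical accessions): with threadorder mapping
# "g1" to rank 10 and "g2" to rank 12, the rows ["g1", ".", "BR"] and
# ["g2", "g3", "BR"] score min(|10 - 12|, 50) + 50 = 52, because "." (a
# missing gene) contributes the capped distance of 50 and the trailing taxa
# column is ignored.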
def insert_into_threaded(atoms, threaded, threadorder):
min_idx, min_d = 0, 1000
for i, t in enumerate(threaded):
# calculate distance
d = pairwise_distance(atoms, t, threadorder)
if d < min_d:
min_idx = i
min_d = d
i = min_idx
t = threaded[i]
threaded.insert(i, atoms)
logging.debug("Insert {0} before {1} (d={2})".format(atoms, t, min_d))
def sort_layout(thread, listfile, column=0):
"""
Sort the syntelog table according to chromosomal positions. First orient the
contents against threadbed, then insert the contents not in threadbed next to
their nearest neighbor.
"""
from jcvi.formats.base import DictFile
outfile = listfile.rsplit(".", 1)[0] + ".sorted.list"
threadorder = thread.order
fw = open(outfile, "w")
lt = DictFile(listfile, keypos=column, valuepos=None)
threaded = []
imported = set()
for t in thread:
accn = t.accn
if accn not in lt:
continue
imported.add(accn)
atoms = lt[accn]
threaded.append(atoms)
assert len(threaded) == len(imported)
total = sum(1 for x in open(listfile))
logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded)))
fp = open(listfile)
for row in fp:
atoms = row.split()
accn = atoms[0]
if accn in imported:
continue
insert_into_threaded(atoms, threaded, threadorder)
for atoms in threaded:
print >> fw, "\t".join(atoms)
fw.close()
logging.debug("File `{0}` sorted to `{1}`.".format(outfile, thread.filename))
def layout(args):
"""
%prog layout omgfile taxa
Build column formatted gene lists after omgparse(). Use species list
separated by comma in place of taxa, e.g. "BR,BO,AN,CN"
"""
p = OptionParser(layout.__doc__)
p.add_option("--sort",
help="Sort layout file based on bedfile [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
omgfile, taxa = args
listfile = omgfile.rsplit(".", 1)[0] + ".list"
taxa = taxa.split(",")
ntaxa = len(taxa)
fw = open(listfile, "w")
data = []
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
row = ["."] * ntaxa
genes = genes.split(",")
ixs = [int(x) for x in idxs.split(",")]
for gene, idx in zip(genes, ixs):
row[idx] = gene
txs = ",".join(taxa[x] for x in ixs)
print >> fw, "\t".join(("\t".join(row), txs))
data.append(row)
coldata = zip(*data)
ngenes = []
for i, tx in enumerate(taxa):
genes = [x for x in coldata[i] if x != '.']
genes = set(x.strip("|") for x in genes)
ngenes.append((len(genes), tx))
details = ", ".join("{0} {1}".format(a, b) for a, b in ngenes)
total = sum(a for a, b in ngenes)
s = "A list of {0} orthologous families that collectively".format(len(data))
s += " contain a total of {0} genes ({1})".format(total, details)
print >> sys.stderr, s
fw.close()
lastcolumn = ntaxa + 1
cmd = "sort -k{0},{0} {1} -o {1}".format(lastcolumn, listfile)
sh(cmd)
logging.debug("List file written to `{0}`.".format(listfile))
sort = opts.sort
if sort:
thread = Bed(sort)
sort_layout(thread, listfile)
def omgparse(args):
"""
%prog omgparse work
Parse the OMG outputs to get gene lists.
"""
p = OptionParser(omgparse.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
work, = args
omgfiles = glob(op.join(work, "gf*.out"))
for omgfile in omgfiles:
omg = OMGFile(omgfile)
best = omg.best()
for bb in best:
genes, taxa = zip(*bb)
print "\t".join((",".join(genes), ",".join(taxa)))
def group(args):
"""
%prog group anchorfiles
Group the anchors into ortho-groups. Can input multiple anchor files.
"""
p = OptionParser(group.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
anchorfiles = args
groups = Grouper()
for anchorfile in anchorfiles:
ac = AnchorFile(anchorfile)
for a, b, idx in ac.iter_pairs():
groups.join(a, b)
logging.debug("Created {0} groups with {1} members.".\
format(len(groups), groups.num_members))
outfile = opts.outfile
fw = must_open(outfile, "w")
for g in groups:
print >> fw, ",".join(sorted(g))
fw.close()
return outfile
def omg(args):
"""
%prog omg weightsfile
Run Sankoff's OMG algorithm to get orthologs. Download OMG code at:
<http://137.122.149.195/IsbraSoftware/OMGMec.html>
This script only writes the partitions, but does not launch OMGMec. You may need to:
$ parallel "java -cp ~/code/OMGMec TestOMGMec {} 4 > {}.out" ::: work/gf?????
Then followed by omgparse() to get the gene lists.
"""
p = OptionParser(omg.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
weightsfiles = args
groupfile = group(weightsfiles + ["--outfile=groups"])
weights = get_weights(weightsfiles)
info = get_info()
fp = open(groupfile)
work = "work"
mkdir(work)
for i, row in enumerate(fp):
gf = op.join(work, "gf{0:05d}".format(i))
genes = row.rstrip().split(",")
fw = open(gf, "w")
contents = ""
npairs = 0
for gene in genes:
gene_pairs = weights[gene]
for a, b, c in gene_pairs:
if b not in genes:
continue
contents += "weight {0}".format(c) + '\n'
contents += info[a] + '\n'
contents += info[b] + '\n\n'
npairs += 1
header = "a group of genes :length ={0}".format(npairs)
print >> fw, header
print >> fw, contents
fw.close()
def geneinfo(bed, order, genomeidx, ploidy):
bedfile = bed.filename
p = bedfile.split(".")[0]
idx = genomeidx[p]
pd = ploidy[p]
infofile = p + ".info"
if not need_update(bedfile, infofile):
return infofile
fwinfo = open(infofile, "w")
for s in bed:
chr = "".join(x for x in s.seqid if x in string.digits)
try:
chr = int(chr)
except ValueError:
chr = "0"
print >> fwinfo, "\t".join(str(x) for x in \
(s.accn, chr, s.start, s.end, s.strand, idx, pd))
fwinfo.close()
logging.debug("Update info file `{0}`.".format(infofile))
return infofile
def omgprepare(args):
"""
%prog omgprepare ploidy anchorsfile blastfile
Prepare to run Sankoff's OMG algorithm to get orthologs.
"""
from jcvi.formats.blast import cscore
from jcvi.formats.base import DictFile
p = OptionParser(omgprepare.__doc__)
p.add_option("--norbh", action="store_true",
help="Disable RBH hits [default: %default]")
p.add_option("--pctid", default=0, type="int",
help="Percent id cutoff for RBH hits [default: %default]")
p.add_option("--cscore", default=90, type="int",
help="C-score cutoff for RBH hits [default: %default]")
p.set_stripnames()
p.set_beds()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
ploidy, anchorfile, blastfile = args
norbh = opts.norbh
pctid = opts.pctid
cs = opts.cscore
qbed, sbed, qorder, sorder, is_self = check_beds(anchorfile, p, opts)
fp = open(ploidy)
genomeidx = dict((x.split()[0], i) for i, x in enumerate(fp))
fp.close()
ploidy = DictFile(ploidy)
geneinfo(qbed, qorder, genomeidx, ploidy)
geneinfo(sbed, sorder, genomeidx, ploidy)
pf = blastfile.rsplit(".", 1)[0]
cscorefile = pf + ".cscore"
cscore([blastfile, "-o", cscorefile, "--cutoff=0", "--pct"])
ac = AnchorFile(anchorfile)
pairs = set((a, b) for a, b, i in ac.iter_pairs())
logging.debug("Imported {0} pairs from `{1}`.".format(len(pairs), anchorfile))
weightsfile = pf + ".weights"
fp = open(cscorefile)
fw = open(weightsfile, "w")
npairs = 0
for row in fp:
a, b, c, pct = row.split()
c, pct = float(c), float(pct)
c = int(c * 100)
if (a, b) not in pairs:
if norbh:
continue
if c < cs:
continue
if pct < pctid:
continue
c /= 10 # This severely penalizes RBH against synteny
print >> fw, "\t".join((a, b, str(c)))
npairs += 1
fw.close()
logging.debug("Write {0} pairs to `{1}`.".format(npairs, weightsfile))
def make_ortholog(blocksfile, rbhfile, orthofile):
from jcvi.formats.base import DictFile
# Generate mapping both ways
adict = DictFile(rbhfile)
bdict = DictFile(rbhfile, keypos=1, valuepos=0)
adict.update(bdict)
fp = open(blocksfile)
fw = open(orthofile, "w")
nrecruited = 0
for row in fp:
a, b = row.split()
if b == '.':
if a in adict:
b = adict[a]
nrecruited += 1
b += "'"
print >> fw, "\t".join((a, b))
logging.debug("Recruited {0} pairs from RBH.".format(nrecruited))
fp.close()
fw.close()
def ortholog(args):
"""
%prog ortholog species_a species_b
Run a sensitive pipeline to find orthologs between two species a and b.
The pipeline runs LAST and generates .lifted.anchors.
`--full` mode would assume 1-to-1 quota synteny blocks as the backbone of
such predictions. Extra orthologs will be recruited from reciprocal best
match (RBH).
"""
from jcvi.apps.last import main as last_main
from jcvi.compara.blastfilter import main as blastfilter_main
from jcvi.compara.quota import main as quota_main
from jcvi.compara.synteny import scan, mcscan, liftover
from jcvi.formats.blast import cscore, filter
p = OptionParser(ortholog.__doc__)
p.add_option("--full", default=False, action="store_true",
help="Run in full mode, including blocks and RBH")
p.add_option("--cscore", default=0.7, type="float",
help="C-score cutoff [default: %default]")
p.add_option("--dist", default=20, type="int",
help="Extent of flanking regions to search")
p.add_option("--quota", help="Quota align parameter")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
a, b = args
abed, afasta = a + ".bed", a + ".cds"
bbed, bfasta = b + ".bed", b + ".cds"
ccscore = opts.cscore
quota = opts.quota
dist = "--dist={0}".format(opts.dist)
aprefix = afasta.split(".")[0]
bprefix = bfasta.split(".")[0]
pprefix = ".".join((aprefix, bprefix))
qprefix = ".".join((bprefix, aprefix))
last = pprefix + ".last"
if need_update((afasta, bfasta), last):
last_main([bfasta, afasta, "-o", last])
if a == b:
last = filter([last, "--hitlen=0", "--pctid=98", "--inverse", "--noself"])
filtered_last = last + ".filtered"
if need_update(last, filtered_last):
blastfilter_main([last, "--cscore={0}".format(ccscore)])
anchors = pprefix + ".anchors"
lifted_anchors = pprefix + ".lifted.anchors"
if not opts.full:
if need_update(filtered_last, lifted_anchors):
scan([filtered_last, anchors, dist,
"--liftover={0}".format(last)])
if quota:
quota_main([lifted_anchors,
"--quota={0}".format(quota), "--screen"])
return
if need_update(filtered_last, anchors):
scan([filtered_last, anchors, dist])
ooanchors = pprefix + ".1x1.anchors"
if need_update(anchors, ooanchors):
quota_main([anchors, "--quota=1:1", "--screen"])
lifted_anchors = pprefix + ".1x1.lifted.anchors"
if need_update((last, ooanchors), lifted_anchors):
liftover([last, ooanchors, dist])
pblocks = pprefix + ".1x1.blocks"
qblocks = qprefix + ".1x1.blocks"
if need_update(lifted_anchors, [pblocks, qblocks]):
mcscan([abed, lifted_anchors, "--iter=1", "-o", pblocks])
mcscan([bbed, lifted_anchors, "--iter=1", "-o", qblocks])
rbh = pprefix + ".rbh"
if need_update(last, rbh):
cscore([last, "-o", rbh])
portho = pprefix + ".ortholog"
qortho = qprefix + ".ortholog"
if need_update([pblocks, qblocks, rbh], [portho, qortho]):
make_ortholog(pblocks, rbh, portho)
make_ortholog(qblocks, rbh, qortho)
def tandem_main(blast_file, cds_file, bed_file, N=3, P=50, is_self=True, \
evalue=.01, strip_name=".", ofile=sys.stderr, genefam=False):
if genefam:
N = 1e5
# get the sizes for the CDS first
f = Fasta(cds_file)
sizes = dict(f.itersizes())
# retrieve the locations
bed = Bed(bed_file)
order = bed.order
if is_self:
# filter the blast file
g = Grouper()
fp = open(blast_file)
for row in fp:
b = BlastLine(row)
query_len = sizes[b.query]
subject_len = sizes[b.subject]
if b.hitlen < min(query_len, subject_len)*P/100.:
continue
query = gene_name(b.query, strip_name)
subject = gene_name(b.subject, strip_name)
qi, q = order[query]
si, s = order[subject]
if abs(qi - si) <= N and b.evalue <= evalue:
if genefam:
g.join(query, subject)
elif q.seqid == s.seqid:
g.join(query, subject)
else:
homologs = Grouper()
fp = open(blast_file)
for row in fp:
b = BlastLine(row)
query_len = sizes[b.query]
subject_len = sizes[b.subject]
if b.hitlen < min(query_len, subject_len)*P/100.:
continue
if b.evalue > evalue:
continue
query = gene_name(b.query, strip_name)
subject = gene_name(b.subject, strip_name)
homologs.join(query, subject)
if genefam:
g = homologs
else:
g = Grouper()
for i, atom in enumerate(bed):
for x in range(1, N+1):
if all([i-x >= 0, bed[i-x].seqid == atom.seqid, \
homologs.joined(bed[i-x].accn, atom.accn)]):
leni = sizes[bed[i].accn]
lenx = sizes[bed[i-x].accn]
if abs(leni - lenx) > max(leni, lenx)*(1-P/100.):
continue
g.join(bed[i-x].accn, atom.accn)
# dump the grouper
fw = must_open(ofile, "w")
ngenes, nfamilies = 0, 0
families = []
for group in sorted(g):
if len(group) >= 2:
print >>fw, ",".join(sorted(group))
ngenes += len(group)
nfamilies += 1
families.append(sorted(group))
longest_family = max(families, key=lambda x: len(x))
# generate reports
print >>sys.stderr, "Proximal paralogues (dist=%d):" % N
print >>sys.stderr, "Total %d genes in %d families" % (ngenes, nfamilies)
print >>sys.stderr, "Longest families (%d): %s" % (len(longest_family),
",".join(longest_family))
return families
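# Usage sketch (illustrative file names): given a self-BLAST of a CDS set
# against itself plus the matching bed file,
#
#   families = tandem_main("grape.grape.blast", "grape.cds", "grape.bed",
#                          N=3, P=50, ofile="grape.tandem")
#
# returns a list of tandem clusters, each a sorted list of accessions
# (singletons are not reported).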
def tandem(args):
"""
%prog tandem blast_file cds_file bed_file [options]
Find tandem gene clusters that are separated by at most N genes, based on the
blast_file, requiring that the alignment between any two genes covers at least
50% (or a user-specified percentage) of either gene.
A pep_file can also be used in place of the cds_file in the same manner.
"""
p = OptionParser(tandem.__doc__)
p.add_option("--tandem_Nmax", dest="tandem_Nmax", type="int", default=3,
help="merge tandem genes within distance [default: %default]")
p.add_option("--percent_overlap", type="int", default=50,
help="tandem genes have >=x% aligned sequence, x=0-100 \
[default: %default]")
p.set_align(evalue=.01)
p.add_option("--not_self", default=False, action="store_true",
help="provided is not self blast file [default: %default]")
p.add_option("--strip_gene_name", dest="sep", type="string", default=".",
help="strip alternative splicing. Use None for no stripping. \
[default: %default]")
p.add_option("--genefamily", dest="genefam", action="store_true",
help="compile gene families based on similarity [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
blast_file, cds_file, bed_file = args
N = opts.tandem_Nmax
P = opts.percent_overlap
is_self = not opts.not_self
sep = opts.sep
ofile = opts.outfile
tandem_main(blast_file, cds_file, bed_file, N=N, P=P, is_self=is_self, \
evalue=opts.evalue, strip_name=sep, ofile=ofile, genefam=opts.genefam)
if __name__ == '__main__':
main()
|
|
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter
# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp
###########################################################
### Common node-construction "macros"
###########################################################
def KeywordArg(keyword, value):
return Node(syms.argument,
[keyword, Leaf(token.EQUAL, '='), value])
def LParen():
return Leaf(token.LPAR, "(")
def RParen():
return Leaf(token.RPAR, ")")
def Assign(target, source):
"""Build an assignment statement"""
if not isinstance(target, list):
target = [target]
if not isinstance(source, list):
source.set_prefix(" ")
source = [source]
return Node(syms.atom,
target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
def Name(name, prefix=None):
"""Return a NAME leaf"""
return Leaf(token.NAME, name, prefix=prefix)
def Attr(obj, attr):
"""A node tuple for obj.attr"""
return [obj, Node(syms.trailer, [Dot(), attr])]
def Comma():
"""A comma leaf"""
return Leaf(token.COMMA, ",")
def Dot():
"""A period (.) leaf"""
return Leaf(token.DOT, ".")
def ArgList(args, lparen=LParen(), rparen=RParen()):
"""A parenthesised argument list, used by Call()"""
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
if args:
node.insert_child(1, Node(syms.arglist, args))
return node
def Call(func_name, args=None, prefix=None):
"""A function call"""
node = Node(syms.power, [func_name, ArgList(args)])
if prefix is not None:
node.set_prefix(prefix)
return node
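# Illustrative construction (not part of the original module): building the
# subtree for " foo(a, b)" -- callers separate arguments with Comma() leaves.
#
#   args = [Name("a"), Comma(), Name("b", prefix=" ")]
#   node = Call(Name("foo"), args, prefix=" ")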
def Newline():
"""A newline literal"""
return Leaf(token.NEWLINE, "\n")
def BlankLine():
"""A blank line"""
return Leaf(token.NEWLINE, "")
def Number(n, prefix=None):
return Leaf(token.NUMBER, n, prefix=prefix)
def Subscript(index_node):
"""A numeric or string subscript"""
return Node(syms.trailer, [Leaf(token.LBRACE, '['),
index_node,
Leaf(token.RBRACE, ']')])
def String(string, prefix=None):
"""A string leaf"""
return Leaf(token.STRING, string, prefix=prefix)
def ListComp(xp, fp, it, test=None):
"""A list comprehension of the form [xp for fp in it if test].
If test is None, the "if test" part is omitted.
"""
xp.set_prefix("")
fp.set_prefix(" ")
it.set_prefix(" ")
for_leaf = Leaf(token.NAME, "for")
for_leaf.set_prefix(" ")
in_leaf = Leaf(token.NAME, "in")
in_leaf.set_prefix(" ")
inner_args = [for_leaf, fp, in_leaf, it]
if test:
test.set_prefix(" ")
if_leaf = Leaf(token.NAME, "if")
if_leaf.set_prefix(" ")
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
return Node(syms.atom,
[Leaf(token.LBRACE, "["),
inner,
Leaf(token.RBRACE, "]")])
def FromImport(package_name, name_leafs):
""" Return an import statement in the form:
from package import name_leafs"""
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
# "not been tested with dotted package names -- use at your own "\
# "peril!"
for leaf in name_leafs:
# Pull the leaves out of their old tree
leaf.remove()
children = [Leaf(token.NAME, 'from'),
Leaf(token.NAME, package_name, prefix=" "),
Leaf(token.NAME, 'import', prefix=" "),
Node(syms.import_as_names, name_leafs)]
imp = Node(syms.import_from, children)
return imp
###########################################################
### Determine whether a node represents a given literal
###########################################################
def is_tuple(node):
"""Does the node represent a tuple literal?"""
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
return True
return (isinstance(node, Node)
and len(node.children) == 3
and isinstance(node.children[0], Leaf)
and isinstance(node.children[1], Node)
and isinstance(node.children[2], Leaf)
and node.children[0].value == "("
and node.children[2].value == ")")
def is_list(node):
"""Does the node represent a list literal?"""
return (isinstance(node, Node)
and len(node.children) > 1
and isinstance(node.children[0], Leaf)
and isinstance(node.children[-1], Leaf)
and node.children[0].value == "["
and node.children[-1].value == "]")
###########################################################
### Misc
###########################################################
consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
"min", "max"])
def attr_chain(obj, attr):
"""Follow an attribute chain.
If you have a chain of objects where a.foo -> b, b.foo-> c, etc,
use this to iterate over all objects in the chain. Iteration is
terminated when getattr(x, attr) is None.
Args:
obj: the starting object
attr: the name of the chaining attribute
Yields:
Each successive object in the chain.
"""
next = getattr(obj, attr)
while next:
yield next
next = getattr(next, attr)
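# Illustrative use: walking up the tree from a node,
#
#   for parent in attr_chain(node, "parent"):
#       ...  # node.parent, node.parent.parent, ... until None
#
# which is how in_special_context() below pairs each enclosing parent with a
# pattern.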
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p1 = """
power<
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
'any' | 'all' | (any* trailer< '.' 'join' >) )
trailer< '(' node=any ')' >
any*
>
"""
p2 = """
power<
'sorted'
trailer< '(' arglist<node=any any*> ')' >
any*
>
"""
pats_built = False
def in_special_context(node):
""" Returns true if node is in an environment where all that is required
of it is being iterable (i.e., it doesn't matter if it returns a list
or an iterator).
See test_map_nochange in test_fixers.py for some examples and tests.
"""
global p0, p1, p2, pats_built
if not pats_built:
p1 = patcomp.compile_pattern(p1)
p0 = patcomp.compile_pattern(p0)
p2 = patcomp.compile_pattern(p2)
pats_built = True
patterns = [p0, p1, p2]
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
results = {}
if pattern.match(parent, results) and results["node"] is node:
return True
return False
###########################################################
### The following functions are to find bindings in a suite
###########################################################
def make_suite(node):
if node.type == syms.suite:
return node
node = node.clone()
parent, node.parent = node.parent, None
suite = Node(syms.suite, [node])
suite.parent = parent
return suite
def does_tree_import(package, name, node):
""" Returns true if name is imported from package at the
top level of the tree which node belongs to.
To cover the case of an import like 'import foo', use
None for the package and 'foo' for the name. """
# Scamper up to the top level namespace
while node.type != syms.file_input:
assert node.parent, "Tree is insane! root found before "\
"file_input node was found."
node = node.parent
binding = find_binding(name, node, package)
return bool(binding)
_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
""" Returns the node which binds variable name, otherwise None.
If optional argument package is supplied, only imports will
be returned.
See test cases for examples."""
for child in node.children:
ret = None
if child.type == syms.for_stmt:
if _find(name, child.children[1]):
return child
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type in (syms.if_stmt, syms.while_stmt):
n = find_binding(name, make_suite(child.children[-1]), package)
if n: ret = n
elif child.type == syms.try_stmt:
n = find_binding(name, make_suite(child.children[2]), package)
if n:
ret = n
else:
for i, kid in enumerate(child.children[3:]):
if kid.type == token.COLON and kid.value == ":":
# i+3 is the colon, i+4 is the suite
n = find_binding(name, make_suite(child.children[i+4]), package)
if n: ret = n
elif child.type in _def_syms and child.children[1].value == name:
ret = child
elif _is_import_binding(child, name, package):
ret = child
elif child.type == syms.simple_stmt:
ret = find_binding(name, child, package)
elif child.type == syms.expr_stmt:
if _find(name, child.children[0]):
ret = child
if ret:
if not package:
return ret
if ret.type in (syms.import_name, syms.import_from):
return ret
return None
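# A small sketch of find_binding on a freshly parsed tree (standard lib2to3
# driver API; the snippet parsed here is only illustrative):
#
# >>> from lib2to3 import pygram, pytree
# >>> from lib2to3.pgen2 import driver
# >>> d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
# >>> tree = d.parse_string("x = 1\n")
# >>> find_binding("x", tree) is not None
# True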
_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
nodes = [node]
while nodes:
node = nodes.pop()
if node.type > 256 and node.type not in _block_syms:
nodes.extend(node.children)
elif node.type == token.NAME and node.value == name:
return node
return None
def _is_import_binding(node, name, package=None):
""" Will reuturn node if node will import name, or node
will import * from package. None is returned otherwise.
See test cases for examples. """
if node.type == syms.import_name and not package:
imp = node.children[1]
if imp.type == syms.dotted_as_names:
for child in imp.children:
if child.type == syms.dotted_as_name:
if child.children[2].value == name:
return node
elif child.type == token.NAME and child.value == name:
return node
elif imp.type == syms.dotted_as_name:
last = imp.children[-1]
if last.type == token.NAME and last.value == name:
return node
elif imp.type == token.NAME and imp.value == name:
return node
elif node.type == syms.import_from:
# unicode(...) is used to make life easier here, because
# 'from a.b import' parses to ['import', ['a', '.', 'b'], ...]
if package and unicode(node.children[1]).strip() != package:
return None
n = node.children[3]
if package and _find('as', n):
# See test_from_import_as for explanation
return None
elif n.type == syms.import_as_names and _find(name, n):
return node
elif n.type == syms.import_as_name:
child = n.children[2]
if child.type == token.NAME and child.value == name:
return node
elif n.type == token.NAME and n.value == name:
return node
elif package and n.type == token.STAR:
return node
return None
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for operator dispatch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linear_operator_diag
from tensorflow.python.ops.proto_ops import decode_proto
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
from tensorflow.python.util.tf_export import tf_export
class CustomTensor(object):
"""A fake composite tensor class, for testing type-based dispatching."""
def __init__(self, tensor, score):
self.tensor = ops.convert_to_tensor(tensor)
self.score = score
@tf_export("test_op")
@dispatch.add_dispatch_support
def test_op(x, y, z):
"""A fake op for testing dispatch of Python ops."""
return x + (2 * y) + (3 * z)
class TensorTracer(object):
"""An object used to trace TensorFlow graphs.
This is an example class that is used to test global op dispatchers. The
global op dispatcher for TensorTracers is defined below.
"""
def __init__(self, name, args=None, kwargs=None):
self.name = name
self.args = args
self.kwargs = kwargs
self.shape = array_ops.ones(shape=(4, 4)).shape
self.dtype = dtypes.float32
def __repr__(self):
if self.args is None and self.kwargs is None:
return self.name
else:
args = [str(x) for x in self.args]
args += sorted(
["{}={}".format(name, x) for (name, x) in self.kwargs.items()])
return "{}({})".format(self.name, ", ".join(args))
@property
def is_tensor_like(self):
return True
@classmethod
def _overload_all_operators(cls): # pylint: disable=invalid-name
"""Register overloads for all operators."""
for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
cls._overload_operator(operator)
@classmethod
def _overload_operator(cls, operator): # pylint: disable=invalid-name
"""Overload an operator with the same overloading as `ops.Tensor`."""
tensor_oper = getattr(ops.Tensor, operator)
# Compatibility with Python 2:
# Python 2 unbound methods have type checks for the first arg,
# so we need to extract the underlying function
tensor_oper = getattr(tensor_oper, "__func__", tensor_oper)
setattr(cls, operator, tensor_oper)
TensorTracer._overload_all_operators() # pylint: disable=protected-access
class TensorTracerOpDispatcher(dispatch.GlobalOpDispatcher):
"""Global op dispatcher for TensorTracer."""
def _flatten_with_slice_flattening(self, x):
flat = []
for val in nest.flatten(x):
if isinstance(val, slice):
flat.extend((val.start, val.stop, val.step))
else:
flat.append(val)
return flat
def handle(self, op, args, kwargs):
# Dispatcher only applies if at least one arg is a TensorTracer.
if not (any(self.is_tensor_tracer_arg(x) for x in args) or
any(self.is_tensor_tracer_arg(x) for x in kwargs.values())):
return self.NOT_SUPPORTED
symbol_name = get_canonical_name_for_symbol(op)
return TensorTracer(symbol_name, args, kwargs)
def is_tensor_tracer_arg(self, value):
return any(isinstance(x, TensorTracer) for x in
self._flatten_with_slice_flattening(value))
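# Illustrative sketch (mirrors the tests below): once an instance of this
# dispatcher is registered, ordinary TF ops applied to TensorTracer
# arguments return new TensorTracer objects that record the call:
#
#   TensorTracerOpDispatcher().register()
#   x = TensorTracer("x")
#   print(math_ops.abs(x))  # -> "math.abs(x)"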
@test_util.run_all_in_graph_and_eager_modes
class DispatchTest(test_util.TensorFlowTestCase):
def testAddDispatchForTypes_With_CppOp(self):
original_handlers = gen_math_ops.add._tf_dispatchers[:]
# Override the behavior of gen_math_ops.add.
@dispatch.dispatch_for_types(gen_math_ops.add, CustomTensor)
def custom_add(x, y, name=None): # pylint: disable=unused-variable
return CustomTensor(gen_math_ops.add(x.tensor, y.tensor, name),
(x.score+y.score) / 2.0)
self.assertEqual(len(math_ops.add._tf_dispatchers),
len(original_handlers) + 1)
# Test that we see the overridden behavior when using CustomTensors.
x = CustomTensor([1, 2, 3], 2.0)
y = CustomTensor([7, 8, 2], 0.0)
x_plus_y = gen_math_ops.add(x, y)
self.assertAllEqual(self.evaluate(x_plus_y.tensor), [8, 10, 5])
self.assertNear(x_plus_y.score, 1.0, 0.001)
# Test that we still get the right behavior when using normal Tensors.
a = [1, 2, 3]
b = [4, 5, 6]
a_plus_b = gen_math_ops.add(a, b)
self.assertAllEqual(a_plus_b, [5, 7, 9])
# Test that we still get a TypeError or ValueError if we pass some
# type that's not supported by any dispatcher.
with self.assertRaises((TypeError, ValueError)):
gen_math_ops.add(a, None)
# Clean up
gen_math_ops.add._tf_dispatchers = original_handlers
def testAddDispatchForTypes_With_PythonOp(self):
original_handlers = test_op._tf_dispatchers[:]
@dispatch.dispatch_for_types(test_op, CustomTensor)
def override_for_test_op(x, y, z): # pylint: disable=unused-variable
return CustomTensor(test_op(x.tensor, y.tensor, z.tensor),
(x.score + y.score + z.score) / 3.0)
x = CustomTensor([1, 2, 3], 0.2)
y = CustomTensor([7, 8, 2], 0.4)
z = CustomTensor([0, 1, 2], 0.6)
result = test_op(x, y, z)
self.assertAllEqual(self.evaluate(result.tensor), [15, 21, 13])
self.assertNear(result.score, 0.4, 0.001)
# Clean up
test_op._tf_dispatchers = original_handlers
def testDispatchForTypes_SignatureMismatch(self):
with self.assertRaisesRegex(
AssertionError, "The decorated function's "
"signature must exactly match.*"):
@dispatch.dispatch_for_types(test_op, CustomTensor)
def override_for_test_op(a, b, c): # pylint: disable=unused-variable
return CustomTensor(test_op(a.tensor, b.tensor, c.tensor),
(a.score + b.score + c.score) / 3.0)
def testDispatchForTypes_OpDoesNotSupportDispatch(self):
def some_op(x, y):
return x + y
with self.assertRaisesRegex(AssertionError, "Dispatching not enabled for"):
@dispatch.dispatch_for_types(some_op, CustomTensor)
def override_for_some_op(x, y): # pylint: disable=unused-variable
return x if x.score > 0 else y
@test.mock.patch.object(tf_logging, "warning", autospec=True)
def testInteractionWithDeprecationWarning(self, mock_warning):
@deprecation.deprecated(date=None, instructions="Instructions")
@dispatch.add_dispatch_support
def some_op(x):
return x
some_op(5)
message = mock_warning.call_args[0][0] % mock_warning.call_args[0][1:]
self.assertRegex(
message, r".*some_op \(from __main__\) is deprecated and will be "
"removed in a future version.*")
def testGlobalDispatcher(self):
original_global_dispatchers = dispatch._GLOBAL_DISPATCHERS
try:
TensorTracerOpDispatcher().register()
x = TensorTracer("x")
y = TensorTracer("y")
trace = math_ops.reduce_sum(math_ops.add(math_ops.abs(x), y), axis=3)
self.assertEqual(
str(trace),
"math.reduce_sum(math.add(name=None, x=math.abs(x), y=y), axis=3)")
proto_val = TensorTracer("proto")
trace = decode_proto(proto_val, "message_type", ["field"], ["float32"])
self.assertIn("io.decode_proto(bytes=proto,", str(trace))
finally:
# Clean up.
dispatch._GLOBAL_DISPATCHERS = original_global_dispatchers
def testGlobalDispatcherConvertToTensor(self):
original_global_dispatchers = dispatch._GLOBAL_DISPATCHERS
try:
TensorTracerOpDispatcher().register()
x = TensorTracer("x")
y = TensorTracer("y")
trace = math_ops.add(math_ops.abs(
ops.convert_to_tensor_v2_with_dispatch(x)), y)
self.assertEqual(
str(trace),
"math.add(name=None, x=math.abs(convert_to_tensor(x)), y=y)")
finally:
# Clean up.
dispatch._GLOBAL_DISPATCHERS = original_global_dispatchers
def testGlobalDispatcherGetItem(self):
original_global_dispatchers = dispatch._GLOBAL_DISPATCHERS
try:
TensorTracerOpDispatcher().register()
x = TensorTracer("x")
trace = x[0]
self.assertEqual(
str(trace),
"__operators__.getitem(x, 0)")
x = TensorTracer("x")
y = TensorTracer("y")
trace = x[y]
self.assertEqual(
str(trace),
"__operators__.getitem(x, y)")
x = TensorTracer("x")
y = TensorTracer("y")
trace = x[:y] # pylint: disable=invalid-slice-index
self.assertEqual(
str(trace),
"__operators__.getitem(x, slice(None, y, None))")
x = array_ops.ones(shape=(3, 3))
y = TensorTracer("y")
trace = x[y]
self.assertEqual(
str(trace),
"__operators__.getitem(%s, y)" % x)
trace = x[:y] # pylint: disable=invalid-slice-index
self.assertEqual(
str(trace),
"__operators__.getitem(%s, slice(None, y, None))" % x)
finally:
# Clean up.
dispatch._GLOBAL_DISPATCHERS = original_global_dispatchers
def testGlobalDispatcherLinearOperators(self):
original_global_dispatchers = dispatch._GLOBAL_DISPATCHERS
try:
TensorTracerOpDispatcher().register()
x = TensorTracer("x")
# To grab the eigenvalues the diag operator just calls convert_to_tensor
# (twice) in this case.
trace = linear_operator_diag.LinearOperatorDiag(x).eigvals()
self.assertEqual(
str(trace),
"convert_to_tensor(convert_to_tensor(x, dtype=None, dtype_hint=None, "
"name=diag))")
# The diagonal tensor addition gets traced even though the linear_operator
# API only uses dispatchable ops instead of directly exposing dispatching.
trace = linear_operator_diag.LinearOperatorDiag(x).add_to_tensor(x)
self.assertIn(
"linalg.set_diag(convert_to_tensor(x, name=x), __operators__.add("
"convert_to_tensor(x, dtype=None, dtype_hint=None, name=diag), "
"linalg.diag_part(convert_to_tensor(x, name=x)), "
"name=",
str(trace))
# The dispatch-supporting ops the non-singular check calls out to
# get traced.
trace = linear_operator_diag.LinearOperatorDiag(x).assert_non_singular()
self.assertIn("debugging.assert_less", str(trace))
self.assertIn(
"message=Singular operator: Diagonal contained zero values.",
str(trace))
finally:
# Clean up.
dispatch._GLOBAL_DISPATCHERS = original_global_dispatchers
if __name__ == "__main__":
googletest.main()
|
|
"""
Maya-related functions, which are useful to both `api` and `core`, including `mayaInit` which ensures
that maya is initialized in standalone mode.
"""
from __future__ import with_statement
import os.path, sys, glob, inspect
import maya
import maya.OpenMaya as om
import maya.utils
from pymel.util import picklezip, shellOutput, subpackages, refreshEnviron, namedtuple
import pymel.versions as versions
from pymel.mayautils import getUserPrefsDir
from pymel.versions import shortName, installName
import plogging
# There are FOUR different ways maya might be started, all of which are
# subtly different, that need to be considered / tested:
#
# 1) Normal gui
# 2) maya -prompt
# 3) Render
# 4) mayapy (or just straight up python)
_logger = plogging.getLogger(__name__)
try:
import cPickle as pickle
except:
_logger.warning("using pickle instead of cPickle: load performance will be affected")
import pickle
#from maya.cmds import encodeString
isInitializing = False
# Setting this to False will make finalize() do nothing
finalizeEnabled = True
_finalizeCalled = False
# tells whether this maya package has been modified to work with pymel
pymelMayaPackage = hasattr(maya.utils, 'shellLogHandler') or versions.current() >= versions.v2011
def _moduleJoin(*args):
"""
Joins with the base pymel directory.
:rtype: string
"""
moduleDir = os.path.dirname( os.path.dirname( sys.modules[__name__].__file__ ) )
return os.path.realpath(os.path.join( moduleDir, *args))
def mayaStartupHasRun():
"""
Returns True if maya.app.startup has already finished, False otherwise.
"""
return 'maya.app.startup.gui' in sys.modules or 'maya.app.startup.batch' in sys.modules
def mayaStartupHasStarted():
"""
Returns True if maya.app.startup has begun running, False otherwise.
It's possible that maya.app.startup is in the process of running (ie,
in maya.app.startup.basic, calling executeUserSetup) - unlike mayaStartupHasRun,
this will attempt to detect if this is the case.
"""
return hasattr(maya, 'stringTable')
def setupFormatting():
import pprint
import maya.utils
def myResultCallback(obj):
return pprint.pformat(obj)
maya.utils.formatGuiResult = myResultCallback
# prevent auto-completion generator from getting confused
maya.utils.formatGuiResult.__module__ = 'maya.utils'
#def loadDynamicLibs():
# """
# due to a bug in maya.app.commands many functions do not return any value the first time they are run,
# especially in standalone mode. this function forces the loading of all dynamic libraries, which is
# a very fast and memory-efficient process, which begs the question: why bother dynamically loading?
#
# this function can only be run after maya.standalone is initialized
# """
#
# commandListPath = os.path.realpath( os.environ[ 'MAYA_LOCATION' ] )
# commandListPath = os.path.join( commandListPath, libdir, 'commandList' )
#
# import maya.cmds
# assert hasattr( maya.cmds, 'dynamicLoad'), "maya.standalone must be initialized before running this function"
# file = open( commandListPath, 'r' )
# libraries = set( [ line.split()[1] for line in file] )
# for library in libraries:
# try:
# maya.cmds.dynamicLoad(library)
# except RuntimeError:
# _logger.debug("Error dynamically loading maya library: %s" % library)
# Will test-initialize maya standalone if necessary (e.g. if scripts are run from an external interpreter)
# returns True if Maya is available, False otherwise
def mayaInit(forversion=None) :
""" Try to init Maya standalone module, use when running pymel from an external Python interpreter,
it is possible to pass the desired Maya version number to define which Maya to initialize
Part of the complexity of initializing maya in standalone mode is that maya does not populate os.environ when
parsing Maya.env. If we initialize normally, the env's are available via maya (via the shell), but not in python
via os.environ.
Note: the following example assumes that MAYA_SCRIPT_PATH is not set in your shell environment prior to launching
python or mayapy.
>>> import maya.standalone #doctest: +SKIP
>>> maya.standalone.initialize() #doctest: +SKIP
>>> import maya.mel as mm #doctest: +SKIP
>>> print mm.eval("getenv MAYA_SCRIPT_PATH") #doctest: +SKIP
/Network/Servers/sv-user.luma-pictures.com/luma .....
>>> import os #doctest: +SKIP
>>> 'MAYA_SCRIPT_PATH' in os.environ #doctest: +SKIP
False
The solution lies in `refreshEnviron`, which copies the environment from the shell to os.environ after maya.standalone
initializes.
:rtype: bool
:return: True if maya.cmds required initializing (in other words, we are in a standalone python interpreter)
"""
_logger.debug( "startup.mayaInit: called" )
setupFormatting()
global isInitializing
# test that Maya actually is loaded and that commands have been initialized, for the requested version
aboutExists = False
try :
from maya.cmds import about
aboutExists = True
except ImportError:
pass
if aboutExists and mayaStartupHasStarted():
# if this succeeded, we're initialized
_logger.debug( "startup.mayaInit: maya already started - exiting" )
isInitializing = False
return False
_logger.debug( "startup.mayaInit: running" )
# for use with pymel compatible maya package
os.environ['MAYA_SKIP_USERSETUP_PY'] = 'on'
if not aboutExists and not sys.modules.has_key('maya.standalone'):
try :
_logger.debug( "startup.mayaInit: running standalone.initialize" )
import maya.standalone #@UnresolvedImport
maya.standalone.initialize(name="python")
if versions.current() < versions.v2009:
refreshEnviron()
except ImportError, e:
raise e, str(e) + ": pymel was unable to initialize maya.standalone"
try:
from maya.cmds import about
except Exception:
_logger.error("maya.standalone was successfully initialized, but pymel failed to import maya.cmds (or it was not populated)")
raise
if not mayaStartupHasRun():
_logger.debug( "running maya.app.startup" )
# If we're in 'maya -prompt' mode, and a plugin loads pymel, then we
# can have a state where maya.standalone has been initialized, but
# the python startup code hasn't yet been run...
if about(batch=True):
import maya.app.startup.batch
else:
import maya.app.startup.gui
# return True, meaning we had to initialize maya standalone
isInitializing = True
return True
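# Typical usage from a plain python/mayapy session (a sketch; requires a Maya
# install, and assumes this module is importable as pymel.internal.startup):
#
# >>> import pymel.internal.startup as startup #doctest: +SKIP
# >>> startup.mayaInit() #doctest: +SKIP
# True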
def initMEL():
if 'PYMEL_SKIP_MEL_INIT' in os.environ or pymel_options.get( 'skip_mel_init', False ) :
_logger.info( "Skipping MEL initialization" )
return
_logger.debug( "initMEL" )
mayaVersion = versions.installName()
prefsDir = getUserPrefsDir()
if prefsDir is None:
_logger.error( "could not initialize user preferences: MAYA_APP_DIR not set" )
elif not os.path.isdir(prefsDir):
_logger.error( "could not initialize user preferences: %s does not exist" % prefsDir )
# TODO : use cmds.internalVar to get paths
# got this startup sequence from autodesk support
startup = [
#'defaultRunTimeCommands.mel', # sourced automatically
#os.path.join( prefsDir, 'userRunTimeCommands.mel'), # sourced automatically
'createPreferencesOptVars.mel',
'createGlobalOptVars.mel',
os.path.join( prefsDir, 'userPrefs.mel') if prefsDir else None,
'initialStartup.mel',
#$HOME/Documents/maya/projects/default/workspace.mel
'initialPlugins.mel',
#'initialGUI.mel', #GUI
#'initialLayout.mel', #GUI
#os.path.join( prefsDir, 'windowPrefs.mel'), #GUI
#os.path.join( prefsDir, 'menuSetPrefs.mel'), #GUI
#'hotkeySetup.mel', #GUI
'namedCommandSetup.mel', #Fails in 2014 at one point
os.path.join( prefsDir, 'userNamedCommands.mel' ) if prefsDir else None,
#'initAfter.mel', #GUI
os.path.join( prefsDir, 'pluginPrefs.mel' ) if prefsDir else None
]
try:
for f in startup:
_logger.debug("running: %s" % f)
if f is not None:
if os.path.isabs(f) and not os.path.exists(f):
_logger.warning( "Maya startup file %s does not exist" % f )
else:
# need to encode backslashes (used for windows paths)
if isinstance(f, unicode):
encoding = 'unicode_escape'
else:
encoding = 'string_escape'
#import pymel.core.language as lang
#lang.mel.source( f.encode(encoding) )
import maya.mel
maya.mel.eval( 'source "%s"' % f.encode(encoding) )
except Exception, e:
_logger.error( "could not perform Maya initialization sequence: failed on %s: %s" % ( f, e) )
try:
# make sure it exists
res = maya.mel.eval('whatIs "userSetup.mel"')
if res != 'Unknown':
maya.mel.eval( 'source "userSetup.mel"')
except RuntimeError: pass
_logger.debug("done running mel files")
def initAE():
try:
pkg = __import__('AETemplates')
except ImportError:
return False
except Exception:
import traceback
traceback.print_exc()
return False
else:
# import subpackages
for data in subpackages(pkg):
pass
return True
def finalize():
global finalizeEnabled
global _finalizeCalled
if not finalizeEnabled or _finalizeCalled:
return
_logger.debug('finalizing')
# Set this to true HERE, as in running userSetup.py,
# we could end up in here again, inside the initial finalize...
_finalizeCalled = True
global isInitializing
if pymelMayaPackage and isInitializing:
# this module is not encapsulated into functions, but it should already
# be imported, so it won't run again
assert 'maya.app.startup.basic' in sys.modules, \
"something is very wrong. maya.app.startup.basic should be imported by now"
import maya.app.startup.basic
maya.app.startup.basic.executeUserSetup()
state = om.MGlobal.mayaState()
if state == om.MGlobal.kLibraryApp: # mayapy only
initMEL()
#fixMayapy2011SegFault()
elif state == om.MGlobal.kInteractive:
initAE()
# Have all the checks inside here, in case people want to insert this in their
# userSetup... it's currently not always on
def fixMayapy2011SegFault():
if versions.v2011 <= versions.current() < versions.v2013 :
import platform
if platform.system() == 'Linux':
if om.MGlobal.mayaState() == om.MGlobal.kLibraryApp: # mayapy only
# In linux maya 2011, once maya has been initialized, if you try
# to do a 'normal' sys.exit, it will crash with a segmentation
# fault..
# do a 'hard' os._exit to avoid this
# note that, since there is no built-in support to tell from
# within atexit functions what the exit code is, we cannot
# guarantee returning the "correct" exit code... for instance,
# if someone does:
# raise SystemExit(300)
# we will instead return a 'normal' exit code of 0
# ... but in general, the return code is a LOT more reliable now,
# since it used to ALWAYS return non-zero...
import sys
import atexit
# First, wrap sys.exit to store the exit code...
_orig_exit = sys.exit
# This is just in case anybody else needs to access the
# original exit function...
if not hasattr(sys, '_orig_exit'):
sys._orig_exit = _orig_exit
def exit(status):
sys._exit_status = status
_orig_exit(status)
sys.exit = exit
def hardExit():
# run all the other exit handlers registered with
# atexit, then hard exit... this is easy, because
# atexit._run_exitfuncs pops funcs off the stack as it goes...
# so all we need to do is call it again
import sys
atexit._run_exitfuncs()
try:
print "pymel: hard exiting to avoid mayapy crash..."
except Exception:
pass
import os
import sys
exitStatus = getattr(sys, '_exit_status', None)
if exitStatus is None:
last_value = getattr(sys, 'last_value', None)
if last_value is not None:
if isinstance(last_value, SystemExit):
try:
exitStatus = last_value.args[0]
except Exception: pass
if exitStatus is None:
exitStatus = 1
if exitStatus is None:
exitStatus = 0
os._exit(exitStatus)
atexit.register(hardExit)
# Fix for non US encodings in Maya
def encodeFix():
if mayaInit() :
from maya.cmds import about
mayaEncode = about(cs=True)
pyEncode = sys.getdefaultencoding() # encoding as defined by sitecustomize
if mayaEncode != pyEncode : # if the encoding needs to be redefined
#reload (sys) # attention reset aussi sys.stdout et sys.stderr
#sys.setdefaultencoding(newEncode)
#del sys.setdefaultencoding
#print "# Encoding changed from '"+pyEncode+'" to "'+newEncode+"' #"
if not about(b=True) : # if not in batch mode (i.e. UI mode), redefine stdout and stderr with Maya's encoding
import maya.utils
try :
import maya.app.baseUI
import codecs
# Replace sys.stdin with a GUI version that will request input from the user
sys.stdin = codecs.getreader(mayaEncode)(maya.app.baseUI.StandardInput())
# Replace sys.stdout and sys.stderr with versions that can output to Maya's GUI
sys.stdout = codecs.getwriter(mayaEncode)(maya.utils.Output())
sys.stderr = codecs.getwriter(mayaEncode)(maya.utils.Output( error=1 ))
except ImportError :
_logger.debug("Unable to import maya.app.baseUI")
#===============================================================================
# Cache utilities
#===============================================================================
def _dump( data, filename, protocol = -1):
with open(filename, mode='wb') as file:
pickle.dump( data, file, protocol)
def _load(filename):
with open(filename, mode='rb') as file:
res = pickle.load(file)
return res
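# Round-trip sketch for the two helpers above (hypothetical temp path):
#
# >>> _dump({'a': 1}, '/tmp/pymel_cache_demo.bin') #doctest: +SKIP
# >>> _load('/tmp/pymel_cache_demo.bin') #doctest: +SKIP
# {'a': 1}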
class PymelCache(object):
# override these
NAME = '' # ie, 'mayaApi'
DESC = '' # ie, 'the API cache' - used in error messages, etc
COMPRESSED = True
# whether to add the version to the filename when writing out the cache
USE_VERSION = True
def read(self):
newPath = self.path()
if self.COMPRESSED:
func = picklezip.load
else:
func = _load
_logger.debug(self._actionMessage('Loading', 'from', newPath))
try:
return func(newPath)
except Exception, e:
self._errorMsg('read', 'from', newPath, e)
def write(self, data):
newPath = self.path()
if self.COMPRESSED:
func = picklezip.dump
else:
func = _dump
_logger.info(self._actionMessage('Saving', 'to', newPath))
try :
func( data, newPath, 2)
except Exception, e:
self._errorMsg('write', 'to', newPath, e)
def path(self):
if self.USE_VERSION:
if hasattr(self, 'version'):
short_version = str(self.version)
else:
short_version = shortName()
else:
short_version = ''
newPath = _moduleJoin( 'cache', self.NAME+short_version )
if self.COMPRESSED:
newPath += '.zip'
else:
newPath += '.bin'
return newPath
@classmethod
def _actionMessage(cls, action, direction, location):
'''_actionMessage('eat', 'at', 'Joes') =>
"eat cls.DESC at 'Joes'"
'''
description = cls.DESC
if description:
description = ' ' + description
return "%s%s %s %r" % (action, description, direction, location)
@classmethod
def _errorMsg(cls, action, direction, path, error):
'''_errorMessage('eat', 'at', 'Joes') =>
'Unable to eat cls.DESC at Joes: error.msg'
'''
actionMsg = cls._actionMessage(action, direction, path)
_logger.error("Unable to %s: %s" % (actionMsg, error))
import traceback
_logger.debug(traceback.format_exc())
# Considered using named_tuple, but wanted to make data stored in cache
# have as few dependencies as possible - ie, just a simple tuple
class SubItemCache(PymelCache):
'''Used to store various maya information
ie, api / cmd data parsed from docs
To implement, create a subclass, which overrides at least the NAME, DESC,
and _CACHE_NAMES attributes, and implements the rebuild method.
Then to access data, you should initialize an instance, then call build;
build will load the data from the cache file if possible, or call rebuild
to build the data from scratch if not. If the data had to be rebuilt,
a new file cache will be saved.
The data may then be accessed through attributes on the instance, with
the names given in _CACHE_NAMES.
>>> class NodeCache(SubItemCache):
... NAME = 'mayaNodes'
... DESC = 'the maya nodes cache'
... COMPRESSED = False
... _CACHE_NAMES = ['nodeTypes']
... def rebuild(self):
... import maya.cmds
... self.nodeTypes = maya.cmds.allNodeTypes(includeAbstract=True)
>>> cacheInst = NodeCache()
>>> cacheInst.build()
>>> 'polyCube' in cacheInst.nodeTypes
True
'''
# Provides a front end for a pickled file, which should contain a
# tuple of items; each item in the tuple is associated with a name from
# _CACHE_NAMES
# override this with a list of names for the items within the cache
_CACHE_NAMES = []
# Set this to the initialization constructor for each cache item;
# if a given cache name is not present in ITEM_TYPES, DEFAULT_TYPE is
# used
# These are the types that the contents will 'appear' to be to the end user
# (ie, the types returned by contents).
# If the value needs to be converted before pickling, specify an entry
# in STORAGE_TYPES
# Both should be constructors which can either take no arguments, or
# a single argument to initialize an instance.
ITEM_TYPES = {}
STORAGE_TYPES = {}
DEFAULT_TYPE = dict
def __init__(self):
for name in self._CACHE_NAMES:
self.initVal(name)
def cacheNames(self):
return tuple(self._CACHE_NAMES)
def initVal(self, name):
itemType = self.itemType(name)
if itemType is None:
val = None
else:
val = itemType()
setattr(self, name, val)
def itemType(self, name):
return self.ITEM_TYPES.get(name, self.DEFAULT_TYPE)
def build(self):
"""
Used to rebuild cache, either by loading from a cache file, or rebuilding from scratch.
"""
data = self.load()
if data is None:
self.rebuild()
self.save()
# override this...
def rebuild(self):
"""Rebuild cache from scratch
Unlike 'build', this does not attempt to load a cache file, but always
rebuilds it by parsing the docs, etc.
"""
pass
def update(self, obj, cacheNames=None):
'''Update all the various data from the given object, which should
either be a dictionary, a list or tuple with the right number of items,
or an object with the caches stored in attributes on it.
'''
if cacheNames is None:
cacheNames = self.cacheNames()
if isinstance(obj, dict):
for key, val in obj.iteritems():
setattr(self, key, val)
elif isinstance(obj, (list, tuple)):
if len(obj) != len(cacheNames):
raise ValueError('length of update object (%d) did not match length of cache names (%d)' % (len(obj), len(cacheNames)))
for newVal, name in zip(obj, cacheNames):
setattr(self, name, newVal)
else:
for cacheName in cacheNames:
setattr(self, cacheName, getattr(obj, cacheName))
def load(self):
'''Attempts to load the data from the cache on file.
If it succeeds, it will update itself, and return the loaded items;
if it fails, it will return None
'''
data = self.read()
if data is not None:
data = list(data)
# if STORAGE_TYPES, need to convert back from the storage type to
# the 'normal' type
if self.STORAGE_TYPES:
for name in self.STORAGE_TYPES:
index = self._CACHE_NAMES.index(name)
val = data[index]
val = self.itemType(name)(val)
data[index] = val
data = tuple(data)
self.update(data, cacheNames=self._CACHE_NAMES)
return data
def save(self, obj=None):
'''Saves the cache
Will optionally update the caches from the given object (which may be
a dictionary, or an object with the caches stored in attributes on it)
before saving
'''
if obj is not None:
self.update(obj)
data = self.contents()
if self.STORAGE_TYPES:
newData = []
for name, val in zip(self._CACHE_NAMES, data):
if name in self.STORAGE_TYPES:
val = self.STORAGE_TYPES[name](val)
newData.append(val)
data = tuple(newData)
self.write(data)
# was called 'caches'
def contents(self):
return tuple( getattr(self, x) for x in self.cacheNames() )
#===============================================================================
# Config stuff
#===============================================================================
def getConfigFile():
return plogging.getConfigFile()
def parsePymelConfig():
import ConfigParser
types = {'skip_mel_init' : 'boolean',
'check_attr_before_lock' : 'boolean',
}
defaults = {'skip_mel_init' : 'off',
'check_attr_before_lock' : 'off',
}
config = ConfigParser.ConfigParser(defaults)
config.read( getConfigFile() )
d = {}
for option in config.options('pymel'):
getter = getattr( config, 'get' + types.get(option, '') )
d[option] = getter( 'pymel', option )
return d
pymel_options = parsePymelConfig()
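# For reference, a matching section in the config file returned by
# getConfigFile() might look like this (a sketch; the option names mirror
# the defaults above):
#
# [pymel]
# skip_mel_init = off
# check_attr_before_lock = off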
|
|
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cqlengine.statements import AssignmentClause, SetUpdateClause, ListUpdateClause, MapUpdateClause, MapDeleteClause, FieldDeleteClause, CounterUpdateClause
class AssignmentClauseTests(unittest.TestCase):
def test_rendering(self):
pass
def test_insert_tuple(self):
ac = AssignmentClause('a', 'b')
ac.set_context_id(10)
self.assertEqual(ac.insert_tuple(), ('a', 10))
class SetUpdateClauseTests(unittest.TestCase):
def test_update_from_none(self):
c = SetUpdateClause('s', set((1, 2)), previous=None)
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, set((1, 2)))
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((1, 2))})
def test_null_update(self):
""" tests setting a set to None creates an empty update statement """
c = SetUpdateClause('s', None, previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 0)
self.assertEqual(str(c), '')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {})
def test_no_update(self):
""" tests an unchanged value creates an empty update statement """
c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 0)
self.assertEqual(str(c), '')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {})
def test_update_empty_set(self):
"""tests assigning a set to an empty set creates a nonempty
update statement and nonzero context size."""
c = SetUpdateClause(field='s', value=set())
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, set())
self.assertIsNone(c._additions)
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set()})
def test_additions(self):
c = SetUpdateClause('s', set((1, 2, 3)), previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._additions, set((3,)))
self.assertIsNone(c._removals)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = "s" + %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((3,))})
def test_removals(self):
c = SetUpdateClause('s', set((1, 2)), previous=set((1, 2, 3)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._additions)
self.assertEqual(c._removals, set((3,)))
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = "s" - %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((3,))})
def test_additions_and_removals(self):
c = SetUpdateClause('s', set((2, 3)), previous=set((1, 2)))
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._additions, set((3,)))
self.assertEqual(c._removals, set((1,)))
self.assertEqual(c.get_context_size(), 2)
self.assertEqual(str(c), '"s" = "s" + %(0)s, "s" = "s" - %(1)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': set((3,)), '1': set((1,))})
class ListUpdateClauseTests(unittest.TestCase):
def test_update_from_none(self):
c = ListUpdateClause('s', [1, 2, 3])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
def test_update_from_empty(self):
c = ListUpdateClause('s', [1, 2, 3], previous=[])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
def test_update_from_different_list(self):
c = ListUpdateClause('s', [1, 2, 3], previous=[3, 2, 1])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
def test_append(self):
c = ListUpdateClause('s', [1, 2, 3, 4], previous=[1, 2])
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._append, [3, 4])
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = "s" + %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [3, 4]})
def test_prepend(self):
c = ListUpdateClause('s', [1, 2, 3, 4], previous=[3, 4])
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertIsNone(c._append)
self.assertEqual(c._prepend, [1, 2])
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s + "s"')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2]})
def test_append_and_prepend(self):
c = ListUpdateClause('s', [1, 2, 3, 4, 5, 6], previous=[3, 4])
c._analyze()
c.set_context_id(0)
self.assertIsNone(c._assignments)
self.assertEqual(c._append, [5, 6])
self.assertEqual(c._prepend, [1, 2])
self.assertEqual(c.get_context_size(), 2)
self.assertEqual(str(c), '"s" = %(0)s + "s", "s" = "s" + %(1)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2], '1': [5, 6]})
def test_shrinking_list_update(self):
""" tests that updating to a smaller list results in an insert statement """
c = ListUpdateClause('s', [1, 2, 3], previous=[1, 2, 3, 4])
c._analyze()
c.set_context_id(0)
self.assertEqual(c._assignments, [1, 2, 3])
self.assertIsNone(c._append)
self.assertIsNone(c._prepend)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"s" = %(0)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': [1, 2, 3]})
class MapUpdateTests(unittest.TestCase):
def test_update(self):
c = MapUpdateClause('s', {3: 0, 5: 6}, previous={5: 0, 3: 4})
c._analyze()
c.set_context_id(0)
self.assertEqual(c._updates, [3, 5])
self.assertEqual(c.get_context_size(), 4)
self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6})
def test_update_from_null(self):
c = MapUpdateClause('s', {3: 0, 5: 6})
c._analyze()
c.set_context_id(0)
self.assertEqual(c._updates, [3, 5])
self.assertEqual(c.get_context_size(), 4)
self.assertEqual(str(c), '"s"[%(0)s] = %(1)s, "s"[%(2)s] = %(3)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': 3, "1": 0, '2': 5, '3': 6})
def test_nulled_columns_arent_included(self):
c = MapUpdateClause('s', {3: 0}, {1: 2, 3: 4})
c._analyze()
c.set_context_id(0)
self.assertNotIn(1, c._updates)
class CounterUpdateTests(unittest.TestCase):
def test_positive_update(self):
c = CounterUpdateClause('a', 5, 3)
c.set_context_id(5)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"a" = "a" + %(5)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'5': 2})
def test_negative_update(self):
c = CounterUpdateClause('a', 4, 7)
c.set_context_id(3)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"a" = "a" - %(3)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'3': 3})
def test_noop_update(self):
c = CounterUpdateClause('a', 5, 5)
c.set_context_id(5)
self.assertEqual(c.get_context_size(), 1)
self.assertEqual(str(c), '"a" = "a" + %(5)s')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'5': 0})
class MapDeleteTests(unittest.TestCase):
def test_update(self):
c = MapDeleteClause('s', {3: 0}, {1: 2, 3: 4, 5: 6})
c._analyze()
c.set_context_id(0)
self.assertEqual(c._removals, [1, 5])
self.assertEqual(c.get_context_size(), 2)
self.assertEqual(str(c), '"s"[%(0)s], "s"[%(1)s]')
ctx = {}
c.update_context(ctx)
self.assertEqual(ctx, {'0': 1, '1': 5})
class FieldDeleteTests(unittest.TestCase):
def test_str(self):
f = FieldDeleteClause("blake")
assert str(f) == '"blake"'
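# A quick interactive sketch of the clause API exercised above
# (hypothetical values, same pattern as test_additions):
#
# >>> c = SetUpdateClause('tags', set(('a',)), previous=set())
# >>> c._analyze()
# >>> c.set_context_id(0)
# >>> str(c)
# '"tags" = "tags" + %(0)s'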
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.service_usage_v1.types import resources
from google.cloud.service_usage_v1.types import serviceusage
from google.longrunning import operations_pb2 # type: ignore
from .base import ServiceUsageTransport, DEFAULT_CLIENT_INFO
from .grpc import ServiceUsageGrpcTransport
class ServiceUsageGrpcAsyncIOTransport(ServiceUsageTransport):
"""gRPC AsyncIO backend transport for ServiceUsage.
Enables services that service consumers want to use on Google Cloud
Platform, lists the available or enabled services, or disables
services that service consumers no longer use.
See `Service Usage
API <https://cloud.google.com/service-usage/docs/overview>`__
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "serviceusage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "serviceusage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def enable_service(
self,
) -> Callable[
[serviceusage.EnableServiceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the enable service method over gRPC.
Enable a service so that it can be used with a
project.
Returns:
Callable[[~.EnableServiceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "enable_service" not in self._stubs:
self._stubs["enable_service"] = self.grpc_channel.unary_unary(
"/google.api.serviceusage.v1.ServiceUsage/EnableService",
request_serializer=serviceusage.EnableServiceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["enable_service"]
@property
def disable_service(
self,
) -> Callable[
[serviceusage.DisableServiceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the disable service method over gRPC.
Disable a service so that it can no longer be used with a
project. This prevents unintended usage that may cause
unexpected billing charges or security leaks.
It is not valid to call the disable method on a service that is
not currently enabled. Callers will receive a
``FAILED_PRECONDITION`` status if the target service is not
currently enabled.
Returns:
Callable[[~.DisableServiceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "disable_service" not in self._stubs:
self._stubs["disable_service"] = self.grpc_channel.unary_unary(
"/google.api.serviceusage.v1.ServiceUsage/DisableService",
request_serializer=serviceusage.DisableServiceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["disable_service"]
@property
def get_service(
self,
) -> Callable[[serviceusage.GetServiceRequest], Awaitable[resources.Service]]:
r"""Return a callable for the get service method over gRPC.
Returns the service configuration and enabled state
for a given service.
Returns:
Callable[[~.GetServiceRequest],
Awaitable[~.Service]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_service" not in self._stubs:
self._stubs["get_service"] = self.grpc_channel.unary_unary(
"/google.api.serviceusage.v1.ServiceUsage/GetService",
request_serializer=serviceusage.GetServiceRequest.serialize,
response_deserializer=resources.Service.deserialize,
)
return self._stubs["get_service"]
@property
def list_services(
self,
) -> Callable[
[serviceusage.ListServicesRequest], Awaitable[serviceusage.ListServicesResponse]
]:
r"""Return a callable for the list services method over gRPC.
List all services available to the specified project, and the
current state of those services with respect to the project. The
list includes all public services, all services for which the
calling user has the ``servicemanagement.services.bind``
permission, and all services that have already been enabled on
the project. The list can be filtered to only include services
in a specific state, for example to only include services
enabled on the project.
WARNING: If you need to query enabled services frequently or
across an organization, you should use `Cloud Asset Inventory
API <https://cloud.google.com/asset-inventory/docs/apis>`__,
which provides higher throughput and richer filtering
capability.
Returns:
Callable[[~.ListServicesRequest],
Awaitable[~.ListServicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_services" not in self._stubs:
self._stubs["list_services"] = self.grpc_channel.unary_unary(
"/google.api.serviceusage.v1.ServiceUsage/ListServices",
request_serializer=serviceusage.ListServicesRequest.serialize,
response_deserializer=serviceusage.ListServicesResponse.deserialize,
)
return self._stubs["list_services"]
@property
def batch_enable_services(
self,
) -> Callable[
[serviceusage.BatchEnableServicesRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the batch enable services method over gRPC.
Enable multiple services on a project. The operation is atomic:
if enabling any service fails, then the entire batch fails, and
no state changes occur. To enable a single service, use the
``EnableService`` method instead.
Returns:
Callable[[~.BatchEnableServicesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_enable_services" not in self._stubs:
self._stubs["batch_enable_services"] = self.grpc_channel.unary_unary(
"/google.api.serviceusage.v1.ServiceUsage/BatchEnableServices",
request_serializer=serviceusage.BatchEnableServicesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_enable_services"]
@property
def batch_get_services(
self,
) -> Callable[
[serviceusage.BatchGetServicesRequest],
Awaitable[serviceusage.BatchGetServicesResponse],
]:
r"""Return a callable for the batch get services method over gRPC.
Returns the service configurations and enabled states
for a given list of services.
Returns:
Callable[[~.BatchGetServicesRequest],
Awaitable[~.BatchGetServicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_get_services" not in self._stubs:
self._stubs["batch_get_services"] = self.grpc_channel.unary_unary(
"/google.api.serviceusage.v1.ServiceUsage/BatchGetServices",
request_serializer=serviceusage.BatchGetServicesRequest.serialize,
response_deserializer=serviceusage.BatchGetServicesResponse.deserialize,
)
return self._stubs["batch_get_services"]
def close(self):
return self.grpc_channel.close()
__all__ = ("ServiceUsageGrpcAsyncIOTransport",)
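# Note on the pattern above: each RPC property builds its gRPC stub lazily and
# caches it in ``self._stubs``, so a stub is only created the first time the
# corresponding method is used and is reused afterwards. A minimal sketch of the
# same idea (illustrative only; the names below are not part of this transport):
#
#     class _LazyStubs:
#         def __init__(self, channel):
#             self._channel = channel
#             self._stubs = {}
#
#         @property
#         def get_service(self):
#             if "get_service" not in self._stubs:
#                 self._stubs["get_service"] = self._channel.unary_unary(
#                     "/google.api.serviceusage.v1.ServiceUsage/GetService",
#                 )
#             return self._stubs["get_service"]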
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""
Tests for pdc_client.PDCClient class.
"""
import json
import os
import time
import traceback
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
try:
# Python 2.6 compatibility
import unittest2 as unittest
except ImportError:
import unittest
from beanbag import BeanBagException
from pdc_client import NoResultsError, PDCClient
SERVER_ENV_VAR_NAME = 'PDC_CLIENT_TEST_SERVER'
DEFAULT_SERVER = 'localhost'
PORT_ENV_VAR_NAME = 'PDC_CLIENT_TEST_SERVER_PORT'
DEFAULT_PORT = 8378
API_PATH = '/rest_api/v1'
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_NOT_FOUND = 404
HTTP_INTERNAL_SERVER_ERROR = 500
def _paged_results(results, request):
if request.get('page_size') == '-1':
return HTTP_OK, results
if request.get('page', '1') != '1':
return HTTP_NOT_FOUND, {"detail": "Invalid page"}
return HTTP_OK, {
'count': len(results),
'next': None,
'previous': None,
'results': results,
}
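# Example return values (illustrative):
#   _paged_results([{'id': 1}], {}) ->
#       (200, {'count': 1, 'next': None, 'previous': None, 'results': [{'id': 1}]})
#   _paged_results([{'id': 1}], {'page_size': '-1'}) -> (200, [{'id': 1}])
#   _paged_results([{'id': 1}], {'page': '2'})       -> (404, {'detail': 'Invalid page'})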
class _MockPDCServerRequestHandler(BaseHTTPRequestHandler, object):
"""
Mocked PDC server.
"""
class HttpNotFoundError(Exception):
pass
data = {
'products': {
"epel": {
"name": "EPEL",
"short": "epel",
"active": True,
"product_versions": [
"epel-6",
"epel-7"
],
"allowed_push_targets": []
},
"fedora": {
"name": "Fedora",
"short": "fedora",
"active": True,
"product_versions": [
"fedora-27",
"fedora-rawhide"
],
"allowed_push_targets": []
}
},
'cpes': {
1: {
"cpe": "cpe:/o:redhat:enterprise_linux:7::workstation",
"description": "RHEL 7 Workstation",
"id": 1
}
},
'auth': {
'token': {
'obtain': {
'token': '1'
}
}
}
}
last_comment = ''
def _find_available_pk(self, data):
"""
Return key not yet used in data.
"""
if data:
return max(data) + 1
return 1
def _data(self):
content_length = int(self.headers['Content-Length'])
if content_length == 0:
return None
raw_data = self.rfile.read(content_length)
return json.loads(raw_data.decode())
def _send_response(self, status_code, data):
self.send_response(status_code)
self.send_header('Content-Type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(data).encode())
def _get_data_item_and_request(self):
if not self.path.startswith(API_PATH + '/'):
raise self.HttpNotFoundError()
data_path = self.path[len(API_PATH) + 1:]
data_path_and_request = data_path.split('?', 1)
data_path = data_path_and_request[0]
if len(data_path_and_request) == 2:
request_part = data_path_and_request[1]
request_part = request_part.split('&')
request = dict(key_value.split('=', 1) for key_value in request_part)
else:
request = {}
path_components = data_path.split('/')
item = self.data
parent_item = None
pk = None
try:
if not path_components[-1]:
path_components.pop()
for path_component in path_components:
parent_item = item
pk = path_component
# If existing keys are numeric, convert the path component to an int before lookup.
existing_pk = list(item.keys())[0]
if isinstance(existing_pk, int):
pk = int(path_component)
item = item[pk]
except KeyError:
raise self.HttpNotFoundError()
return item, parent_item, pk, request
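# Illustrative example of the traversal above: a request for
# '/rest_api/v1/products/fedora' yields item = data['products']['fedora'],
# parent_item = data['products'] and pk = 'fedora', with any query string
# parsed into the ``request`` dict.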
def _do_GET(self, item, parent_item, pk, request):
status_code = HTTP_OK
if item == self.data['products'] and 'short' in request:
short = request['short']
status_code, data = _paged_results([item[short]], request)
elif item in self.data.values():
status_code, data = _paged_results(list(item.values()), request)
else:
data = item
return status_code, data
def _do_POST(self, item, parent_item, pk, request):
data = self._data()
pk = self._find_available_pk(item)
data['id'] = pk
item[pk] = data
return HTTP_OK, data
def _do_PATCH(self, item, parent_item, pk, request):
data = self._data()
_MockPDCServerRequestHandler.last_comment = self.headers.get('PDC-Change-Comment')
item.update(data)
return HTTP_OK, data
def _do_PUT(self, item, parent_item, pk, request):
data = self._data()
if 'id' in item:
data['id'] = pk
parent_item[pk] = data
return HTTP_OK, data
def _do_DELETE(self, item, parent_item, pk, request):
del parent_item[pk]
return HTTP_NO_CONTENT, 'No content'
def _do(self, method):
try:
item, parent_item, pk, request = self._get_data_item_and_request()
status_code, data = method(item, parent_item, pk, request)
self._send_response(status_code, data)
except self.HttpNotFoundError:
self._send_response(HTTP_NOT_FOUND, '')
except Exception as e:
traceback.print_exc()
data = {'detail': str(e)}
self._send_response(HTTP_INTERNAL_SERVER_ERROR, data)
def do_GET(self):
self._do(self._do_GET)
def do_POST(self):
self._do(self._do_POST)
def do_PATCH(self):
self._do(self._do_PATCH)
def do_PUT(self):
self._do(self._do_PUT)
def do_DELETE(self):
self._do(self._do_DELETE)
def log_message(self, format, *args):
"""
Omit printing on console.
"""
pass
# Workaround for 'Connection reset by peer' errors in Python 2.6.
def handle(self):
time.sleep(0.001)
super(_MockPDCServerRequestHandler, self).handle()
class PDCClientTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
port_value = os.getenv(PORT_ENV_VAR_NAME)
cls.port = int(port_value) if port_value else DEFAULT_PORT
server_name_value = os.getenv(SERVER_ENV_VAR_NAME)
cls.server_name = server_name_value if server_name_value else DEFAULT_SERVER
cls.server = HTTPServer((cls.server_name, cls.port), _MockPDCServerRequestHandler)
cls.server_thread = Thread(target=cls.server.serve_forever)
cls.server_thread.setDaemon(True)
cls.server_thread.start()
cls.url = 'http://{server}:{port}{api_path}'.format(
server=cls.server_name,
port=cls.port,
api_path=API_PATH,
)
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
# Python 2.6 compatibility
if not hasattr(PDCClientTestCase, 'url'):
PDCClientTestCase.setUpClass()
self.client = PDCClient(
server=self.url,
ssl_verify=False,
)
def test_get_attr(self):
response = self.client.products()
self.assertEqual(
response.get('count'), len(_MockPDCServerRequestHandler.data['products']))
def test_get_attr_not_found(self):
with self.assertRaises(BeanBagException) as context:
self.client.bad_api()
self.assertEqual(
context.exception.response.status_code, HTTP_NOT_FOUND)
def test_get_item(self):
response = self.client['products']()
self.assertEqual(
response.get('count'), len(_MockPDCServerRequestHandler.data['products']))
def test_get_item_not_found(self):
with self.assertRaises(BeanBagException) as context:
self.client['bad-resource']()
self.assertEqual(
context.exception.response.status_code, HTTP_NOT_FOUND)
def test_get_attr_attr(self):
response = self.client.products.fedora()
self.assertEqual(
response, _MockPDCServerRequestHandler.data['products']['fedora'])
def test_get_attr_attr_not_found(self):
products = self.client.products
with self.assertRaises(BeanBagException) as context:
products.bad_id()
self.assertEqual(
context.exception.response.status_code, HTTP_NOT_FOUND)
def test_get_item_item(self):
response = self.client['products']['fedora']()
self.assertEqual(
response, _MockPDCServerRequestHandler.data['products']['fedora'])
def test_get_item_item_not_found(self):
products = self.client['products']
with self.assertRaises(BeanBagException) as context:
products['bad-resource']()
self.assertEqual(
context.exception.response.status_code, HTTP_NOT_FOUND)
def test_patch_item(self):
self.client.cpes[1] += {'description': 'TEST'}
self.assertEqual(_MockPDCServerRequestHandler.data['cpes'][1]['description'], 'TEST')
self.client['cpes'][1] += {'description': 'TEST2'}
self.assertEqual(_MockPDCServerRequestHandler.data['cpes'][1]['description'], 'TEST2')
def test_patch_attr(self):
active = not _MockPDCServerRequestHandler.data['products']['epel']['active']
self.client.products.epel += {'active': active}
self.assertEqual(_MockPDCServerRequestHandler.data['products']['epel']['active'], active)
active = not active
self.client['products'].epel += {'active': active}
self.assertEqual(_MockPDCServerRequestHandler.data['products']['epel']['active'], active)
def test_put_item(self):
new_data = {
"cpe": "cpe:/o:redhat:enterprise_linux:6::workstation",
"description": "RHEL 6 Workstation",
}
self.client.cpes[1] = new_data
new_data['id'] = 1
self.assertDictEqual(_MockPDCServerRequestHandler.data['cpes'][1], new_data)
def test_put_attr(self):
new_data = dict(_MockPDCServerRequestHandler.data['products']['epel'])
new_data['active'] = not new_data['active']
self.assertNotEqual(_MockPDCServerRequestHandler.data['products']['epel']['active'], new_data['active'])
self.client.products.epel = new_data
self.assertDictEqual(_MockPDCServerRequestHandler.data['products']['epel'], new_data)
def test_post_and_delete(self):
new_data = {
"cpe": "cpe:/o:redhat:enterprise_linux:5::workstation",
"description": "RHEL 5 Workstation",
}
self.assertEqual(len(_MockPDCServerRequestHandler.data['cpes']), 1)
# post request (create)
response = self.client.cpes(new_data)
new_id = response['id']
self.assertEqual(len(_MockPDCServerRequestHandler.data['cpes']), new_id)
self.assertTrue(new_id in _MockPDCServerRequestHandler.data['cpes'])
new_data['id'] = new_id
self.assertDictEqual(_MockPDCServerRequestHandler.data['cpes'][new_id], new_data)
# delete
del self.client.cpes[new_id]
self.assertEqual(len(_MockPDCServerRequestHandler.data['cpes']), 1)
self.assertTrue(new_id not in _MockPDCServerRequestHandler.data['cpes'])
def test_bad_delete(self):
with self.assertRaises(AttributeError):
del self.client['bad_resource']
with self.assertRaises(AttributeError):
del self.client.bad_resource
def test_bad_put(self):
with self.assertRaises(BeanBagException):
self.client['bad_resource'] = {}
def test_str(self):
self.assertEqual(str(self.client.products.fedora), self.url + '/products/fedora')
self.assertEqual(str(self.client), self.url + '/')
self.assertEqual(str(self.client._), self.url + '/')
self.assertEqual(str(self.client._._), self.url + '/')
self.assertEqual(str(self.client.products), self.url + '/products')
self.assertEqual(str(self.client.products._), self.url + '/products/')
self.assertEqual(str(self.client._._.products._._), self.url + '/products/')
def test_eq(self):
self.assertEqual(self.client.products, self.client.products)
self.assertEqual(self.client['products'], self.client['products'])
self.assertEqual(self.client.products, self.client['products'])
self.assertEqual(self.client.products.fedora, self.client['products']['fedora'])
self.assertEqual(self.client._.products.fedora._, self.client['products/']['fedora/'])
def test_set_comment(self):
self.client.set_comment('TEST')
self.client.cpes[1] += {'description': 'TEST'}
self.assertEqual(_MockPDCServerRequestHandler.last_comment, 'TEST')
def test_results(self):
products = list(self.client.products.results())
self.assertEqual(len(products), 2)
products = list(self.client.products.results(short='fedora'))
self.assertEqual(len(products), 1)
self.assertEqual(products[0]['short'], 'fedora')
with self.assertRaises(BeanBagException):
list(self.client.bad_resource.results())
def test_results_list(self):
products = list(self.client.products.results(page_size=-1))
self.assertEqual(len(products), 2)
products = list(self.client.products.results(short='fedora'))
self.assertEqual(len(products), 1)
self.assertEqual(products[0]['short'], 'fedora')
with self.assertRaises(BeanBagException):
list(self.client.bad_resource.results())
def test_no_results_error(self):
with self.assertRaises(NoResultsError):
list(self.client.products.fedora.results())
def test_get_paged(self):
products = list(self.client.get_paged(self.client.products))
self.assertEqual(len(products), 2)
products = list(self.client.get_paged(self.client.products, short='fedora'))
self.assertEqual(len(products), 1)
self.assertEqual(products[0]['short'], 'fedora')
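# Allow this test module to be run directly.
if __name__ == '__main__':
unittest.main()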
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import List, Optional, Type, TypeVar, Union, TYPE_CHECKING
from .asset import Asset
from .utils import parse_time, snowflake_time, _get_as_snowflake
from .object import Object
from .mixins import Hashable
from .enums import ChannelType, VerificationLevel, InviteTarget, try_enum
from .appinfo import PartialAppInfo
__all__ = (
'PartialInviteChannel',
'PartialInviteGuild',
'Invite',
)
if TYPE_CHECKING:
from .types.invite import (
Invite as InvitePayload,
InviteGuild as InviteGuildPayload,
GatewayInvite as GatewayInvitePayload,
)
from .types.channel import (
PartialChannel as InviteChannelPayload,
)
from .state import ConnectionState
from .guild import Guild
from .abc import GuildChannel
from .user import User
InviteGuildType = Union[Guild, 'PartialInviteGuild', Object]
InviteChannelType = Union[GuildChannel, 'PartialInviteChannel', Object]
import datetime
class PartialInviteChannel:
"""Represents a "partial" invite channel.
This model will be given when the user is not part of the
guild the :class:`Invite` resolves to.
.. container:: operations
.. describe:: x == y
Checks if two partial channels are the same.
.. describe:: x != y
Checks if two partial channels are not the same.
.. describe:: hash(x)
Return the partial channel's hash.
.. describe:: str(x)
Returns the partial channel's name.
Attributes
-----------
name: :class:`str`
The partial channel's name.
id: :class:`int`
The partial channel's ID.
type: :class:`ChannelType`
The partial channel's type.
"""
__slots__ = ('id', 'name', 'type')
def __init__(self, data: InviteChannelPayload):
self.id: int = int(data['id'])
self.name: str = data['name']
self.type: ChannelType = try_enum(ChannelType, data['type'])
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return f'<PartialInviteChannel id={self.id} name={self.name} type={self.type!r}>'
@property
def mention(self) -> str:
""":class:`str`: The string that allows you to mention the channel."""
return f'<#{self.id}>'
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return snowflake_time(self.id)
class PartialInviteGuild:
"""Represents a "partial" invite guild.
This model will be given when the user is not part of the
guild the :class:`Invite` resolves to.
.. container:: operations
.. describe:: x == y
Checks if two partial guilds are the same.
.. describe:: x != y
Checks if two partial guilds are not the same.
.. describe:: hash(x)
Return the partial guild's hash.
.. describe:: str(x)
Returns the partial guild's name.
Attributes
-----------
name: :class:`str`
The partial guild's name.
id: :class:`int`
The partial guild's ID.
verification_level: :class:`VerificationLevel`
The partial guild's verification level.
features: List[:class:`str`]
A list of features the guild has. See :attr:`Guild.features` for more information.
description: Optional[:class:`str`]
The partial guild's description.
"""
__slots__ = ('_state', 'features', '_icon', '_banner', 'id', 'name', '_splash', 'verification_level', 'description')
def __init__(self, state: ConnectionState, data: InviteGuildPayload, id: int):
self._state: ConnectionState = state
self.id: int = id
self.name: str = data['name']
self.features: List[str] = data.get('features', [])
self._icon: Optional[str] = data.get('icon')
self._banner: Optional[str] = data.get('banner')
self._splash: Optional[str] = data.get('splash')
self.verification_level: VerificationLevel = try_enum(VerificationLevel, data.get('verification_level'))
self.description: Optional[str] = data.get('description')
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return (
f'<{self.__class__.__name__} id={self.id} name={self.name!r} features={self.features} '
f'description={self.description!r}>'
)
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the guild's creation time in UTC."""
return snowflake_time(self.id)
@property
def icon(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return Asset._from_guild_icon(self._state, self.id, self._icon)
@property
def banner(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's banner asset, if available."""
if self._banner is None:
return None
return Asset._from_guild_image(self._state, self.id, self._banner, path='banners')
@property
def splash(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: Returns the guild's invite splash asset, if available."""
if self._splash is None:
return None
return Asset._from_guild_image(self._state, self.id, self._splash, path='splashes')
I = TypeVar('I', bound='Invite')
class Invite(Hashable):
r"""Represents a Discord :class:`Guild` or :class:`abc.GuildChannel` invite.
Depending on the way this object was created, some of the attributes can
have a value of ``None``.
.. container:: operations
.. describe:: x == y
Checks if two invites are equal.
.. describe:: x != y
Checks if two invites are not equal.
.. describe:: hash(x)
Returns the invite hash.
.. describe:: str(x)
Returns the invite URL.
The following table illustrates what methods will obtain the attributes:
+------------------------------------+------------------------------------------------------------+
| Attribute | Method |
+====================================+============================================================+
| :attr:`max_age` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`max_uses` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`created_at` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`temporary` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`uses` | :meth:`abc.GuildChannel.invites`\, :meth:`Guild.invites` |
+------------------------------------+------------------------------------------------------------+
| :attr:`approximate_member_count` | :meth:`Client.fetch_invite` with `with_counts` enabled |
+------------------------------------+------------------------------------------------------------+
| :attr:`approximate_presence_count` | :meth:`Client.fetch_invite` with `with_counts` enabled |
+------------------------------------+------------------------------------------------------------+
| :attr:`expires_at` | :meth:`Client.fetch_invite` with `with_expiration` enabled |
+------------------------------------+------------------------------------------------------------+
If it's not in the table above then it is available by all methods.
Attributes
-----------
max_age: :class:`int`
How long before the invite expires in seconds.
A value of ``0`` indicates that it doesn't expire.
code: :class:`str`
The URL fragment used for the invite.
guild: Optional[Union[:class:`Guild`, :class:`Object`, :class:`PartialInviteGuild`]]
The guild the invite is for. Can be ``None`` if it's from a group direct message.
revoked: :class:`bool`
Indicates if the invite has been revoked.
created_at: :class:`datetime.datetime`
An aware UTC datetime object denoting the time the invite was created.
temporary: :class:`bool`
Indicates that the invite grants temporary membership.
If ``True``, members who joined via this invite will be kicked upon disconnect.
uses: :class:`int`
How many times the invite has been used.
max_uses: :class:`int`
How many times the invite can be used.
A value of ``0`` indicates that it has unlimited uses.
inviter: Optional[:class:`User`]
The user who created the invite.
approximate_member_count: Optional[:class:`int`]
The approximate number of members in the guild.
approximate_presence_count: Optional[:class:`int`]
The approximate number of members currently active in the guild.
This includes idle, dnd, online, and invisible members. Offline members are excluded.
expires_at: Optional[:class:`datetime.datetime`]
The expiration date of the invite. If the value is ``None`` when received through
`Client.fetch_invite` with `with_expiration` enabled, the invite will never expire.
.. versionadded:: 2.0
channel: Union[:class:`abc.GuildChannel`, :class:`Object`, :class:`PartialInviteChannel`]
The channel the invite is for.
target_type: :class:`InviteTarget`
The type of target for the voice channel invite.
.. versionadded:: 2.0
target_user: Optional[:class:`User`]
The user whose stream to display for this invite, if any.
.. versionadded:: 2.0
target_application: Optional[:class:`PartialAppInfo`]
The embedded application the invite targets, if any.
.. versionadded:: 2.0
"""
__slots__ = (
'max_age',
'code',
'guild',
'revoked',
'created_at',
'uses',
'temporary',
'max_uses',
'inviter',
'channel',
'target_user',
'target_type',
'_state',
'approximate_member_count',
'approximate_presence_count',
'target_application',
'expires_at',
)
BASE = 'https://discord.gg'
def __init__(
self,
*,
state: ConnectionState,
data: InvitePayload,
guild: Optional[Union[PartialInviteGuild, Guild]] = None,
channel: Optional[Union[PartialInviteChannel, GuildChannel]] = None,
):
self._state: ConnectionState = state
self.max_age: Optional[int] = data.get('max_age')
self.code: str = data['code']
self.guild: Optional[InviteGuildType] = self._resolve_guild(data.get('guild'), guild)
self.revoked: Optional[bool] = data.get('revoked')
self.created_at: Optional[datetime.datetime] = parse_time(data.get('created_at'))
self.temporary: Optional[bool] = data.get('temporary')
self.uses: Optional[int] = data.get('uses')
self.max_uses: Optional[int] = data.get('max_uses')
self.approximate_presence_count: Optional[int] = data.get('approximate_presence_count')
self.approximate_member_count: Optional[int] = data.get('approximate_member_count')
expires_at = data.get('expires_at', None)
self.expires_at: Optional[datetime.datetime] = parse_time(expires_at) if expires_at else None
inviter_data = data.get('inviter')
self.inviter: Optional[User] = None if inviter_data is None else self._state.create_user(inviter_data)
self.channel: Optional[InviteChannelType] = self._resolve_channel(data.get('channel'), channel)
target_user_data = data.get('target_user')
self.target_user: Optional[User] = None if target_user_data is None else self._state.create_user(target_user_data)
self.target_type: InviteTarget = try_enum(InviteTarget, data.get("target_type", 0))
application = data.get('target_application')
self.target_application: Optional[PartialAppInfo] = (
PartialAppInfo(data=application, state=state) if application else None
)
@classmethod
def from_incomplete(cls: Type[I], *, state: ConnectionState, data: InvitePayload) -> I:
guild: Optional[Union[Guild, PartialInviteGuild]]
try:
guild_data = data['guild']
except KeyError:
# If we're here, then this is a group DM
guild = None
else:
guild_id = int(guild_data['id'])
guild = state._get_guild(guild_id)
if guild is None:
# If it's not cached, then it has to be a partial guild
guild = PartialInviteGuild(state, guild_data, guild_id)
# As far as I know, invites always need a channel
# So this should never raise.
channel: Union[PartialInviteChannel, GuildChannel] = PartialInviteChannel(data['channel'])
if guild is not None and not isinstance(guild, PartialInviteGuild):
# Upgrade the partial data if applicable
channel = guild.get_channel(channel.id) or channel
return cls(state=state, data=data, guild=guild, channel=channel)
@classmethod
def from_gateway(cls: Type[I], *, state: ConnectionState, data: GatewayInvitePayload) -> I:
guild_id: Optional[int] = _get_as_snowflake(data, 'guild_id')
guild: Optional[Union[Guild, Object]] = state._get_guild(guild_id)
channel_id = int(data['channel_id'])
if guild is not None:
channel = guild.get_channel(channel_id) or Object(id=channel_id) # type: ignore
else:
guild = Object(id=guild_id) if guild_id is not None else None
channel = Object(id=channel_id)
return cls(state=state, data=data, guild=guild, channel=channel) # type: ignore
def _resolve_guild(
self,
data: Optional[InviteGuildPayload],
guild: Optional[Union[Guild, PartialInviteGuild]] = None,
) -> Optional[InviteGuildType]:
if guild is not None:
return guild
if data is None:
return None
guild_id = int(data['id'])
return PartialInviteGuild(self._state, data, guild_id)
def _resolve_channel(
self,
data: Optional[InviteChannelPayload],
channel: Optional[Union[PartialInviteChannel, GuildChannel]] = None,
) -> Optional[InviteChannelType]:
if channel is not None:
return channel
if data is None:
return None
return PartialInviteChannel(data)
def __str__(self) -> str:
return self.url
def __repr__(self) -> str:
return (
f'<Invite code={self.code!r} guild={self.guild!r} '
f'online={self.approximate_presence_count} '
f'members={self.approximate_member_count}>'
)
def __hash__(self) -> int:
return hash(self.code)
@property
def id(self) -> str:
""":class:`str`: Returns the proper code portion of the invite."""
return self.code
@property
def url(self) -> str:
""":class:`str`: A property that retrieves the invite URL."""
return self.BASE + '/' + self.code
async def delete(self, *, reason: Optional[str] = None):
"""|coro|
Revokes the instant invite.
You must have the :attr:`~Permissions.manage_channels` permission to do this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this invite. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to revoke invites.
NotFound
The invite is invalid or expired.
HTTPException
Revoking the invite failed.
"""
await self._state.http.delete_invite(self.code, reason=reason)
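# Illustrative usage sketch (the ``client`` name below is an assumption for
# illustration and refers to an already-connected discord.py ``Client``):
#
#     invite = await client.fetch_invite(
#         "https://discord.gg/code", with_counts=True, with_expiration=True
#     )
#     print(invite.url, invite.approximate_member_count, invite.expires_at)
#     await invite.delete(reason="cleaning up")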
|
|
#!/usr/bin/env python
"""
_replicator_test_
Tests for the cloudant.replicator module
"""
import unittest
import mock
import requests
from cloudant.errors import CloudantException
from cloudant.replicator import ReplicatorDatabase
from cloudant.document import Document
class ReplicatorDatabaseTests(unittest.TestCase):
"""
tests for ReplicatorDatabase class
"""
def setUp(self):
"""
mock out requests.Session
"""
self.patcher = mock.patch.object(requests, "Session")
self.mock_session = self.patcher.start()
self.mock_instance = mock.Mock()
self.mock_instance.auth = None
self.mock_instance.headers = {}
self.mock_instance.cookies = {'AuthSession': 'COOKIE'}
self.mock_instance.get = mock.Mock()
self.mock_instance.post = mock.Mock()
self.mock_instance.delete = mock.Mock()
self.mock_instance.put = mock.Mock()
self.mock_session.return_value = self.mock_instance
self.username = "steve"
self.password = "abc123"
def tearDown(self):
self.patcher.stop()
def test_create_replication(self):
"""test create_replication method"""
with mock.patch('cloudant.database.CloudantDatabase.create_document') as mock_create:
mock_account = mock.Mock()
mock_account.session = mock.Mock()
mock_account.session.return_value = {
"userCtx": "user Context"
}
mock_target = mock.Mock()
mock_target.database_url = "http://bob.cloudant.com/target"
mock_target.creds = {'basic_auth': "target_auth"}
mock_source = mock.Mock()
mock_source.database_url = "http://bob.cloudant.com/source"
mock_source.creds = {'basic_auth': "source_auth"}
repl = ReplicatorDatabase(mock_account)
repl.create_replication(
mock_source, mock_target, "REPLID"
)
self.failUnless(mock_create.called)
repl_doc = mock_create.call_args[0][0]
self.failUnless('source' in repl_doc)
self.failUnless('target' in repl_doc)
self.assertEqual(repl_doc['_id'], 'REPLID')
self.assertEqual(
repl_doc['source']['url'],
'http://bob.cloudant.com/source'
)
self.assertEqual(
repl_doc['target']['url'],
'http://bob.cloudant.com/target'
)
self.assertEqual(
repl_doc['target']['headers']['Authorization'],
'target_auth'
)
self.assertEqual(
repl_doc['source']['headers']['Authorization'],
'source_auth'
)
def test_create_replication_errors(self):
"""check expected error conditions"""
mock_account = mock.Mock()
mock_account.session = mock.Mock()
mock_account.session.return_value = {
"userCtx": "user Context"
}
mock_target = mock.Mock()
mock_target.database_url = "http://bob.cloudant.com/target"
mock_target.creds = {'basic_auth': "target_auth"}
mock_source = mock.Mock()
mock_source.database_url = "http://bob.cloudant.com/source"
mock_source.creds = {'basic_auth': "source_auth"}
repl = ReplicatorDatabase(mock_account)
self.assertRaises(
CloudantException,
repl.create_replication,
target=mock_target,
repl_id="REPLID"
)
self.assertRaises(
CloudantException,
repl.create_replication,
source=mock_source,
repl_id="REPLID"
)
def test_list_replications(self):
with mock.patch('cloudant.database.CloudantDatabase.all_docs') as mock_all_docs:
mock_all_docs.return_value = {
"rows": [
{"doc":"replication_1"},
{"doc": "replication_2"}
]
}
mock_account = mock.Mock()
repl = ReplicatorDatabase(mock_account)
self.assertEqual(
repl.list_replications(),
['replication_1', 'replication_2']
)
def test_replication_state(self):
"""test replication state method"""
mock_account = mock.Mock()
repl = ReplicatorDatabase(mock_account)
mock_doc = mock.Mock()
mock_doc.fetch = mock.Mock()
mock_doc.get = mock.Mock()
mock_doc.get.return_value = "STATE"
repl['replication_1'] = mock_doc
self.assertEqual(repl.replication_state('replication_1'), 'STATE')
with mock.patch('cloudant.replicator.ReplicatorDatabase.__getitem__') as mock_gi:
mock_gi.side_effect = KeyError("womp")
self.assertRaises(
CloudantException,
repl.replication_state,
'replication_2'
)
def test_stop_replication(self):
"""test stop_replication call"""
mock_account = mock.Mock()
repl = ReplicatorDatabase(mock_account)
mock_doc = mock.Mock()
mock_doc.fetch = mock.Mock()
mock_doc.delete = mock.Mock()
repl['replication_1'] = mock_doc
repl.stop_replication('replication_1')
self.failUnless(mock_doc.fetch.called)
self.failUnless(mock_doc.delete.called)
with mock.patch('cloudant.replicator.ReplicatorDatabase.__getitem__') as mock_gi:
mock_gi.side_effect = KeyError("womp")
self.assertRaises(
CloudantException,
repl.stop_replication,
'replication_2'
)
def test_follow_replication(self):
"""test follow replication feature"""
with mock.patch('cloudant.replicator.ReplicatorDatabase.changes') as mock_changes:
mock_changes.return_value = [
{"id": "not_this replication"},
{"id": "not_this replication"},
{"id": "replication_1", "_replication_state": "not finished"},
{"id": "replication_1", "_replication_state": "completed"},
]
mock_account = mock.Mock()
repl = ReplicatorDatabase(mock_account)
mock_doc = mock.Mock()
mock_doc.fetch = mock.Mock()
mock_doc.get = mock.Mock()
mock_doc.get.side_effect = ['triggered', 'triggered', 'triggered', 'completed']
repl['replication_1'] = mock_doc
for x, i in enumerate(repl.follow_replication('replication_1')):
pass
# expect 4 iterations
self.assertEqual(x, 3)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python2.7
import heapq
import re
import tempfile
import importer
def _try_as_float(s):
if not s or s[0] not in '0123456789.-':
# optimization
return s
try:
return float(s)
except ValueError:
return s
def _clean_start_spaces(_line):
while _line and (_line[0] == ' ' or _line[0] == '\t'):
_line = _line[1:]
return _line
def _get_first_value(line):
line = _clean_start_spaces(line)
if not line:
return None, None
if line[0] == '\'' or line[0] == '\"':
last = 1
while True:
pos = line.find(line[0], last)
if pos < 0:
raise ValueError('Can\'t split')
if pos == 1:
head, separator, tail = line[2:].partition(',')
return '', _clean_start_spaces(tail)
elif line[pos - 1] == '\\':
last = pos + 1
continue
elif pos + 1 < len(line) and line[pos + 1] == line[0]:
last = pos + 2
continue
else:
value = line[1:pos]
head, separator, tail = line[pos + 1:].partition(',')
return value, _clean_start_spaces(tail)
# finding closing quote
else:
head, separator, tail = line.partition(',')
if not tail and not separator:
return head, None
else:
return head, _clean_start_spaces(tail)
# def _get_first_value
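# Examples (illustrative): _get_first_value('1, 2, 3') -> ('1', '2, 3');
# _get_first_value('"hello", world') -> ('hello', 'world');
# a lone value such as _get_first_value('42') -> ('42', None).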
def _lines_compare(l1, l2):
p1, tail1 = _get_first_value(l1)
p2, tail2 = _get_first_value(l2)
result = cmp(_try_as_float(p1), _try_as_float(p2))
if not result and tail1 and tail2:
return _lines_compare(tail1, tail2)
return result
def key(line):
head, tail = _get_first_value(line)
if tail:
return _try_as_float(head), _try_as_float(_get_first_value(tail)[0])
else:
return _try_as_float(head), None
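# Example (illustrative): key("10,abc,3") -> (10.0, 'abc') and
# key("'a',2") -> ('a', 2.0), so rows compare numerically when the leading
# fields parse as floats and lexicographically otherwise.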
class _Dumper:
def __init__(self):
self._output = None
self._buf = []
def flush(self):
if not self._output:
raise ValueError("Output is no opened during flush")
self._output.writelines(self._buf)
self._output.flush()
self._buf = []
def new_output(self, path):
if self._output:
self.flush()
self._output.close()
self._output = file(path, 'w')
def pop_last_lines(self, lines=1):
accum = []
if lines > 0:
while lines > 0 and len(self._buf) > 0:
accum.append(self._buf.pop())
lines -= 1
accum.reverse()
return accum
def add_lines(self, lines):
self._buf.extend(lines)
def append(self, line):
self._buf.append(line)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._output:
self.flush()
self._output.close()
self._output = None
# class _Dumper
class _DataHandler:
def __init__(self, max_chunk_size, start_line, table_name, counter):
self._max_chunk_size = max_chunk_size
self._start_line = start_line
self._table_name = table_name
self._counter = counter
self._buf = []
self._buf_size = 0
self._chunks = []
self._flushed = False
def add_line(self, line):
line += '\n'
if self._flushed:
raise ValueError("Can't add table data, table interaction already flushed")
self._buf.append(line)
self._buf_size += len(line)
if self._buf_size > self._max_chunk_size:
importer.verbose("Splitting %s temporary data, %d-th part, %d lines, size %d" %
(self._table_name, len(self._chunks) + 1, len(self._buf), self._buf_size))
self._buf.sort(cmp=_lines_compare)
chunk = tempfile.TemporaryFile()
chunk.writelines(self._buf)
chunk.seek(0)
self._chunks.append(chunk)
self._buf_size = 0
self._buf = []
# add_line
def flush_data(self, dumper):
if self._flushed:
raise ValueError("Can't add table data, table interaction already flushed")
importer.verbose("Storing %s data, %d lines in memory, size %d" %
(self._table_name, len(self._buf), self._buf_size))
self._buf.sort(cmp=_lines_compare)
# write file
_end_chunk = False
_end_insert = True
sequence = 1
output_size = 0
insert_size = 0
memory_chunk = ((key(line), line) for line in self._buf)
temp_chunks = (((key(line), line) for line in chunk)
for chunk in self._chunks)
for _key, _line in heapq.merge(memory_chunk, *temp_chunks):
if _end_chunk:
dumper.new_output('{counter}_{table_name}_{sequence}.sql'.format(
counter=self._counter, table_name=self._table_name,
sequence=importer.str_in_base(sequence, min_with=4)))
output_size = 0
if _end_chunk or _end_insert:
dumper.append(self._start_line)
output_size += len(self._start_line)
insert_size = len(self._start_line)
# reset output chunk
_end_chunk = False
_end_insert = False
if output_size + len(_line) + 4 >= self._max_chunk_size:
_end_chunk = True
if insert_size + len(_line) + 4 >= 5000:
_end_insert = True
dumper.append('(' + _line[:-1] + ')' + (';' if _end_chunk or _end_insert else ',') + '\n')
output_size += len(_line) + 4
insert_size += len(_line) + 4
if _end_chunk or _end_insert:
dumper.flush()
if _end_chunk:
sequence += 1
# for _key, _line in sorted data
last_lines = dumper.pop_last_lines(1)
if len(last_lines) > 0:
dumper.append(last_lines[0][:-2] + ";\n")
dumper.flush()
for chunk in self._chunks:
chunk.close()
self._chunks = []
self._buf_size = 0
self._buf = []
self._flushed = True
# flush_data
# class _DataHandler
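# _DataHandler in brief: add_line buffers row data and, once the buffer exceeds
# max_chunk_size, sorts it and spills it to a temporary file; flush_data then
# merge-sorts the in-memory buffer with the spilled chunks via heapq.merge and
# emits INSERT statements, starting a new statement at roughly 5000 bytes and a
# new numbered .sql file whenever the current one would exceed max_chunk_size.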
TABLE_STRUCTURE_RE = re.compile(r'^-- Table structure for table `(?P<table>.*?)`')
INSERT_INTO_RE = re.compile(r'^(?P<insert_into>INSERT INTO .* VALUES) \((?P<data>.*?)\);$')
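# Examples of lines these patterns match (illustrative):
#   "-- Table structure for table `users`"   -> table = 'users'
#   "INSERT INTO `users` VALUES (1,'bob');"  -> insert_into = "INSERT INTO `users` VALUES",
#                                               data = "1,'bob'"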
def __do_split(args, sql_dump_file, order):
with _Dumper() as dumper:
counter = 0
previous_table = None
dumper.new_output('0000_prologue.sql')
table_name = None
epilogue = False
data_handler = None
for line in sql_dump_file:
if epilogue:
dumper.append(line)
if TABLE_STRUCTURE_RE.match(line):
previous_table = table_name
table_name = TABLE_STRUCTURE_RE.match(line).groupdict()['table']
counter = importer.get_order_number(order, table_name, previous_table)
backup = dumper.pop_last_lines(2)
dumper.flush()
dumper.new_output('{counter}_{table_name}.sql'.format(
counter=counter, table_name=table_name))
dumper.add_lines(backup)
dumper.append(line)
elif INSERT_INTO_RE.match(line):
re_dict = INSERT_INTO_RE.match(line).groupdict()
start_line = re_dict['insert_into'] + '\n'
data = re_dict['data']
if not data_handler:
data_handler = _DataHandler(args.chunk_size, start_line, table_name, counter)
data_handler.add_line(data)
elif data_handler and line == '\n':
pass
elif data_handler:
data_handler.flush_data(dumper)
data_handler = None
else:
dumper.append(line)
#foreach line
if data_handler:
data_handler.flush_data(dumper)
data_handler = None
dumper.flush()
# def __do_split
if __name__ == '__main__':
importer.split_sql_file(importer.create_argsparser(), __do_split=__do_split)
|
|
# Copyright (c) 2016 Novo Nordisk Foundation Center for Biosustainability, DTU.
# See LICENSE for details.
import unittest
try: # noqa: C901
import gurobipy
except ImportError as e:
class TestMissingDependency(unittest.TestCase):
@unittest.skip('Missing dependency - ' + str(e))
def test_fail(self):
pass
else:
import copy
import random
import os
import nose
import pickle
from optlang.gurobi_interface import Variable, Constraint, Model, Objective
from gurobipy import GurobiError
from optlang.tests import abstract_test_cases
from optlang import gurobi_interface
random.seed(666)
TESTMODELPATH = os.path.join(os.path.dirname(__file__), 'data/model.lp')
TESTMILPMODELPATH = os.path.join(os.path.dirname(__file__), 'data/simple_milp.lp')
CONVEX_QP_PATH = os.path.join(os.path.dirname(__file__), 'data/qplib_3256.lp')
NONCONVEX_QP_PATH = os.path.join(os.path.dirname(__file__), 'data/qplib_1832.lp')
class VariableTestCase(abstract_test_cases.AbstractVariableTestCase):
interface = gurobi_interface
def test_internal_variable(self):
self.assertEqual(self.var._internal_variable, None)
def test_gurobi_change_name(self):
self.model.add(self.var)
self.model.update()
self.var.name = "test_2"
self.assertEqual(self.var._internal_variable.getAttr("VarName"), "test_2")
def test_get_primal(self):
self.assertEqual(self.var.primal, None)
model = Model(problem=gurobipy.read(TESTMODELPATH))
model.optimize()
for i, j in zip([var.primal for var in model.variables],
[0.8739215069684306, -16.023526143167608, 16.023526143167604, -14.71613956874283,
14.71613956874283, 4.959984944574658, 4.959984944574657, 4.959984944574658,
3.1162689467973905e-29, 2.926716099010601e-29, 0.0, 0.0, -6.112235045340358e-30,
-5.6659435396316186e-30, 0.0, -4.922925402711085e-29, 0.0, 9.282532599166613, 0.0,
6.00724957535033, 6.007249575350331, 6.00724957535033, -5.064375661482091,
1.7581774441067828, 0.0, 7.477381962160285, 0.0, 0.22346172933182767, 45.514009774517454,
8.39, 0.0, 6.007249575350331, 0.0, -4.541857463865631, 0.0, 5.064375661482091, 0.0, 0.0,
2.504309470368734, 0.0, 0.0, -22.809833310204958, 22.809833310204958, 7.477381962160285,
7.477381962160285, 1.1814980932459636, 1.496983757261567, -0.0, 0.0, 4.860861146496815,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.064375661482091, 0.0, 5.064375661482091, 0.0, 0.0,
1.496983757261567, 10.000000000000002, -10.0, 0.0, 0.0, 0.0, 0.0, 0.0, -29.175827135565804,
43.598985311997524, 29.175827135565804, 0.0, 0.0, 0.0, -1.2332237321082153e-29,
3.2148950476847613, 38.53460965051542, 5.064375661482091, 0.0, -1.2812714099825612e-29,
-1.1331887079263237e-29, 17.530865429786694, 0.0, 0.0, 0.0, 4.765319193197458,
-4.765319193197457, 21.79949265599876, -21.79949265599876, -3.2148950476847613, 0.0,
-2.281503094067127, 2.6784818505075303, 0.0]):
self.assertAlmostEqual(i, j)
def test_changing_variable_names_is_reflected_in_the_solver(self):
model = Model(problem=gurobipy.read(TESTMODELPATH))
for i, variable in enumerate(model.variables):
print(variable._internal_variable is not None)
print(variable.problem.name)
variable.name = "var" + str(i)
print(variable.problem.name)
print(variable.name)
print(variable._internal_variable is not None)
self.assertEqual(variable.name, "var" + str(i))
self.assertEqual(variable._internal_variable.getAttr('VarName'), "var" + str(i))
def test_gurobi_setting_bounds(self):
var = self.var
model = self.model
model.add(var)
model.update()
var.lb = 1
self.assertEqual(var.lb, 1)
model.problem.update()
self.assertEqual(var._internal_variable.getAttr('LB'), 1)
var.ub = 2
self.assertEqual(var.ub, 2)
model.problem.update()
self.assertEqual(var._internal_variable.getAttr('UB'), 2)
class ConstraintTestCase(abstract_test_cases.AbstractConstraintTestCase):
interface = gurobi_interface
def test_get_primal(self):
self.assertEqual(self.constraint.primal, None)
self.model.optimize()
print([constraint.primal for constraint in self.model.constraints])
for i, j in zip([constraint.primal for constraint in self.model.constraints],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
4.048900234729145e-15, 0.0, 0.0, 0.0, -3.55971196577979e-16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 2.5546369406238147e-17, 0.0, -5.080374405378186e-29, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]):
self.assertAlmostEqual(i, j)
class ObjectiveTestCase(abstract_test_cases.AbstractObjectiveTestCase):
interface = gurobi_interface
def setUp(self):
problem = gurobipy.read(TESTMODELPATH)
self.model = Model(problem=problem)
self.obj = self.model.objective
def test_change_direction(self):
self.obj.direction = "min"
self.assertEqual(self.obj.direction, "min")
self.assertEqual(self.model.problem.getAttr('ModelSense'), gurobipy.GRB.MAXIMIZE)
self.model.update()
self.assertEqual(self.model.problem.getAttr('ModelSense'), gurobipy.GRB.MINIMIZE)
self.obj.direction = "max"
self.assertEqual(self.obj.direction, "max")
self.assertEqual(self.model.problem.getAttr('ModelSense'), gurobipy.GRB.MINIMIZE)
self.model.update()
self.assertEqual(self.model.problem.getAttr('ModelSense'), gurobipy.GRB.MAXIMIZE)
class ConfigurationTestCase(abstract_test_cases.AbstractConfigurationTestCase):
interface = gurobi_interface
class ModelTestCase(abstract_test_cases.AbstractModelTestCase):
interface = gurobi_interface
def test_gurobi_create_empty_model(self):
model = Model()
self.assertEqual(model.problem.getAttr('NumVars'), 0)
self.assertEqual(model.problem.getAttr('NumConstrs'), 0)
self.assertEqual(model.name, None)
self.assertEqual(model.problem.getAttr('ModelName'), '')
model = Model(name="empty_problem")
self.assertEqual(model.problem.getAttr('ModelName'), 'empty_problem')
def test_pickle_ability(self):
self.model.optimize()
value = self.model.objective.value
pickle_string = pickle.dumps(self.model)
from_pickle = pickle.loads(pickle_string)
from_pickle.optimize()
self.assertAlmostEqual(value, from_pickle.objective.value)
self.assertEqual([(var.lb, var.ub, var.name, var.type) for var in from_pickle.variables.values()],
[(var.lb, var.ub, var.name, var.type) for var in self.model.variables.values()])
self.assertEqual([(constr.lb, constr.ub, constr.name) for constr in from_pickle.constraints],
[(constr.lb, constr.ub, constr.name) for constr in self.model.constraints])
def test_config_gets_copied_too(self):
self.assertEqual(self.model.configuration.verbosity, 0)
self.model.configuration.verbosity = 3
model_copy = copy.copy(self.model)
self.assertEqual(model_copy.configuration.verbosity, 3)
def test_init_from_existing_problem(self):
self.assertEqual(len(self.model.variables), len(self.model.problem.getVars()))
self.assertEqual(len(self.model.constraints), len(self.model.problem.getConstrs()))
self.assertEqual(self.model.variables.keys(),
[var.VarName for var in self.model.problem.getVars()])
self.assertEqual(self.model.constraints.keys(),
[constr.ConstrName for constr in self.model.problem.getConstrs()])
def test_gurobi_add_variable(self):
var = Variable('x')
self.model.add(var)
print(self.model._pending_modifications)
self.assertTrue(var in self.model.variables.values())
self.assertEqual(self.model.variables.values().count(var), 1)
self.assertEqual(self.model.variables['x'].problem, var.problem)
print(var.name)
print(self.model.problem.getVars())
print(self.model._pending_modifications)
self.model.update()
print(self.model._pending_modifications)
print(self.model.problem.getVars())
self.assertEqual(self.model.problem.getVarByName(var.name).getAttr('VType'), gurobipy.GRB.CONTINUOUS)
var = Variable('y', lb=-13)
self.model.add(var)
self.assertTrue(var in self.model.variables.values())
self.model.problem.update()
self.assertEqual(self.model.problem.getVarByName(var.name).getAttr('VType'), gurobipy.GRB.CONTINUOUS)
self.assertEqual(self.model.variables['x'].lb, None)
self.assertEqual(self.model.variables['x'].ub, None)
self.assertEqual(self.model.variables['y'].lb, -13)
self.assertEqual(self.model.variables['y'].ub, None)
var = Variable('x_with_ridiculously_long_variable_name_asdffffffffasdfasdfasdfasdfasdfasdfasdf')
self.model.add(var)
self.assertTrue(var in self.model.variables.values())
self.assertEqual(self.model.variables.values().count(var), 1)
def test_gurobi_add_integer_var(self):
var = Variable('int_var', lb=-13, ub=500, type='integer')
self.model.add(var)
self.assertEqual(self.model.variables['int_var'].type, 'integer')
self.assertEqual(self.model.problem.getVarByName(var.name).getAttr('VType'), gurobipy.GRB.INTEGER)
self.assertEqual(self.model.variables['int_var'].ub, 500)
self.assertEqual(self.model.variables['int_var'].lb, -13)
def test_add_non_cplex_conform_variable(self):
var = Variable('12x!!@#5_3', lb=-666, ub=666)
self.model.add(var)
self.assertTrue(var in self.model.variables.values())
self.model.problem.update()
self.assertEqual(var.name, self.model.problem.getVarByName(var.name).VarName)
self.assertEqual(self.model.variables['12x!!@#5_3'].lb, -666)
self.assertEqual(self.model.variables['12x!!@#5_3'].ub, 666)
repickled = pickle.loads(pickle.dumps(self.model))
var_from_pickle = repickled.variables['12x!!@#5_3']
self.assertEqual(var_from_pickle.name, repickled.problem.getVarByName(var.name).VarName)
def test_gurobi_add_constraints(self):
x = Variable('x', lb=0, ub=1, type='binary')
y = Variable('y', lb=-181133.3, ub=12000., type='continuous')
z = Variable('z', lb=0., ub=10., type='integer')
constr1 = Constraint(0.3 * x + 0.4 * y + 66. * z, lb=-100, ub=0., name='test')
constr2 = Constraint(2.333 * x + y + 3.333, ub=100.33, name='test2')
constr3 = Constraint(2.333 * x + y + z, lb=-300)
constr4 = Constraint(x, lb=-300, ub=-300)
self.model.add(constr1)
self.model.add(constr2)
self.model.add(constr3)
self.model.add(constr4)
self.model.problem.update()
self.assertIn(constr1.name, self.model.constraints)
self.assertIn(constr2.name, self.model.constraints)
self.assertIn(constr3.name, self.model.constraints)
self.assertIn(constr4.name, self.model.constraints)
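# The constr1 checks below reflect how this interface appears to encode a
# ranged constraint (both lb and ub set): an auxiliary variable named
# '<name>_aux' enters the row with coefficient -1, the right-hand side is set
# to the lower bound, and the range width (ub - lb) is carried by the
# auxiliary variable's upper bound.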
# constr1
coeff_dict = dict()
internal_constraint = self.model.problem.getConstrByName(constr1.name)
row = self.model.problem.getRow(internal_constraint)
for i in range(row.size()):
coeff_dict[row.getVar(i).VarName] = row.getCoeff(i)
self.assertDictEqual(coeff_dict, {'x': 0.3, 'y': 0.4, 'z': 66., 'test_aux': -1.0})
self.assertEqual(internal_constraint.RHS, constr1.lb)
self.assertEqual(self.model.problem.getVarByName(internal_constraint.getAttr('ConstrName') + '_aux').getAttr('UB'), 100)
# constr2
coeff_dict = dict()
internal_constraint = self.model.problem.getConstrByName(constr2.name)
row = self.model.problem.getRow(internal_constraint)
for i in range(row.size()):
coeff_dict[row.getVar(i).VarName] = row.getCoeff(i)
self.assertDictEqual(coeff_dict, {'x': 2.333, 'y': 1.})
self.assertEqual(internal_constraint.RHS, constr2.ub)
self.assertEqual(internal_constraint.Sense, '<')
# constr3
coeff_dict = dict()
internal_constraint = self.model.problem.getConstrByName(constr3.name)
print(internal_constraint)
row = self.model.problem.getRow(internal_constraint)
for i in range(row.size()):
coeff_dict[row.getVar(i).VarName] = row.getCoeff(i)
self.assertDictEqual(coeff_dict, {'x': 2.333, 'y': 1., 'z': 1.})
self.assertEqual(internal_constraint.RHS, constr3.lb)
self.assertEqual(internal_constraint.Sense, '>')
# constr4
coeff_dict = dict()
internal_constraint = self.model.problem.getConstrByName(constr4.name)
print(internal_constraint)
row = self.model.problem.getRow(internal_constraint)
for i in range(row.size()):
coeff_dict[row.getVar(i).VarName] = row.getCoeff(i)
self.assertDictEqual(coeff_dict, {'x': 1})
self.assertEqual(internal_constraint.RHS, constr4.lb)
self.assertEqual(internal_constraint.Sense, '=')
def test_change_of_constraint_is_reflected_in_low_level_solver(self):
x = Variable('x', lb=-83.3, ub=1324422.)
y = Variable('y', lb=-181133.3, ub=12000.)
constraint = Constraint(0.3 * x + 0.4 * y, lb=-100, name='test')
self.assertEqual(constraint._internal_constraint, None)
self.model.add(constraint)
self.assertEqual(self.model.constraints['test'].lb, -100)
self.assertEqual(
(self.model.constraints['test'].expression - (0.4 * y + 0.3 * x)).expand() - 0,
0
)
z = Variable('z', lb=3, ub=10, type='integer')
self.assertEqual(z._internal_variable, None)
constraint += 77. * z
self.assertEqual(z._internal_variable, self.model.problem.getVarByName('z'))
self.assertEqual(self.model.constraints['test'].lb, -100)
self.assertEqual(
(self.model.constraints['test'].expression - (0.4 * y + 0.3 * x + 77.0 * z)).expand() - 0,
0
)
def test_constraint_set_problem_to_None_caches_the_latest_expression_from_solver_instance(self):
x = Variable('x', lb=-83.3, ub=1324422.)
y = Variable('y', lb=-181133.3, ub=12000.)
constraint = Constraint(0.3 * x + 0.4 * y, lb=-100, name='test')
self.model.add(constraint)
z = Variable('z', lb=2, ub=5, type='integer')
constraint += 77. * z
self.model.remove(constraint)
self.assertEqual(constraint.lb, -100)
self.assertEqual(
(constraint.expression - (0.4 * y + 0.3 * x + 77.0 * z)).expand() - 0, 0
)
def test_change_of_objective_is_reflected_in_low_level_solver(self):
x = Variable('x', lb=-83.3, ub=1324422.)
y = Variable('y', lb=-181133.3, ub=12000.)
objective = Objective(0.3 * x + 0.4 * y, name='test', direction='max')
self.model.objective = objective
self.model.update()
grb_obj = self.model.problem.getObjective()
grb_x = self.model.problem.getVarByName(x.name)
grb_y = self.model.problem.getVarByName(y.name)
expected = {grb_x: 0.3, grb_y: 0.4}
for i in range(grb_obj.size()):
self.assertEqual(grb_obj.getCoeff(i), expected[grb_obj.getVar(i)])
z = Variable('z', lb=4, ub=4, type='integer')
grb_z = self.model.problem.getVarByName(z.name)
self.model.objective += 77. * z
expected[grb_z] = 77.
self.model.update()
for i in range(grb_obj.size()):
self.assertEqual(grb_obj.getCoeff(i), expected[grb_obj.getVar(i)])
def test_change_variable_bounds(self):
inner_prob = self.model.problem
inner_problem_bounds = [(variable.LB, variable.UB) for variable in inner_prob.getVars()]
bounds = [(var.lb, var.ub) for var in self.model.variables.values()]
self.assertEqual(bounds, inner_problem_bounds)
for var in self.model.variables.values():
var.lb = random.uniform(-1000, 1000)
var.ub = random.uniform(var.lb, 1000)
self.model.update()
inner_problem_bounds_new = [(variable.LB, variable.UB) for variable in inner_prob.getVars()]
bounds_new = [(var.lb, var.ub) for var in self.model.variables.values()]
self.assertNotEqual(bounds, bounds_new)
self.assertNotEqual(inner_problem_bounds, inner_problem_bounds_new)
self.assertEqual(bounds_new, inner_problem_bounds_new)
def test_gurobi_change_variable_type(self):
for variable in self.model.variables:
variable.type = 'integer'
self.model.update()
for variable in self.model.problem.getVars():
self.assertEqual(variable.VType, gurobipy.GRB.INTEGER)
def test_change_constraint_bounds(self):
inner_prob = self.model.problem
inner_problem_bounds = []
for constr in inner_prob.getConstrs():
aux_var = inner_prob.getVarByName(constr.getAttr('ConstrName') + '_aux')
if aux_var is None:
inner_problem_bounds.append((constr.RHS, constr.RHS))
else:
inner_problem_bounds.append((aux_var.UB, constr.RHS))
print(len(self.model.constraints))
print(len(self.model.problem.getConstrs()))
bounds = [(constr.lb, constr.ub) for constr in self.model.constraints]
print('bounds', inner_problem_bounds)
print('bounds', bounds)
self.assertEqual(bounds, inner_problem_bounds)
@unittest.skip('Not supported yet')
def test_iadd_objective(self):
v2, v3 = self.model.variables.values()[1:3]
print(v2, v3)
# 1/0
self.model.objective += 2. * v2 - 3. * v3
internal_objective = self.model.problem.getObjective()
result = {}
for i in range(internal_objective.size()):
var = internal_objective.getVar(i)
coeff = internal_objective.getCoeff(i)
result[var.VarName] = coeff
self.assertDictEqual(result, {'R_Biomass_Ecoli_core_w_GAM': 1.0})
self.model.update()
self.assertDictEqual(result, {'R_Biomass_Ecoli_core_w_GAM': 1.0, 'R_PGK': 2, 'R_GAPD': -3})
@unittest.skip('Not supported yet')
def test_imul_objective(self):
self.model.objective *= 2.
obj_coeff = list()
self.assertEqual(obj_coeff,
[0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0,
0.0]
)
def test_set_copied_objective(self):
obj_copy = copy.copy(self.model.objective)
self.model.objective = obj_copy
self.assertEqual(self.model.objective.direction, "max")
self.assertEqual(
(self.model.objective.expression - (1.0 * self.model.variables.R_Biomass_Ecoli_core_w_GAM)).expand() - 0,
0
)
def test_timeout(self):
self.model.configuration.timeout = 0
status = self.model.optimize()
self.assertEqual(status, 'time_limit')
def test_set_linear_coefficients_objective(self):
self.model.objective.set_linear_coefficients({self.model.variables.R_TPI: 666.})
self.model.update()
grb_obj = self.model.problem.getObjective()
for i in range(grb_obj.size()):
if 'R_TPI' == grb_obj.getVar(i).getAttr('VarName'):
self.assertEqual(grb_obj.getCoeff(i), 666.)
def test_set_linear_coefficients_constraint(self):
constraint = self.model.constraints.M_atp_c
constraint.set_linear_coefficients({self.model.variables.R_Biomass_Ecoli_core_w_GAM: 666.})
self.model.update()
row = self.model.problem.getRow(self.model.problem.getConstrByName(constraint.name))
for i in range(row.size()):
col_name = row.getVar(i).getAttr('VarName')
if col_name == 'R_Biomass_Ecoli_core_w_GAM':
self.assertEqual(row.getCoeff(i), 666.)
class QuadraticProgrammingTestCase(abstract_test_cases.AbstractQuadraticProgrammingTestCase):
def setUp(self):
self.model = Model()
self.x1 = Variable("x1", lb=0)
self.x2 = Variable("x2", lb=0)
self.c1 = Constraint(self.x1 + self.x2, lb=1)
self.model.add([self.x1, self.x2, self.c1])
def test_convex_obj(self):
model = self.model
obj = Objective(self.x1 ** 2 + self.x2 ** 2, direction="min")
model.objective = obj
model.optimize()
self.assertAlmostEqual(model.objective.value, 0.5)
self.assertAlmostEqual(self.x1.primal, 0.5)
self.assertAlmostEqual(self.x2.primal, 0.5)
obj_2 = Objective(self.x1, direction="min")
model.objective = obj_2
model.optimize()
self.assertAlmostEqual(model.objective.value, 0.0)
self.assertAlmostEqual(self.x1.primal, 0.0)
self.assertGreaterEqual(self.x2.primal, 1.0)
# According to documentation and mailing lists Gurobi cannot solve non-convex QP
# However version 7.0 solves this fine. Skipping for now
@unittest.skip("Can gurobi solve non-convex QP?")
def test_non_convex_obj(self):
model = self.model
obj = Objective(self.x1 * self.x2, direction="min")
model.objective = obj
self.assertRaises(GurobiError, model.optimize)
obj_2 = Objective(self.x1, direction="min")
model.objective = obj_2
model.optimize()
self.assertAlmostEqual(model.objective.value, 0.0)
self.assertAlmostEqual(self.x1.primal, 0.0)
self.assertGreaterEqual(self.x2.primal, 1.0)
def test_qp_convex(self):
model = Model(problem=gurobipy.read(CONVEX_QP_PATH))
self.assertEqual(len(model.variables), 651)
self.assertEqual(len(model.constraints), 501)
for constraint in model.constraints:
self.assertTrue(constraint.is_Linear, "%s should be linear" % (str(constraint.expression)))
self.assertFalse(constraint.is_Quadratic, "%s should not be quadratic" % (str(constraint.expression)))
self.assertTrue(model.objective.is_Quadratic, "objective should be quadratic")
self.assertFalse(model.objective.is_Linear, "objective should not be linear")
model.optimize()
self.assertAlmostEqual(model.objective.value, 32.2291282)
@unittest.skip("Takes a very long time")
def test_qp_non_convex(self):
model = Model(problem=gurobipy.read(NONCONVEX_QP_PATH))
self.assertEqual(len(model.variables), 31)
self.assertEqual(len(model.constraints), 1)
for constraint in model.constraints:
self.assertTrue(constraint.is_Linear, "%s should be linear" % (str(constraint.expression)))
self.assertFalse(constraint.is_Quadratic, "%s should not be quadratic" % (str(constraint.expression)))
self.assertTrue(model.objective.is_Quadratic, "objective should be quadratic")
self.assertFalse(model.objective.is_Linear, "objective should not be linear")
self.assertRaises(GurobiError, model.optimize)
def test_quadratic_objective_expression(self):
objective = Objective(self.x1 ** 2 + self.x2 ** 2, direction="min")
self.model.objective = objective
self.assertEqual((self.model.objective.expression - (self.x1 ** 2 + self.x2 ** 2)).simplify(), 0)
if __name__ == '__main__':
nose.runmodule()
|
|
# encoding: utf-8
"""
Base classes and other objects used by enumerations
"""
from __future__ import absolute_import, print_function
import sys
import textwrap
def alias(*aliases):
"""
Decorating a class with @alias('FOO', 'BAR', ..) allows the class to
be referenced by each of the names provided as arguments.
"""
def decorator(cls):
# alias must be set in globals from caller's frame
caller = sys._getframe(1)
globals_dict = caller.f_globals
for alias in aliases:
globals_dict[alias] = cls
return cls
return decorator
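# A minimal usage sketch (class names are hypothetical):
#
#     @alias('FOO', 'BAR')
#     class _Foo(object):
#         pass
#
# Afterwards both FOO and BAR are bound to _Foo in the defining module's
# globals, because the decorator writes into the caller's frame globals.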
class _DocsPageFormatter(object):
"""
    Formats a RestructuredText documentation page (string) for the enumeration
class parts passed to the constructor. An immutable one-shot service
object.
"""
def __init__(self, clsname, clsdict):
self._clsname = clsname
self._clsdict = clsdict
@property
def page_str(self):
"""
The RestructuredText documentation page for the enumeration. This is
the only API member for the class.
"""
tmpl = '.. _%s:\n\n%s\n\n%s\n\n----\n\n%s'
components = (
self._ms_name, self._page_title, self._intro_text,
self._member_defs
)
return tmpl % components
@property
def _intro_text(self):
"""
The docstring of the enumeration, formatted for use at the top of the
documentation page
"""
try:
cls_docstring = self._clsdict['__doc__']
except KeyError:
cls_docstring = ''
return textwrap.dedent(cls_docstring).strip()
def _member_def(self, member):
"""
Return an individual member definition formatted as an RST glossary
entry, wrapped to fit within 78 columns.
"""
member_docstring = textwrap.dedent(member.docstring).strip()
member_docstring = textwrap.fill(
member_docstring, width=78, initial_indent=' '*4,
subsequent_indent=' '*4
)
return '%s\n%s\n' % (member.name, member_docstring)
@property
def _member_defs(self):
"""
A single string containing the aggregated member definitions section
of the documentation page
"""
members = self._clsdict['__members__']
member_defs = [
self._member_def(member) for member in members
if member.name is not None
]
return '\n'.join(member_defs)
@property
def _ms_name(self):
"""
The Microsoft API name for this enumeration
"""
return self._clsdict['__ms_name__']
@property
def _page_title(self):
"""
The title for the documentation page, formatted as code (surrounded
        in double-backticks) and underlined with '=' characters
"""
title_underscore = '=' * (len(self._clsname)+4)
return '``%s``\n%s' % (self._clsname, title_underscore)
class MetaEnumeration(type):
"""
The metaclass for Enumeration and its subclasses. Adds a name for each
named member and compiles state needed by the enumeration class to
respond to other attribute gets
"""
def __new__(meta, clsname, bases, clsdict):
meta._add_enum_members(clsdict)
meta._collect_valid_settings(clsdict)
meta._generate_docs_page(clsname, clsdict)
return type.__new__(meta, clsname, bases, clsdict)
@classmethod
def _add_enum_members(meta, clsdict):
"""
Dispatch ``.add_to_enum()`` call to each member so it can do its
thing to properly add itself to the enumeration class. This
delegation allows member sub-classes to add specialized behaviors.
"""
enum_members = clsdict['__members__']
for member in enum_members:
member.add_to_enum(clsdict)
@classmethod
def _collect_valid_settings(meta, clsdict):
"""
Return a sequence containing the enumeration values that are valid
assignment values. Return-only values are excluded.
"""
enum_members = clsdict['__members__']
valid_settings = []
for member in enum_members:
valid_settings.extend(member.valid_settings)
clsdict['_valid_settings'] = valid_settings
@classmethod
def _generate_docs_page(meta, clsname, clsdict):
"""
Return the RST documentation page for the enumeration.
"""
clsdict['__docs_rst__'] = (
_DocsPageFormatter(clsname, clsdict).page_str
)
class EnumerationBase(object):
"""
Base class for all enumerations, used directly for enumerations requiring
    only basic behavior. Its __dict__ is used below in the Python 2+3
compatible metaclass definition.
"""
__members__ = ()
__ms_name__ = ''
@classmethod
def validate(cls, value):
"""
Raise |ValueError| if *value* is not an assignable value.
"""
if value not in cls._valid_settings:
raise ValueError(
"%s not a member of %s enumeration" % (value, cls.__name__)
)
Enumeration = MetaEnumeration(
'Enumeration', (object,), dict(EnumerationBase.__dict__)
)
class XmlEnumeration(Enumeration):
"""
Provides ``to_xml()`` and ``from_xml()`` methods in addition to base
enumeration features
"""
__members__ = ()
__ms_name__ = ''
@classmethod
def from_xml(cls, xml_val):
"""
Return the enumeration member corresponding to the XML value
*xml_val*.
"""
return cls._xml_to_member[xml_val]
@classmethod
def to_xml(cls, enum_val):
"""
Return the XML value of the enumeration value *enum_val*.
"""
cls.validate(enum_val)
return cls._member_to_xml[enum_val]
class EnumMember(object):
"""
Used in the enumeration class definition to define a member value and its
mappings
"""
def __init__(self, name, value, docstring):
self._name = name
if isinstance(value, int):
value = EnumValue(name, value, docstring)
self._value = value
self._docstring = docstring
def add_to_enum(self, clsdict):
"""
Add a name to *clsdict* for this member.
"""
self.register_name(clsdict)
@property
def docstring(self):
"""
The description of this member
"""
return self._docstring
@property
def name(self):
"""
The distinguishing name of this member within the enumeration class,
e.g. 'MIDDLE' for MSO_VERTICAL_ANCHOR.MIDDLE, if this is a named
member. Otherwise the primitive value such as |None|, |True| or
|False|.
"""
return self._name
def register_name(self, clsdict):
"""
Add a member name to the class dict *clsdict* containing the value of
this member object. Where the name of this object is None, do
nothing; this allows out-of-band values to be defined without adding
a name to the class dict.
"""
if self.name is None:
return
clsdict[self.name] = self.value
@property
def valid_settings(self):
"""
A sequence containing the values valid for assignment for this
member. May be zero, one, or more in number.
"""
return (self._value,)
@property
def value(self):
"""
The enumeration value for this member, often an instance of
EnumValue, but may be a primitive value such as |None|.
"""
return self._value
class EnumValue(int):
"""
A named enumeration value, providing __str__ and __doc__ string values
for its symbolic name and description, respectively. Subclasses int, so
behaves as a regular int unless the strings are asked for.
"""
def __new__(cls, member_name, int_value, docstring):
return super(EnumValue, cls).__new__(cls, int_value)
def __init__(self, member_name, int_value, docstring):
super(EnumValue, self).__init__()
self._member_name = member_name
self._docstring = docstring
@property
def __doc__(self):
"""
The description of this enumeration member
"""
return self._docstring.strip()
def __str__(self):
"""
The symbolic name and string value of this member, e.g. 'MIDDLE (3)'
"""
return "{0:s} ({1:d})".format(self._member_name, self)
class ReturnValueOnlyEnumMember(EnumMember):
"""
Used to define a member of an enumeration that is only valid as a query
result and is not valid as a setting, e.g. MSO_VERTICAL_ANCHOR.MIXED (-2)
"""
@property
def valid_settings(self):
"""
No settings are valid for a return-only value.
"""
return ()
class XmlMappedEnumMember(EnumMember):
"""
Used to define a member whose value maps to an XML attribute value.
"""
def __init__(self, name, value, xml_value, docstring):
super(XmlMappedEnumMember, self).__init__(name, value, docstring)
self._xml_value = xml_value
def add_to_enum(self, clsdict):
"""
Compile XML mappings in addition to base add behavior.
"""
super(XmlMappedEnumMember, self).add_to_enum(clsdict)
self.register_xml_mapping(clsdict)
def register_xml_mapping(self, clsdict):
"""
Add XML mappings to the enumeration class state for this member.
"""
member_to_xml = self._get_or_add_member_to_xml(clsdict)
member_to_xml[self.value] = self.xml_value
xml_to_member = self._get_or_add_xml_to_member(clsdict)
xml_to_member[self.xml_value] = self.value
@property
def xml_value(self):
"""
The XML attribute value that corresponds to this enumeration value
"""
return self._xml_value
@staticmethod
def _get_or_add_member_to_xml(clsdict):
"""
Add the enum -> xml value mapping to the enumeration class state
"""
if '_member_to_xml' not in clsdict:
clsdict['_member_to_xml'] = dict()
return clsdict['_member_to_xml']
@staticmethod
def _get_or_add_xml_to_member(clsdict):
"""
Add the xml -> enum value mapping to the enumeration class state
"""
if '_xml_to_member' not in clsdict:
clsdict['_xml_to_member'] = dict()
return clsdict['_xml_to_member']
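# Sketch of how the pieces fit together (the values are made up): an
# XmlMappedEnumMember('MIDDLE', 3, 'ctr', 'docstring') listed in the
# __members__ of an XmlEnumeration subclass registers MIDDLE as an EnumValue
# equal to 3 on the class, and populates _member_to_xml[3] -> 'ctr' and
# _xml_to_member['ctr'] -> 3, which is exactly what to_xml() and from_xml()
# look up at runtime.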
|
|
import os
import io
import codecs
import sys
import sublime
import platform
import time
import sublime_plugin
import subprocess
from subprocess import Popen, PIPE, STDOUT
from os import path
import socket
import errno
from socket import error as socket_error
from .utils import *
out_panel = 'CS-Script'
plugin_dir = path.dirname(path.dirname(__file__))
csscriptApp = path.join(plugin_dir, 'bin', 'cscs.exe')
syntaxerApp = path.join(path.dirname(plugin_dir), 'User', 'cs-script', 'syntaxer_v'+os.environ["cs-script.st3.ver"],'syntaxer.exe')
syntaxerPort = 18000
# =================================================================================
# C# Syntax Server - a service any process can connect to over a socket to
# request intellisense queries
# =================================================================================
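# The wire protocol used throughout this module is plain text over a TCP
# socket on syntaxerPort: each request is a newline-separated list of
# '-name:value' arguments (e.g. '-client:<pid>\n-op:completion\n-script:<file>\n-pos:<offset>'),
# and for syntax queries the server replies with a UTF-8 string (see
# send_syntax_request below).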
def is_linux():
return os.name == 'posix' and platform.system() == 'Linux'
def is_mac():
return os.name == 'posix' and platform.system() == 'Darwin'
# -----------------
def to_args(args):
# excellent discussion about why popen+shell doesn't work on Linux
# http://stackoverflow.com/questions/1253122/why-does-subprocess-popen-with-shell-true-work-differently-on-linux-vs-windows
if is_linux() and not is_mac():
result = ''
if args[0].endswith('cscs.exe') or args[0].endswith('syntaxer.exe'):
result = 'mono '
for arg in args:
result = result + '"'+arg+'" '
return [result.rstrip()]
return args
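# A quick sketch (hypothetical paths): on Linux,
#     to_args(['/plugin/bin/cscs.exe', '-preload'])
# returns ['mono "/plugin/bin/cscs.exe" "-preload"'], a single shell string
# prefixed with mono, while on Windows and macOS the argument list is
# returned unchanged.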
# -----------------
def start_syntax_server():
try:
sublime.status_message('Starting syntaxer server...')
serverApp = syntaxerApp
args = []
# if is_linux():
# args.append('mono')
args.append(serverApp)
args.append('-listen')
args.append('-port:'+str(syntaxerPort))
args.append('-timeout:3000')
args.append('-client:{0}'.format(os.getpid()))
args.append('-cscs_path:{0}'.format(csscriptApp))
args = to_args(args)
        # args = '{0} -listen -port:{1} -client:{2}'.format(serverApp, syntaxerPort, os.getpid())
start = time.time()
subprocess.Popen(args, shell=True)
print('> Syntaxer server started:', time.time()-start, 'seconds')
sublime.status_message('> Syntaxer server started...')
except Exception as ex:
print('Cannot start syntaxer server', ex)
pass
# Start the server as soon as possible. If the server is already running the next call will do nothing.
# The server will terminate itself after the last client exits
start_syntax_server()
# -----------------
def send_exit_request():
try:
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(('localhost', syntaxerPort))
clientsocket.send('-exit'.encode('utf-8'))
except socket_error as serr:
pass
# -----------------
reconnect_count = 0
last_cscs_sent = None
def set_engine_path(cscs_path):
    global csscriptApp, reconnect_count
if cscs_path:
csscriptApp = cscs_path
reconnect_count = 0
# print('setting engine path')
send_cscs_path(csscriptApp)
# -----------------
def preload_engine():
global csscriptApp
try:
args = []
args.append(csscriptApp)
args.append('-preload')
args = to_args(args)
start = time.time()
subprocess.Popen(args, shell=True)
print('> Roslyn preloading done:', time.time()-start, 'seconds')
except:
pass
# -----------------
def send_cscs_path(cscs_path):
sublime.set_timeout_async(lambda: try_send_cscs_path(cscs_path), 3000)
def try_send_cscs_path(cscs_path):
global reconnect_count
global last_cscs_sent
reconnect_count = reconnect_count + 1
if last_cscs_sent == cscs_path:
return
try:
start_time = time.time()
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(('localhost', syntaxerPort))
request = '-cscs_path:{0}'.format(cscs_path)
clientsocket.send(request.encode('utf-8'))
last_cscs_sent = cscs_path
reconnect_count = 0
print('> Connected to syntaxer server:', time.time()-start_time, 'seconds')
except socket_error as serr:
# send_cscs_path may be issued before server is ready for the connection
# so we may need to retry
last_cscs_sent = None
if reconnect_count < 5:
print(serr)
print('Cannot configure syntaxer server with cscs location. Schedule another attempt in 3 seconds.')
            sublime.set_timeout_async(lambda: try_send_cscs_path(cscs_path), 3000)
else:
            # Just give up; five retries at 3-second intervals should be more
            # than enough to connect, so something more serious than the server
            # not being ready must be going on.
print(serr)
print('Cannot configure syntaxer server with cscs location.')
reconnect_count = 0
# -----------------
def send_pkill_request(pid, pname=None):
try:
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(('localhost', syntaxerPort))
request = '-pkill\n-pid:{0}'.format(pid)
if pname:
request = request + '\n-pname:' + pname
clientsocket.send(request.encode('utf-8'))
except socket_error as serr:
if serr.errno == errno.ECONNREFUSED:
start_syntax_server()
# -----------------
def send_popen_request(command):
try:
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(('localhost', syntaxerPort))
request = '-popen:{0}'.format(command)
clientsocket.send(request.encode('utf-8'))
except socket_error as serr:
if serr.errno == errno.ECONNREFUSED:
start_syntax_server()
# -----------------
def send_syntax_request(file, location, operation):
try:
clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientsocket.connect(('localhost', syntaxerPort))
request = '-client:{0}\n-op:{1}\n-script:{2}\n-pos:{3}'.format(os.getpid(), operation, file, location)
clientsocket.send(request.encode('utf-8'))
response = clientsocket.recv(1024*1024)
return response.decode('utf-8')
except socket_error as serr:
if serr.errno == errno.ECONNREFUSED:
start_syntax_server()
# print(serr)
# -----------------
def send_formatting_request(file, location):
return send_syntax_request(file, location, 'format')
# -----------------
def send_completion_request(file, location):
return send_syntax_request(file, location, 'completion')
# -----------------
def send_tooltip_request(file, location, hint, short_hinted_tooltips=True):
args = 'tooltip:'+hint
if short_hinted_tooltips:
args = args + '\n-short_hinted_tooltips:1'
else:
args = args + '\n-short_hinted_tooltips:0'
return send_syntax_request(file, location, args)
# if short_hinted_tooltips:
# return send_syntax_request(file, location, 'short_hinted_tooltips:1\n-tooltip:'+hint)
# else:
# return send_syntax_request(file, location, 'short_hinted_tooltips:0\n-tooltip:'+hint)
# return send_syntax_request(file, location, 'tooltip:'+hint)
# -----------------
def send_resolve_request(file, location):
return send_syntax_request(file, location, 'resolve')
# -----------------
def send_resolve_references(file, location):
return send_syntax_request(file, location, 'references')
# -----------------
def send_resolve_using_request(file, word):
return send_syntax_request(file, -1, 'suggest_usings:'+word)
# -----------------
def popen_redirect(args):
return subprocess.Popen(to_args(args), stdout=subprocess.PIPE, shell=True)
# -----------------
def popen_redirect_tofile(args, file):
return subprocess.Popen(to_args(args), stdout=file, shell=True)
# -----------------
def run_doc_in_cscs(args, view, handle_line, on_done=None, nuget_warning = True):
curr_doc = view.file_name()
clear_and_print_result_header(curr_doc)
if not path.exists(csscriptApp):
print('Error: cannot find CS-Script launcher - ', csscriptApp)
elif not curr_doc:
print('Error: cannot find out the document path')
else:
clear_and_print_result_header(curr_doc)
if nuget_warning and '//css_nuget' in view.substr(sublime.Region(0, view.size())):
output_view_write_line(out_panel, "Resolving NuGet packages may take time...")
def do():
all_args = [csscriptApp]
for a in args:
all_args.append(a)
all_args.append(curr_doc)
proc = popen_redirect(all_args)
first_result = True
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
line = line.strip()
if first_result:
first_result = False
clear_and_print_result_header(curr_doc)
handle_line(line)
if on_done:
on_done()
sublime.set_timeout(do, 100)
# -----------------
def run_cscs(args, handle_line, on_done=None, header=None):
output_view_show(out_panel)
output_view_clear(out_panel)
if header:
output_view_write_line(out_panel, header)
output_view_write_line(out_panel, "------------------------------------------------------------------------")
if not path.exists(csscriptApp):
print('Error: cannot find CS-Script launcher - ', csscriptApp)
else:
def do():
all_args = [csscriptApp]
for a in args:
all_args.append(a)
proc = popen_redirect(all_args)
for line in io.TextIOWrapper(proc.stdout, encoding="utf-8"):
handle_line(line.strip())
if on_done:
on_done()
sublime.set_timeout(do, 100)
# -------------
def clear_and_print_result_header(curr_doc):
output_view_show(out_panel)
output_view_clear(out_panel)
simple_output_header = sublime.load_settings("cs-script.sublime-settings").get('simple_output_header', False)
if not simple_output_header:
output_view_write_line(out_panel, 'Script: '+ curr_doc)
output_view_write_line(out_panel, "------------------------------------------------------------------------")
|
|
import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image as PILImage
import cStringIO as StringIO
import urllib
import caffe
import exifutil
REPO_DIRNAME = os.path.abspath(os.path.dirname(__file__) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = PILImage.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
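# The returned string is a data URI that can be used directly as an <img>
# src value, e.g. '<img src="data:image/png;base64,iVBORw0...">'; the
# classify_upload handler above passes it to the template as imagesrc.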
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 227
default_args['raw_scale'] = 255.
default_args['gpu_mode'] = False
def __init__(self, model_def_file, pretrained_model_file, mean_file,
raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
logging.info('Loading net and associated files...')
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(image_dim, image_dim), raw_scale=raw_scale,
mean=np.load(mean_file), channel_swap=(2, 1, 0), gpu=gpu_mode
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-g', '--gpu',
help="use gpu mode",
action='store_true', default=False)
opts, args = parser.parse_args()
ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
# Initialize classifier
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
|
|
import token, tokenize, json, re, string, nltk
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
from cStringIO import StringIO
'''
This is psanchez's answer to:
http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
'''
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
lmtzr = WordNetLemmatizer()
rcParams['text.usetex'] = True
standard_spelling = {'whaaaattttt':'what','annnnnnnd':'and','yeahh':'yeah','yeahhh':'yeah','wwhite':'white',
'toooo':'to','hahhaha':'ha','fellah':'fellow','poppin':'popping','feelin':'feeling','thouygh':'though','sadsadsad':'sad',
'longterm':' long','orang':'orange','takin':'taking'}
informative_tokens = json.load(open('informative-tokens.json','rb'))
synsets = {'positive' :{synset for token in informative_tokens['positive']['tokens'] for synset in wn.synsets(token)},
'negative':{synset for token in informative_tokens['negative']['tokens'] for synset in wn.synsets(token)}}
format = lambda txt: r'\Large \textbf{\textsc{%s}}'%txt
def classify(tweet):
    copy_of_tweet = tweet
    # use '-' rather than '-=' here: '-=' mutates the set in place, which
    # would also empty copy_of_tweet (they reference the same object)
    tweet = tweet - set(informative_tokens['common']['tokens'])
positive_overlap = tweet & set(informative_tokens['positive']['tokens'])
negative_overlap = tweet & set(informative_tokens['negative']['tokens'])
if len(negative_overlap) == 0 and len(positive_overlap) == 0:
tweet_synset = {synset for token in copy_of_tweet for synset in wn.synsets(token) if len(wn.synsets(token))>0}
if len(tweet_synset) > 0:
positive_overlap = tweet_synset & synsets['positive']
negative_overlap = tweet_synset & synsets['negative']
else:
return np.nan
return 1 if len(positive_overlap) > len(negative_overlap) else 0
def find_all(a_string, sub):
result = []
k = 0
while k < len(a_string):
k = a_string.find(sub, k)
if k == -1:
return result
else:
result.append(k)
k += 1 #change to k += len(sub) to not search overlapping results
return result
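# Example: find_all('aaaa', 'aa') returns [0, 1, 2]; because of the k += 1
# step, overlapping occurrences are reported too (use k += len(sub) to skip
# them, as the comment above notes).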
def isdecimal(aStr):
return all([ch.isdigit() or ch in string.punctuation for ch in aStr])
def isusername(aStr):
return all([any([ch.isdigit() for ch in aStr]), any([ch.isalpha() for ch in aStr]),len(aStr)>3]) or aStr.startswith('@')
def hasvowels(aStr):
return any([ch in 'aeiou' for ch in aStr])
def count_usernames(set_of_words):
return [word for word in set_of_words if isusername(word)]
def word_tokenize(tweet):
my_verboten_punctuation = string.punctuation.replace('@','').replace('#','')
return [''.join([ch for ch in word if ch not in my_verboten_punctuation]) for word in tweet.split()]
def extract_tokens(list_of_tweets_as_str, count_usernames=True,is_single=False):
if is_single:
list_of_words_in_tweet = set([word for word in list_of_tweets_as_str.lower().split()
if all([not word.isdigit(),not isdecimal(word)])])
else:
list_of_words_in_tweet = set([word for tweet in list_of_tweets_as_str for word in tweet.lower().split()
if all([not word.isdigit(),not isdecimal(word)])])
list_of_words_in_tweet -= set(string.punctuation)
list_of_words_in_tweet = {token.replace('-','').replace('_','').replace('.','').replace("'",'').replace('/','').replace('*','')
for token in list_of_words_in_tweet if len(token)>3}
list_of_words_in_tweet = {token if token not in standard_spelling else standard_spelling[token] for token in list_of_words_in_tweet}
list_of_words_in_tweet = {lmtzr.lemmatize(token,'v' if len(token)>4 and token.endswith('ing') or token.endswith('ed') else 'n')
for token in list_of_words_in_tweet}
usernames = {token for token in list_of_words_in_tweet if isusername(token)} if count_usernames else {}
hashtags = {token for token in list_of_words_in_tweet if token.startswith('#')}
return ({token for token in list_of_words_in_tweet
if all([token not in stopwords.words('english'),len(token)>3,
not isusername(token),hasvowels(token), not token.startswith('#')])},usernames,hashtags)
def get(lst,field):
return [item[field] for item in lst]
def regularize_json(json_string):
json_string = re.sub(r"{\s*'?(\w)", r'{"\1', json_string)
json_string = re.sub(r",\s*'?(\w)", r',"\1', json_string)
json_string = re.sub(r"(\w)'?\s*:", r'\1":', json_string)
json_string = re.sub(r":\s*'(\w+)'\s*([,}])", r':"\1"\2', json_string)
return json_string
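# Example: the lazy string "{foo: 'bar', baz: 1}" becomes the valid JSON
# '{"foo":"bar","baz": 1}' after the four substitutions above.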
def jaccard(one,two):
one = set(one)
two = set(two)
if len(one & two) == 0:
return 0
else:
return len(one & two) / float(len(one | two))
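# Example: jaccard(['a', 'b'], ['b', 'c']) returns 1/3.0, since the
# intersection {'b'} has size 1 and the union {'a', 'b', 'c'} has size 3.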
def json_decode (json_string, *args, **kwargs):
    # return the parsed object, falling back to fixLazyJson on invalid input
    try:
        return json.loads(json_string, *args, **kwargs)
    except ValueError:
        json_string = fixLazyJson(json_string)
        return json.loads(json_string, *args, **kwargs)
def get_field_damaged_string(astring):
#Extract text
text_key_start = astring.find('text')
snippet = astring[(text_key_start+6):(text_key_start+146)].split(', u')[0].encode('utf-8').replace('u','').replace('[','').replace(']','').strip()[1:-1]
#Extract id
id_key_start = astring.find("id_str")
#guessing how many to go ahead
id_string = astring[id_key_start:(id_key_start+50)].split(':')[1].split(', u')[0].replace("u'",'').strip()[1:-1]
return (snippet,id_string)
def cleanse(data,remove_stopwords=True):
corpus = [word_tokenize(datum.lower().strip()) for datum in data]
#remove URLs and stopwords
corpus = [[word for word in text if not word.startswith('http')
and word not in stopwords.words('english')
and word not in ['rt',"'s",'bt','em']
and not any(['\u' in word,'\\x' in word,'t.co' in word, 'tco' in word])] for text in corpus]
#remove unicode
corpus = [[word.replace('\\','').replace(',','') for word in text if all([ord(ch)<128 for ch in word])
and not all([ch in string.punctuation.replace('@','').replace('#','') for ch in word])] for text in corpus]
corpus = [[lmtzr.lemmatize(word) for word in text if not word.isdigit()] for text in corpus]
return corpus
def adjust_spines(ax, spines=['left','bottom']):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 10)) # outward by 10 points
#spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
def freqplot(tokens,n=50,filename=None):
'''Input is a list of tokens'''
words,freqs = zip(*nltk.FreqDist(tokens).most_common(n))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(freqs,'k--')
adjust_spines(ax)
ax.set_xticks(range(len(freqs)))
ax.set_xticklabels(map(format,words),rotation='vertical')
ax.set_ylabel(format('Count'))
    plt.tight_layout()
    if filename:
        plt.savefig(filename)
        plt.savefig('%s.tiff' % filename)
    plt.close()
def fixLazyJson (in_text):
tokengen = tokenize.generate_tokens(StringIO(in_text).readline)
result = []
for tokid, tokval, _, _, _ in tokengen:
# fix unquoted strings
if (tokid == token.NAME):
if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
tokid = token.STRING
tokval = "%s" % tokval
if tokval == "None":
tokval = "null"
if tokval == "False" or tokval == "True":
tokval = tokval.lower()
# fix single-quoted strings
elif (tokid == token.STRING):
if tokval.startswith ("'"):
tokval = "%s" % tokval[1:-1].replace ('"', '\\"')
# remove invalid commas
elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
if (len(result) > 0) and (result[-1][1] == ','):
result.pop()
result.append((tokid, tokval))
return tokenize.untokenize(result)
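# Usage sketch: fixLazyJson("{foo: 'bar',}") quotes the bare key, rewrites
# the single-quoted string with double quotes and drops the trailing comma,
# yielding (up to tokenizer whitespace) '{"foo": "bar"}', which json.loads()
# accepts; json_decode above falls back to it on a ValueError.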
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, msg_block, msg_headers, msg_inv
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
class AcceptBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers generally, while node1 (started with -minimumchainwork)
        # will be used to test the interaction with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections
        # test_node connects to node0
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (which has -minimumchainwork set)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
# 1. Have nodes mine a block (leave IBD)
[n.generatetoaddress(1, n.get_deterministic_priv_key().address) for n in self.nodes]
tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2,None,block_time), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_and_ping(msg_block(blocks_h2[0]))
min_work_node.send_and_ping(msg_block(blocks_h2[1]))
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1,None,block_time), block_time)
block_time += 1
block_h1f.solve()
test_node.send_and_ping(msg_block(block_h1f))
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4a. Send another block that builds on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2,None,block_time), block_time)
block_time += 1
block_h2f.solve()
test_node.send_and_ping(msg_block(block_h2f))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3,None,block_h2f.nTime), block_h2f.nTime+1)
block_h3.solve()
test_node.send_and_ping(msg_block(block_h3))
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
        # But this block should be accepted by the node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height too far ahead), as long as no headers are missing.
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4,None,tip.nTime), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_and_ping(msg_block(all_blocks[1]))
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_and_ping(msg_block(all_blocks[1]))
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.send_and_ping(msg_block(block_h1f))
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_and_ping(msg_block(block_h1f))
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289,None,all_blocks[284].nTime), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290,None,block_289f.nTime), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291,None,block_290f.nTime), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292,None,block_291.nTime), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_and_ping(headers_message)
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert tip_entry_found
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_and_ping(msg_block(block_290f))
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293,None,block_292.nTime), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
self.sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
|
|
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import uuid
import mock
from neutronclient.common import exceptions as n_exc
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from nova.api.openstack.compute import security_groups
from nova import compute
from nova import context
import nova.db
from nova import exception
from nova.network import model
from nova.network.neutronv2 import api as neutron_api
from nova.network.security_group import neutron_driver
from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack.compute import test_security_groups
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel as uuids
UUID_SERVER = uuids.server
class TestNeutronSecurityGroupsTestCase(test.TestCase):
def setUp(self):
super(TestNeutronSecurityGroupsTestCase, self).setUp()
cfg.CONF.set_override('use_neutron', True)
self.original_client = neutron_api.get_client
neutron_api.get_client = get_client
def tearDown(self):
neutron_api.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class TestNeutronSecurityGroupsV21(
test_security_groups.TestSecurityGroupsV21,
TestNeutronSecurityGroupsTestCase):
def _create_sg_template(self, **kwargs):
sg = test_security_groups.security_group_request_template(**kwargs)
return self.controller.create(self.req, body={'security_group': sg})
def _create_network(self):
body = {'network': {'name': 'net1'}}
neutron = get_client()
net = neutron.create_network(body)
body = {'subnet': {'network_id': net['network']['id'],
'cidr': '10.0.0.0/24'}}
neutron.create_subnet(body)
return net
def _create_port(self, **kwargs):
body = {'port': {'binding:vnic_type': model.VNIC_TYPE_NORMAL}}
fields = ['security_groups', 'device_id', 'network_id',
'port_security_enabled', 'ip_allocation']
for field in fields:
if field in kwargs:
body['port'][field] = kwargs[field]
neutron = get_client()
return neutron.create_port(body)
def _create_security_group(self, **kwargs):
body = {'security_group': {}}
fields = ['name', 'description']
for field in fields:
if field in kwargs:
body['security_group'][field] = kwargs[field]
neutron = get_client()
return neutron.create_security_group(body)
def test_create_security_group_with_no_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_empty_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_blank_name(self):
# Neutron's security group name field is optional.
pass
def test_create_security_group_with_whitespace_name(self):
# Neutron allows security group name to be whitespace.
pass
def test_create_security_group_with_blank_description(self):
# Neutron's security group description field is optional.
pass
def test_create_security_group_with_whitespace_description(self):
# Neutron allows description to be whitespace.
pass
def test_create_security_group_with_duplicate_name(self):
# Neutron allows duplicate names for security groups.
pass
def test_create_security_group_non_string_name(self):
# Neutron allows security group name to be non string.
pass
def test_create_security_group_non_string_description(self):
# Neutron allows non string description.
pass
def test_create_security_group_quota_limit(self):
# Enforced by Neutron server.
pass
def test_update_security_group(self):
# Enforced by Neutron server.
pass
def test_get_security_group_list(self):
self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
list_dict = self.controller.index(req)
self.assertEqual(len(list_dict['security_groups']), 2)
def test_get_security_group_list_all_tenants(self):
pass
def test_get_security_group_by_instance(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
'name': 'test', 'description': 'test-description'}]
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server_by_uuid)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
% test_security_groups.FAKE_UUID1)
res_dict = self.server_controller.index(
req, test_security_groups.FAKE_UUID1)['security_groups']
self.assertEqual(expected, res_dict)
def test_get_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
res_dict = self.controller.show(req, sg['id'])
expected = {'security_group': sg}
self.assertEqual(res_dict, expected)
def test_delete_security_group_by_id(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'])
self.controller.delete(req, sg['id'])
def test_delete_security_group_by_admin(self):
sg = self._create_sg_template().get('security_group')
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
sg['id'], use_admin_context=True)
self.controller.delete(req, sg['id'])
@mock.patch('nova.compute.utils.refresh_info_cache_for_instance')
def test_delete_security_group_in_use(self, refresh_info_cache_mock):
sg = self._create_sg_template().get('security_group')
self._create_network()
db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
_context = context.get_admin_context()
instance = instance_obj.Instance._from_db_object(
_context, instance_obj.Instance(), db_inst,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
neutron = neutron_api.API()
with mock.patch.object(nova.db, 'instance_get_by_uuid',
return_value=db_inst):
neutron.allocate_for_instance(_context, instance,
security_groups=[sg['id']])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, sg['id'])
def test_associate_non_running_instance(self):
        # Neutron does not care if the instance is running or not. When the
        # instance is detected by neutron, it will push the security group
        # policy down to it.
pass
def test_associate_already_associated_security_group_to_instance(self):
        # Neutron security groups do not raise an error if you update a port
        # to add a security group that is already associated with the port,
        # because PUT semantics are used.
pass
def test_associate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_associate_duplicate_names(self):
sg1 = self._create_security_group(name='sg1',
description='sg1')['security_group']
self._create_security_group(name='sg1',
description='sg1')['security_group']
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="sg1"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.assertRaises(webob.exc.HTTPConflict,
self.manager._addSecurityGroup,
req, UUID_SERVER, body)
def test_associate_port_security_enabled_true(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True,
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_associate_port_security_enabled_false(self):
self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], port_security_enabled=False,
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup,
req, UUID_SERVER, body)
def test_associate_deferred_ip_port(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True, ip_allocation='deferred',
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_disassociate_by_non_existing_security_group_name(self):
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name='non-existing'))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup,
req, UUID_SERVER, body)
def test_disassociate_non_running_instance(self):
# Neutron does not care if the instance is running or not. When the
        # instance is detected by neutron it will push the security
        # group policy down to it.
pass
def test_disassociate_already_associated_security_group_to_instance(self):
        # Neutron security groups do not raise an error if you update a
        # port by adding a security group that was already associated
        # with the port. This is because PUT semantics are used.
pass
def test_disassociate(self):
sg = self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
device_id=UUID_SERVER)
self.stub_out('nova.db.instance_get_by_uuid',
test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name="test"))
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
UUID_SERVER)
self.manager._removeSecurityGroup(req, UUID_SERVER, body)
def test_get_instances_security_groups_bindings(self):
servers = [{'id': test_security_groups.FAKE_UUID1},
{'id': test_security_groups.FAKE_UUID2}]
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
        # test that name='' is replaced with the id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id']],
device_id=test_security_groups.FAKE_UUID1)
self._create_port(
network_id=net['network']['id'], security_groups=[sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID2)
expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
{'name': sg2['name']}],
test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
{'name': sg3['id']}]}
security_group_api = self.controller.security_group_api
bindings = (
security_group_api.get_instances_security_groups_bindings(
context.get_admin_context(), servers))
self.assertEqual(bindings, expected)
def test_get_instance_security_groups(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
sg2 = self._create_sg_template(name='test2').get('security_group')
        # test that name='' is replaced with the id
sg3 = self._create_sg_template(name='').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id'],
sg2['id'],
sg3['id']],
device_id=test_security_groups.FAKE_UUID1)
expected = [{'name': sg1['name']}, {'name': sg2['name']},
{'name': sg3['id']}]
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(),
instance_obj.Instance(uuid=test_security_groups.FAKE_UUID1))
self.assertEqual(sgs, expected)
@mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
'get_instances_security_groups_bindings')
def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
servers = [{'id': test_security_groups.FAKE_UUID1}]
neutron_sg_bind_mock.return_value = {}
security_group_api = self.controller.security_group_api
ctx = context.get_admin_context()
sgs = security_group_api.get_instance_security_groups(ctx,
instance_obj.Instance(uuid=test_security_groups.FAKE_UUID1))
neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
self.assertEqual([], sgs)
def test_create_port_with_sg_and_port_security_enabled_true(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
port_security_enabled=True,
device_id=test_security_groups.FAKE_UUID1)
security_group_api = self.controller.security_group_api
sgs = security_group_api.get_instance_security_groups(
context.get_admin_context(),
instance_obj.Instance(uuid=test_security_groups.FAKE_UUID1))
self.assertEqual(sgs, [{'name': 'test1'}])
def test_create_port_with_sg_and_port_security_enabled_false(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
self.assertRaises(exception.SecurityGroupCannotBeApplied,
self._create_port,
network_id=net['network']['id'],
security_groups=[sg1['id']],
port_security_enabled=False,
device_id=test_security_groups.FAKE_UUID1)
class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
def setUp(self):
super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
id1 = '11111111-1111-1111-1111-111111111111'
sg_template1 = test_security_groups.security_group_template(
security_group_rules=[], id=id1)
id2 = '22222222-2222-2222-2222-222222222222'
sg_template2 = test_security_groups.security_group_template(
security_group_rules=[], id=id2)
self.controller_sg = security_groups.SecurityGroupController()
neutron = get_client()
neutron._fake_security_groups[id1] = sg_template1
neutron._fake_security_groups[id2] = sg_template2
def tearDown(self):
neutron_api.get_client = self.original_client
get_client()._reset()
super(TestNeutronSecurityGroupsTestCase, self).tearDown()
class _TestNeutronSecurityGroupRulesBase(object):
def test_create_add_existing_rules_by_cidr(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_create_add_existing_rules_by_group_id(self):
sg = test_security_groups.security_group_template()
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
self.controller_sg.create(req, {'security_group': sg})
rule = test_security_groups.security_group_rule_template(
group=self.sg1['id'], parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.controller.create(req, {'security_group_rule': rule})
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_rule': rule})
def test_delete(self):
rule = test_security_groups.security_group_rule_template(
parent_group_id=self.sg2['id'])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
res_dict = self.controller.create(req, {'security_group_rule': rule})
security_group_rule = res_dict['security_group_rule']
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
% security_group_rule['id'])
self.controller.delete(req, security_group_rule['id'])
def test_create_rule_quota_limit(self):
# Enforced by neutron
pass
class TestNeutronSecurityGroupRulesV21(
_TestNeutronSecurityGroupRulesBase,
test_security_groups.TestSecurityGroupRulesV21,
TestNeutronSecurityGroupRulesTestCase):
pass
class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
content_type = 'application/json'
def setUp(self):
super(TestNeutronSecurityGroupsOutputTest, self).setUp()
fakes.stub_out_nw_api(self)
self.controller = security_groups.SecurityGroupController()
self.stubs.Set(compute.api.API, 'get',
test_security_groups.fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all',
test_security_groups.fake_compute_get_all)
self.stubs.Set(compute.api.API, 'create',
test_security_groups.fake_compute_create)
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instances_security_groups_bindings',
(test_security_groups.
fake_get_instances_security_groups_bindings))
def _make_request(self, url, body=None):
req = fakes.HTTPRequest.blank(url)
if body:
req.method = 'POST'
req.body = encodeutils.safe_encode(self._encode_body(body))
req.content_type = self.content_type
req.headers['Accept'] = self.content_type
        # NOTE: 'os-security-groups' is included to enable the
        # security_groups attribute on the response body.
res = req.get_response(fakes.wsgi_app_v21(
init_only=('servers', 'os-security-groups')))
return res
def _encode_body(self, body):
return jsonutils.dumps(body)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def _get_groups(self, server):
return server.get('security_groups')
def test_create(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_create_server_get_default_security_group(self):
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
group = self._get_groups(server)[0]
self.assertEqual(group.get('name'), 'default')
def test_show(self):
def fake_get_instance_security_groups(inst, context, id):
return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
self.stubs.Set(neutron_driver.SecurityGroupAPI,
'get_instance_security_groups',
fake_get_instance_security_groups)
url = '/v2/fake/servers'
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
for security_group in security_groups:
sg = test_security_groups.security_group_template(
name=security_group['name'])
self.controller.create(req, {'security_group': sg})
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
security_groups=security_groups)
res = self._make_request(url, {'server': server})
self.assertEqual(res.status_int, 202)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
# Test that show (GET) returns the same information as create (POST)
url = '/v2/fake/servers/' + test_security_groups.UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
for i, group in enumerate(self._get_groups(server)):
name = 'fake-2-%s' % i
self.assertEqual(group.get('name'), name)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
for j, group in enumerate(self._get_groups(server)):
name = 'fake-%s-%s' % (i, j)
self.assertEqual(group.get('name'), name)
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
def get_client(context=None, admin=False):
return MockClient()
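# Illustrative sketch (an assumption, mirroring the tearDown above): the test
# cases install this fake by rebinding the client factory, e.g.
#
#     self.original_client = neutron_api.get_client
#     neutron_api.get_client = get_client
#
# so every Neutron call issued by the code under test is answered by the
# in-memory MockClient below.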
class MockClient(object):
# Needs to be global to survive multiple calls to get_client.
_fake_security_groups = {}
_fake_ports = {}
_fake_networks = {}
_fake_subnets = {}
_fake_security_group_rules = {}
def __init__(self):
# add default security group
if not len(self._fake_security_groups):
ret = {'name': 'default', 'description': 'default',
'tenant_id': 'fake_tenant', 'security_group_rules': [],
'id': str(uuid.uuid4())}
self._fake_security_groups[ret['id']] = ret
def _reset(self):
self._fake_security_groups.clear()
self._fake_ports.clear()
self._fake_networks.clear()
self._fake_subnets.clear()
self._fake_security_group_rules.clear()
def create_security_group(self, body=None):
s = body.get('security_group')
if not isinstance(s.get('name', ''), six.string_types):
msg = ('BadRequest: Invalid input for name. Reason: '
'None is not a valid string.')
raise n_exc.BadRequest(message=msg)
        if not isinstance(s.get('description', ''), six.string_types):
msg = ('BadRequest: Invalid input for description. Reason: '
'None is not a valid string.')
raise n_exc.BadRequest(message=msg)
if len(s.get('name')) > 255 or len(s.get('description')) > 255:
            msg = ('Security Group name or description greater than '
                   '255 characters')
raise n_exc.NeutronClientException(message=msg, status_code=401)
ret = {'name': s.get('name'), 'description': s.get('description'),
'tenant_id': 'fake', 'security_group_rules': [],
'id': str(uuid.uuid4())}
self._fake_security_groups[ret['id']] = ret
return {'security_group': ret}
def create_network(self, body):
n = body.get('network')
ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
'admin_state_up': n.get('admin_state_up', True),
'tenant_id': 'fake_tenant',
'id': str(uuid.uuid4())}
if 'port_security_enabled' in n:
ret['port_security_enabled'] = n['port_security_enabled']
self._fake_networks[ret['id']] = ret
return {'network': ret}
def create_subnet(self, body):
s = body.get('subnet')
try:
net = self._fake_networks[s.get('network_id')]
except KeyError:
msg = 'Network %s not found' % s.get('network_id')
raise n_exc.NeutronClientException(message=msg, status_code=404)
ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
net['subnets'].append(ret['id'])
self._fake_networks[net['id']] = net
self._fake_subnets[ret['id']] = ret
return {'subnet': ret}
def create_port(self, body):
p = body.get('port')
ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
'device_id': p.get('device_id', str(uuid.uuid4())),
'admin_state_up': p.get('admin_state_up', True),
'security_groups': p.get('security_groups', []),
'network_id': p.get('network_id'),
'ip_allocation': p.get('ip_allocation'),
'binding:vnic_type':
p.get('binding:vnic_type') or model.VNIC_TYPE_NORMAL}
network = self._fake_networks[p['network_id']]
if 'port_security_enabled' in p:
ret['port_security_enabled'] = p['port_security_enabled']
elif 'port_security_enabled' in network:
ret['port_security_enabled'] = network['port_security_enabled']
port_security = ret.get('port_security_enabled', True)
# port_security must be True if security groups are present
if not port_security and ret['security_groups']:
raise exception.SecurityGroupCannotBeApplied()
if network['subnets'] and p.get('ip_allocation') != 'deferred':
ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
'ip_address': '10.0.0.1'}]
if not ret['security_groups'] and (port_security is None or
port_security is True):
for security_group in self._fake_security_groups.values():
if security_group['name'] == 'default':
ret['security_groups'] = [security_group['id']]
break
self._fake_ports[ret['id']] = ret
return {'port': ret}
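    # Illustrative sketch of the port-security handling above: a call such as
    #
    #     client.create_port({'port': {'network_id': net_id,
    #                                  'port_security_enabled': False,
    #                                  'security_groups': [sg_id]}})
    #
    # raises exception.SecurityGroupCannotBeApplied, while omitting
    # security_groups on a port-security-enabled network attaches the
    # 'default' group automatically.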
def create_security_group_rule(self, body):
        # Does not handle the bulk case, so just pick the first rule.
r = body.get('security_group_rules')[0]
fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
'ethertype', 'remote_ip_prefix', 'tenant_id',
'security_group_id', 'remote_group_id']
ret = {}
for field in fields:
ret[field] = r.get(field)
ret['id'] = str(uuid.uuid4())
self._fake_security_group_rules[ret['id']] = ret
return {'security_group_rules': [ret]}
def show_security_group(self, security_group, **_params):
try:
sg = self._fake_security_groups[security_group]
except KeyError:
msg = 'Security Group %s not found' % security_group
raise n_exc.NeutronClientException(message=msg, status_code=404)
for security_group_rule in self._fake_security_group_rules.values():
if security_group_rule['security_group_id'] == sg['id']:
sg['security_group_rules'].append(security_group_rule)
return {'security_group': sg}
def show_security_group_rule(self, security_group_rule, **_params):
try:
return {'security_group_rule':
self._fake_security_group_rules[security_group_rule]}
except KeyError:
msg = 'Security Group rule %s not found' % security_group_rule
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_network(self, network, **_params):
try:
return {'network':
self._fake_networks[network]}
except KeyError:
msg = 'Network %s not found' % network
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_port(self, port, **_params):
try:
return {'port':
self._fake_ports[port]}
except KeyError:
msg = 'Port %s not found' % port
raise n_exc.NeutronClientException(message=msg, status_code=404)
def show_subnet(self, subnet, **_params):
try:
return {'subnet':
self._fake_subnets[subnet]}
except KeyError:
            msg = 'Subnet %s not found' % subnet
raise n_exc.NeutronClientException(message=msg, status_code=404)
def list_security_groups(self, **_params):
ret = []
for security_group in self._fake_security_groups.values():
names = _params.get('name')
if names:
if not isinstance(names, list):
names = [names]
for name in names:
if security_group.get('name') == name:
ret.append(security_group)
ids = _params.get('id')
if ids:
if not isinstance(ids, list):
ids = [ids]
for id in ids:
if security_group.get('id') == id:
ret.append(security_group)
elif not (names or ids):
ret.append(security_group)
return {'security_groups': ret}
def list_networks(self, **_params):
# neutronv2/api.py _get_available_networks calls this assuming
# search_opts filter "shared" is implemented and not ignored
shared = _params.get("shared", None)
if shared:
return {'networks': []}
else:
return {'networks':
[network for network in self._fake_networks.values()]}
def list_ports(self, **_params):
ret = []
device_id = _params.get('device_id')
for port in self._fake_ports.values():
if device_id:
if port['device_id'] in device_id:
ret.append(port)
else:
ret.append(port)
return {'ports': ret}
def list_subnets(self, **_params):
return {'subnets':
[subnet for subnet in self._fake_subnets.values()]}
def list_floatingips(self, **_params):
return {'floatingips': []}
def delete_security_group(self, security_group):
self.show_security_group(security_group)
ports = self.list_ports()
for port in ports.get('ports'):
for sg_port in port['security_groups']:
if sg_port == security_group:
                    msg = ('Unable to delete Security Group %s: it is in use'
                           % security_group)
raise n_exc.NeutronClientException(message=msg,
status_code=409)
del self._fake_security_groups[security_group]
def delete_security_group_rule(self, security_group_rule):
self.show_security_group_rule(security_group_rule)
del self._fake_security_group_rules[security_group_rule]
def delete_network(self, network):
self.show_network(network)
self._check_ports_on_network(network)
for subnet in self._fake_subnets.values():
if subnet['network_id'] == network:
del self._fake_subnets[subnet['id']]
del self._fake_networks[network]
def delete_port(self, port):
self.show_port(port)
del self._fake_ports[port]
def update_port(self, port, body=None):
self.show_port(port)
self._fake_ports[port].update(body['port'])
return {'port': self._fake_ports[port]}
def list_extensions(self, **_parms):
return {'extensions': []}
def _check_ports_on_network(self, network):
        ports = self.list_ports()
        for port in ports.get('ports'):
            if port['network_id'] == network:
                msg = ('Unable to complete operation on network %s. There '
                       'are one or more ports still in use on the network'
                       % network)
                raise n_exc.NeutronClientException(message=msg, status_code=409)
def find_resource(self, resource, name_or_id, project_id=None,
cmd_resource=None, parent_id=None, fields=None):
if resource == 'security_group':
# lookup first by unique id
sg = self._fake_security_groups.get(name_or_id)
if sg:
return sg
# lookup by name, raise an exception on duplicates
res = None
for sg in self._fake_security_groups.values():
if sg['name'] == name_or_id:
if res:
raise n_exc.NeutronClientNoUniqueMatch(
resource=resource, name=name_or_id)
res = sg
if res:
return res
raise n_exc.NotFound("Fake %s '%s' not found." %
(resource, name_or_id))
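    # Illustrative sketch: find_resource resolves by exact id first, then by
    # a name that must be unique, e.g.
    #
    #     client.find_resource('security_group', sg['id'])   # id match
    #     client.find_resource('security_group', 'default')  # unique name
    #     client.find_resource('security_group', 'dup')      # NeutronClientNoUniqueMatch
    #     client.find_resource('security_group', 'missing')  # n_exc.NotFound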
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest

import IECore
import Gaffer
import GafferTest
import GafferUITest
import GafferScene
import GafferSceneUI
class ContextAlgoTest( GafferUITest.TestCase ) :
def testExpandedPaths( self ) :
# A
# |__B
# |__D
# |__E
# |__C
# |__F
# |__G
G = GafferScene.Sphere()
G["name"].setValue( "G" )
F = GafferScene.Sphere()
F["name"].setValue( "F" )
D = GafferScene.Sphere()
D["name"].setValue( "D" )
E = GafferScene.Sphere()
E["name"].setValue( "E" )
C = GafferScene.Group()
C["name"].setValue( "C" )
C["in"][0].setInput( F["out"] )
C["in"][1].setInput( G["out"] )
B = GafferScene.Group()
B["name"].setValue( "B" )
B["in"][0].setInput( D["out"] )
B["in"][1].setInput( E["out"] )
A = GafferScene.Group()
A["name"].setValue( "A" )
A["in"][0].setInput( B["out"] )
A["in"][1].setInput( C["out"] )
context = Gaffer.Context()
GafferSceneUI.ContextAlgo.setExpandedPaths( context, IECore.PathMatcher( [ "/" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/" ] ) )
GafferSceneUI.ContextAlgo.setExpandedPaths( context, IECore.PathMatcher( [ "/", "/A" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/", "/A" ] ) )
GafferSceneUI.ContextAlgo.setExpandedPaths( context, IECore.PathMatcher( [ "/", "/A", "/A/C" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/", "/A", "/A/C" ] ) )
GafferSceneUI.ContextAlgo.clearExpansion( context )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher() )
GafferSceneUI.ContextAlgo.expand( context, IECore.PathMatcher( [ "/A/B", "/A/C" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/", "/A", "/A/B", "/A/C" ] ) )
GafferSceneUI.ContextAlgo.clearExpansion( context )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher() )
GafferSceneUI.ContextAlgo.expand( context, IECore.PathMatcher( [ "/A/B", "/A/C" ] ), expandAncestors = False )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/A/B", "/A/C" ] ) )
GafferSceneUI.ContextAlgo.setExpandedPaths( context, IECore.PathMatcher( [ "/" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/" ] ) )
newLeafs = GafferSceneUI.ContextAlgo.expandDescendants( context, IECore.PathMatcher( [ "/" ] ), A["out"] )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/", "/A", "/A/B", "/A/C" ] ) )
self.assertEqual( newLeafs, IECore.PathMatcher( [ "/A/B/D", "/A/B/E", "/A/C/G", "/A/C/F" ] ) )
GafferSceneUI.ContextAlgo.setExpandedPaths( context, IECore.PathMatcher( [ "/" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/" ] ) )
newLeafs = GafferSceneUI.ContextAlgo.expandDescendants( context, IECore.PathMatcher( [ "/" ] ), A["out"], depth = 1 )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/", "/A" ] ) )
self.assertEqual( newLeafs, IECore.PathMatcher( [ "/A/B", "/A/C" ] ) )
newLeafs = GafferSceneUI.ContextAlgo.expandDescendants( context, IECore.PathMatcher( [ "/A/C" ] ), A["out"], depth = 1 )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( context ), IECore.PathMatcher( [ "/", "/A", "/A/C" ] ) )
self.assertEqual( newLeafs, IECore.PathMatcher( [ "/A/C/G", "/A/C/F" ] ) )
def testSelectedPaths( self ) :
context = Gaffer.Context()
GafferSceneUI.ContextAlgo.setSelectedPaths( context, IECore.PathMatcher( [ "/" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( context ), IECore.PathMatcher( [ "/" ] ) )
GafferSceneUI.ContextAlgo.setSelectedPaths( context, IECore.PathMatcher( [ "/", "/A" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( context ), IECore.PathMatcher( [ "/", "/A" ] ) )
GafferSceneUI.ContextAlgo.setSelectedPaths( context, IECore.PathMatcher( [ "/", "/A", "/A/C" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( context ), IECore.PathMatcher( [ "/", "/A", "/A/C" ] ) )
GafferSceneUI.ContextAlgo.setSelectedPaths( context, IECore.PathMatcher( [ "/A/C", "/A/B/D" ] ) )
self.assertEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( context ), IECore.PathMatcher( [ "/A/C", "/A/B/D" ] ) )
def testAffectsExpandedPaths( self ) :
c = Gaffer.Context()
cs = GafferTest.CapturingSlot( c.changedSignal() )
GafferSceneUI.ContextAlgo.setExpandedPaths( c, IECore.PathMatcher( [ "/A" ] ) )
self.assertEqual( len( cs ), 1 )
self.assertTrue( GafferSceneUI.ContextAlgo.affectsExpandedPaths( cs[0][1] ) )
self.assertFalse( GafferSceneUI.ContextAlgo.affectsExpandedPaths( "frame" ) )
def testAffectsSelectedPaths( self ) :
c = Gaffer.Context()
cs = GafferTest.CapturingSlot( c.changedSignal() )
GafferSceneUI.ContextAlgo.setSelectedPaths( c, IECore.PathMatcher( [ "/A" ] ) )
self.assertEqual( len( cs ), 2 )
self.assertTrue( GafferSceneUI.ContextAlgo.affectsSelectedPaths( cs[0][1] ) )
self.assertFalse( GafferSceneUI.ContextAlgo.affectsSelectedPaths( "frame" ) )
def testSelectionIsCopied( self ) :
c = Gaffer.Context()
s = IECore.PathMatcher( [ "/a" ] )
GafferSceneUI.ContextAlgo.setSelectedPaths( c, s )
self.assertEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( c ), s )
s.addPath( "/a/b" )
self.assertNotEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( c ), s )
s = GafferSceneUI.ContextAlgo.getSelectedPaths( c )
self.assertEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( c ), s )
s.addPath( "/a/b" )
self.assertNotEqual( GafferSceneUI.ContextAlgo.getSelectedPaths( c ), s )
def testExpansionIsCopied( self ) :
c = Gaffer.Context()
e = IECore.PathMatcher( [ "/a" ] )
GafferSceneUI.ContextAlgo.setExpandedPaths( c, e )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( c ), e )
e.addPath( "/a/b" )
self.assertNotEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( c ), e )
e = GafferSceneUI.ContextAlgo.getExpandedPaths( c )
self.assertEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( c ), e )
e.addPath( "/a/b" )
self.assertNotEqual( GafferSceneUI.ContextAlgo.getExpandedPaths( c ), e )
def testLastSelectedPath( self ) :
c = Gaffer.Context()
self.assertEqual( GafferSceneUI.ContextAlgo.getLastSelectedPath( c ), "" )
s = IECore.PathMatcher( [ "/a", "/b" ] )
GafferSceneUI.ContextAlgo.setSelectedPaths( c, s )
self.assertTrue( s.match( GafferSceneUI.ContextAlgo.getLastSelectedPath( c ) ) & s.Result.ExactMatch )
GafferSceneUI.ContextAlgo.setLastSelectedPath( c, "/c" )
self.assertEqual( GafferSceneUI.ContextAlgo.getLastSelectedPath( c ), "/c" )
s = GafferSceneUI.ContextAlgo.getSelectedPaths( c )
self.assertEqual( s, IECore.PathMatcher( [ "/a", "/b", "/c" ] ) )
GafferSceneUI.ContextAlgo.setSelectedPaths( c, IECore.PathMatcher() )
self.assertEqual( GafferSceneUI.ContextAlgo.getLastSelectedPath( c ), "" )
if __name__ == "__main__":
unittest.main()
"""Websocket API for Z-Wave JS."""
from __future__ import annotations
import dataclasses
from functools import partial, wraps
import json
from typing import Any, Callable
from aiohttp import hdrs, web, web_exceptions, web_request
import voluptuous as vol
from zwave_js_server import dump
from zwave_js_server.client import Client
from zwave_js_server.const import CommandClass, InclusionStrategy, LogLevel
from zwave_js_server.exceptions import (
BaseZwaveJSServerError,
FailedCommand,
InvalidNewValue,
NotFoundError,
SetValueFailed,
)
from zwave_js_server.firmware import begin_firmware_update
from zwave_js_server.model.controller import ControllerStatistics
from zwave_js_server.model.firmware import (
FirmwareUpdateFinished,
FirmwareUpdateProgress,
)
from zwave_js_server.model.log_config import LogConfig
from zwave_js_server.model.log_message import LogMessage
from zwave_js_server.model.node import Node, NodeStatistics
from zwave_js_server.util.node import async_set_config_parameter
from homeassistant.components import websocket_api
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.components.websocket_api.const import (
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_UNKNOWN_ERROR,
)
from homeassistant.config_entries import ConfigEntry, ConfigEntryState
from homeassistant.const import CONF_URL
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import Unauthorized
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
CONF_DATA_COLLECTION_OPTED_IN,
DATA_CLIENT,
DOMAIN,
EVENT_DEVICE_ADDED_TO_REGISTRY,
)
from .helpers import async_enable_statistics, update_data_collection_preference
from .services import BITMASK_SCHEMA
DATA_UNSUBSCRIBE = "unsubs"
# general API constants
ID = "id"
ENTRY_ID = "entry_id"
ERR_NOT_LOADED = "not_loaded"
NODE_ID = "node_id"
COMMAND_CLASS_ID = "command_class_id"
TYPE = "type"
PROPERTY = "property"
PROPERTY_KEY = "property_key"
VALUE = "value"
SECURE = "secure"
# constants for log config commands
CONFIG = "config"
LEVEL = "level"
LOG_TO_FILE = "log_to_file"
FILENAME = "filename"
ENABLED = "enabled"
FORCE_CONSOLE = "force_console"
# constants for setting config parameters
VALUE_ID = "value_id"
STATUS = "status"
# constants for data collection
ENABLED = "enabled"
OPTED_IN = "opted_in"
def async_get_entry(orig_func: Callable) -> Callable:
"""Decorate async function to get entry."""
@wraps(orig_func)
async def async_get_entry_func(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
) -> None:
"""Provide user specific data and store to function."""
entry_id = msg[ENTRY_ID]
entry = hass.config_entries.async_get_entry(entry_id)
if entry is None:
connection.send_error(
msg[ID], ERR_NOT_FOUND, f"Config entry {entry_id} not found"
)
return
if entry.state is not ConfigEntryState.LOADED:
connection.send_error(
msg[ID], ERR_NOT_LOADED, f"Config entry {entry_id} not loaded"
)
return
client = hass.data[DOMAIN][entry_id][DATA_CLIENT]
await orig_func(hass, connection, msg, entry, client)
return async_get_entry_func
def async_get_node(orig_func: Callable) -> Callable:
"""Decorate async function to get node."""
@async_get_entry
@wraps(orig_func)
async def async_get_node_func(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Provide user specific data and store to function."""
node_id = msg[NODE_ID]
node = client.driver.controller.nodes.get(node_id)
if node is None:
connection.send_error(msg[ID], ERR_NOT_FOUND, f"Node {node_id} not found")
return
await orig_func(hass, connection, msg, node)
return async_get_node_func
def async_handle_failed_command(orig_func: Callable) -> Callable:
"""Decorate async function to handle FailedCommand and send relevant error."""
@wraps(orig_func)
async def async_handle_failed_command_func(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
*args: Any,
**kwargs: Any,
) -> None:
"""Handle FailedCommand within function and send relevant error."""
try:
await orig_func(hass, connection, msg, *args, **kwargs)
except FailedCommand as err:
            # Unsubscribe from callbacks
if unsubs := msg.get(DATA_UNSUBSCRIBE):
for unsub in unsubs:
unsub()
connection.send_error(msg[ID], err.error_code, err.args[0])
return async_handle_failed_command_func
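# Illustrative note: the three decorators above are meant to stack under the
# websocket_api decorators, innermost closest to the handler, exactly as the
# registered commands below do, e.g.
#
#     @websocket_api.require_admin
#     @websocket_api.websocket_command({...})
#     @websocket_api.async_response
#     @async_handle_failed_command
#     @async_get_node  # wraps async_get_entry, so the handler receives `node`
#     async def websocket_example(hass, connection, msg, node) -> None: ...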
@callback
def async_register_api(hass: HomeAssistant) -> None:
"""Register all of our api endpoints."""
websocket_api.async_register_command(hass, websocket_network_status)
websocket_api.async_register_command(hass, websocket_node_status)
websocket_api.async_register_command(hass, websocket_node_state)
websocket_api.async_register_command(hass, websocket_node_metadata)
websocket_api.async_register_command(hass, websocket_ping_node)
websocket_api.async_register_command(hass, websocket_add_node)
websocket_api.async_register_command(hass, websocket_stop_inclusion)
websocket_api.async_register_command(hass, websocket_stop_exclusion)
websocket_api.async_register_command(hass, websocket_remove_node)
websocket_api.async_register_command(hass, websocket_remove_failed_node)
websocket_api.async_register_command(hass, websocket_replace_failed_node)
websocket_api.async_register_command(hass, websocket_begin_healing_network)
websocket_api.async_register_command(
hass, websocket_subscribe_heal_network_progress
)
websocket_api.async_register_command(hass, websocket_stop_healing_network)
websocket_api.async_register_command(hass, websocket_refresh_node_info)
websocket_api.async_register_command(hass, websocket_refresh_node_values)
websocket_api.async_register_command(hass, websocket_refresh_node_cc_values)
websocket_api.async_register_command(hass, websocket_heal_node)
websocket_api.async_register_command(hass, websocket_set_config_parameter)
websocket_api.async_register_command(hass, websocket_get_config_parameters)
websocket_api.async_register_command(hass, websocket_subscribe_log_updates)
websocket_api.async_register_command(hass, websocket_update_log_config)
websocket_api.async_register_command(hass, websocket_get_log_config)
websocket_api.async_register_command(
hass, websocket_update_data_collection_preference
)
websocket_api.async_register_command(hass, websocket_data_collection_status)
websocket_api.async_register_command(hass, websocket_version_info)
websocket_api.async_register_command(hass, websocket_abort_firmware_update)
websocket_api.async_register_command(
hass, websocket_subscribe_firmware_update_status
)
websocket_api.async_register_command(hass, websocket_check_for_config_updates)
websocket_api.async_register_command(hass, websocket_install_config_update)
websocket_api.async_register_command(
hass, websocket_subscribe_controller_statistics
)
websocket_api.async_register_command(hass, websocket_subscribe_node_statistics)
hass.http.register_view(DumpView())
hass.http.register_view(FirmwareUploadView())
@websocket_api.require_admin
@websocket_api.websocket_command(
{vol.Required(TYPE): "zwave_js/network_status", vol.Required(ENTRY_ID): str}
)
@websocket_api.async_response
@async_get_entry
async def websocket_network_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get the status of the Z-Wave JS network."""
controller = client.driver.controller
data = {
"client": {
"ws_server_url": client.ws_server_url,
"state": "connected" if client.connected else "disconnected",
"driver_version": client.version.driver_version,
"server_version": client.version.server_version,
},
"controller": {
"home_id": controller.home_id,
"library_version": controller.library_version,
"type": controller.controller_type,
"own_node_id": controller.own_node_id,
"is_secondary": controller.is_secondary,
"is_using_home_id_from_other_network": controller.is_using_home_id_from_other_network,
"is_sis_present": controller.is_SIS_present,
"was_real_primary": controller.was_real_primary,
"is_static_update_controller": controller.is_static_update_controller,
"is_slave": controller.is_slave,
"serial_api_version": controller.serial_api_version,
"manufacturer_id": controller.manufacturer_id,
"product_id": controller.product_id,
"product_type": controller.product_type,
"supported_function_types": controller.supported_function_types,
"suc_node_id": controller.suc_node_id,
"supports_timers": controller.supports_timers,
"is_heal_network_active": controller.is_heal_network_active,
"nodes": list(client.driver.controller.nodes),
},
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_status",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_node_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Get the status of a Z-Wave JS node."""
data = {
"node_id": node.node_id,
"is_routing": node.is_routing,
"status": node.status,
"is_secure": node.is_secure,
"ready": node.ready,
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_state",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_node_state(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Get the state data of a Z-Wave JS node."""
connection.send_result(
msg[ID],
node.data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/node_metadata",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_node_metadata(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Get the metadata of a Z-Wave JS node."""
data = {
"node_id": node.node_id,
"exclusion": node.device_config.metadata.exclusion,
"inclusion": node.device_config.metadata.inclusion,
"manual": node.device_config.metadata.manual,
"wakeup": node.device_config.metadata.wakeup,
"reset": node.device_config.metadata.reset,
"device_database_url": node.device_database_url,
}
connection.send_result(
msg[ID],
data,
)
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/ping_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_ping_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Ping a Z-Wave JS node."""
result = await node.async_ping()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/add_node",
vol.Required(ENTRY_ID): str,
vol.Optional(SECURE, default=False): bool,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_add_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Add a node to the Z-Wave network."""
controller = client.driver.controller
if msg[SECURE]:
inclusion_strategy = InclusionStrategy.SECURITY_S0
else:
inclusion_strategy = InclusionStrategy.INSECURE
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def forward_stage(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "stage": event["stageName"]}
)
)
@callback
def node_added(event: dict) -> None:
node = event["node"]
interview_unsubs = [
node.on("interview started", forward_event),
node.on("interview completed", forward_event),
node.on("interview stage completed", forward_stage),
node.on("interview failed", forward_event),
]
unsubs.extend(interview_unsubs)
node_details = {
"node_id": node.node_id,
"status": node.status,
"ready": node.ready,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node added", "node": node_details}
)
)
@callback
def device_registered(device: DeviceEntry) -> None:
device_details = {
"name": device.name,
"id": device.id,
"manufacturer": device.manufacturer,
"model": device.model,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "device registered", "device": device_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("inclusion started", forward_event),
controller.on("inclusion failed", forward_event),
controller.on("inclusion stopped", forward_event),
controller.on("node added", node_added),
async_dispatcher_connect(
hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device_registered
),
]
result = await controller.async_begin_inclusion(inclusion_strategy)
connection.send_result(
msg[ID],
result,
)
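# Illustrative client-side sketch (message shape assumed from the schema
# above): a frontend subscribes with
#
#     {"id": 1, "type": "zwave_js/add_node", "entry_id": "<entry_id>",
#      "secure": false}
#
# and then receives event messages such as {"event": "inclusion started"},
# {"event": "node added", "node": {...}} and, once the registry entry exists,
# {"event": "device registered", "device": {...}}.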
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_inclusion",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_stop_inclusion(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Cancel adding a node to the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_stop_inclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_exclusion",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_stop_exclusion(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Cancel removing a node from the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_stop_exclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/remove_node",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_remove_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Remove a node from the Z-Wave network."""
controller = client.driver.controller
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("exclusion started", forward_event),
controller.on("exclusion failed", forward_event),
controller.on("exclusion stopped", forward_event),
controller.on("node removed", node_removed),
]
result = await controller.async_begin_exclusion()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/replace_failed_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
vol.Optional(SECURE, default=False): bool,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_replace_failed_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Replace a failed node with a new node."""
controller = client.driver.controller
node_id = msg[NODE_ID]
if msg[SECURE]:
inclusion_strategy = InclusionStrategy.SECURITY_S0
else:
inclusion_strategy = InclusionStrategy.INSECURE
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def forward_stage(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "stage": event["stageName"]}
)
)
@callback
def node_added(event: dict) -> None:
node = event["node"]
interview_unsubs = [
node.on("interview started", forward_event),
node.on("interview completed", forward_event),
node.on("interview stage completed", forward_stage),
node.on("interview failed", forward_event),
]
unsubs.extend(interview_unsubs)
node_details = {
"node_id": node.node_id,
"status": node.status,
"ready": node.ready,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node added", "node": node_details}
)
)
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
@callback
def device_registered(device: DeviceEntry) -> None:
device_details = {
"name": device.name,
"id": device.id,
"manufacturer": device.manufacturer,
"model": device.model,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "device registered", "device": device_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("inclusion started", forward_event),
controller.on("inclusion failed", forward_event),
controller.on("inclusion stopped", forward_event),
controller.on("node removed", node_removed),
controller.on("node added", node_added),
async_dispatcher_connect(
hass, EVENT_DEVICE_ADDED_TO_REGISTRY, device_registered
),
]
result = await controller.async_replace_failed_node(node_id, inclusion_strategy)
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/remove_failed_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_remove_failed_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Remove a failed node from the Z-Wave network."""
controller = client.driver.controller
node_id = msg[NODE_ID]
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def node_removed(event: dict) -> None:
node = event["node"]
node_details = {
"node_id": node.node_id,
}
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": "node removed", "node": node_details}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [controller.on("node removed", node_removed)]
result = await controller.async_remove_failed_node(node_id)
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/begin_healing_network",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_begin_healing_network(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Begin healing the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_begin_healing_network()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_heal_network_progress",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_get_entry
async def websocket_subscribe_heal_network_progress(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Subscribe to heal Z-Wave network status updates."""
controller = client.driver.controller
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(key: str, event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "heal_node_status": event[key]}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("heal network progress", partial(forward_event, "progress")),
controller.on("heal network done", partial(forward_event, "result")),
]
connection.send_result(msg[ID], controller.heal_network_progress)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/stop_healing_network",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_stop_healing_network(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Stop healing the Z-Wave network."""
controller = client.driver.controller
result = await controller.async_stop_healing_network()
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/heal_node",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_heal_node(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Heal a node on the Z-Wave network."""
controller = client.driver.controller
node_id = msg[NODE_ID]
result = await controller.async_heal_node(node_id)
connection.send_result(
msg[ID],
result,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/refresh_node_info",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_refresh_node_info(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Re-interview a node."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_event(event: dict) -> None:
connection.send_message(
websocket_api.event_message(msg[ID], {"event": event["event"]})
)
@callback
def forward_stage(event: dict) -> None:
connection.send_message(
websocket_api.event_message(
msg[ID], {"event": event["event"], "stage": event["stageName"]}
)
)
connection.subscriptions[msg["id"]] = async_cleanup
msg[DATA_UNSUBSCRIBE] = unsubs = [
node.on("interview started", forward_event),
node.on("interview completed", forward_event),
node.on("interview stage completed", forward_stage),
node.on("interview failed", forward_event),
]
result = await node.async_refresh_info()
connection.send_result(msg[ID], result)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/refresh_node_values",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_refresh_node_values(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Refresh node values."""
await node.async_refresh_values()
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/refresh_node_cc_values",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
vol.Required(COMMAND_CLASS_ID): int,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_refresh_node_cc_values(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Refresh node values for a particular CommandClass."""
command_class_id = msg[COMMAND_CLASS_ID]
try:
command_class = CommandClass(command_class_id)
except ValueError:
connection.send_error(
msg[ID], ERR_NOT_FOUND, f"Command class {command_class_id} not found"
)
return
await node.async_refresh_cc_values(command_class)
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/set_config_parameter",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
vol.Required(PROPERTY): int,
vol.Optional(PROPERTY_KEY): int,
vol.Required(VALUE): vol.Any(int, BITMASK_SCHEMA),
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_set_config_parameter(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Set a config parameter value for a Z-Wave node."""
property_ = msg[PROPERTY]
property_key = msg.get(PROPERTY_KEY)
value = msg[VALUE]
try:
zwave_value, cmd_status = await async_set_config_parameter(
node, value, property_, property_key=property_key
)
except (InvalidNewValue, NotFoundError, NotImplementedError, SetValueFailed) as err:
code = ERR_UNKNOWN_ERROR
if isinstance(err, NotFoundError):
code = ERR_NOT_FOUND
elif isinstance(err, (InvalidNewValue, NotImplementedError)):
code = ERR_NOT_SUPPORTED
connection.send_error(
msg[ID],
code,
str(err),
)
return
connection.send_result(
msg[ID],
{
VALUE_ID: zwave_value.value_id,
STATUS: cmd_status,
},
)
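# Illustrative client-side sketch (message shape assumed from the schema
# above):
#
#     {"id": 5, "type": "zwave_js/set_config_parameter",
#      "entry_id": "<entry_id>", "node_id": 7, "property": 101, "value": 1}
#
# On success the result carries the value id and command status; invalid
# values map to not_supported and unknown properties to not_found, as
# handled above.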
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/get_config_parameters",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_get_config_parameters(
hass: HomeAssistant, connection: ActiveConnection, msg: dict, node: Node
) -> None:
"""Get a list of configuration parameters for a Z-Wave node."""
values = node.get_configuration_values()
result = {}
for value_id, zwave_value in values.items():
metadata = zwave_value.metadata
result[value_id] = {
"property": zwave_value.property_,
"property_key": zwave_value.property_key,
"configuration_value_type": zwave_value.configuration_value_type.value,
"metadata": {
"description": metadata.description,
"label": metadata.label,
"type": metadata.type,
"min": metadata.min,
"max": metadata.max,
"unit": metadata.unit,
"writeable": metadata.writeable,
"readable": metadata.readable,
},
"value": zwave_value.value,
}
if zwave_value.metadata.states:
result[value_id]["metadata"]["states"] = zwave_value.metadata.states
connection.send_result(
msg[ID],
result,
)
def filename_is_present_if_logging_to_file(obj: dict) -> dict:
"""Validate that filename is provided if log_to_file is True."""
if obj.get(LOG_TO_FILE, False) and FILENAME not in obj:
raise vol.Invalid("`filename` must be provided if logging to file")
return obj
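# Illustrative sketch: the validator above passes
# {"log_to_file": True, "filename": "zwave_js_server.log"} through unchanged
# and raises vol.Invalid for {"log_to_file": True} without a filename; it is
# composed into the zwave_js/update_log_config schema below via vol.All.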
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_log_updates",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_subscribe_log_updates(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Subscribe to log message events from the server."""
driver = client.driver
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
hass.async_create_task(driver.async_stop_listening_logs())
for unsub in unsubs:
unsub()
@callback
def log_messages(event: dict) -> None:
log_msg: LogMessage = event["log_message"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"type": "log_message",
"log_message": {
"timestamp": log_msg.timestamp,
"level": log_msg.level,
"primary_tags": log_msg.primary_tags,
"message": log_msg.formatted_message,
},
},
)
)
@callback
def log_config_updates(event: dict) -> None:
log_config: LogConfig = event["log_config"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"type": "log_config",
"log_config": dataclasses.asdict(log_config),
},
)
)
msg[DATA_UNSUBSCRIBE] = unsubs = [
driver.on("logging", log_messages),
driver.on("log config updated", log_config_updates),
]
connection.subscriptions[msg["id"]] = async_cleanup
await driver.async_start_listening_logs()
connection.send_result(msg[ID])
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/update_log_config",
vol.Required(ENTRY_ID): str,
vol.Required(CONFIG): vol.All(
vol.Schema(
{
vol.Optional(ENABLED): cv.boolean,
vol.Optional(LEVEL): vol.All(
cv.string,
vol.Lower,
vol.In([log_level.value for log_level in LogLevel]),
lambda val: LogLevel(val), # pylint: disable=unnecessary-lambda
),
vol.Optional(LOG_TO_FILE): cv.boolean,
vol.Optional(FILENAME): cv.string,
vol.Optional(FORCE_CONSOLE): cv.boolean,
}
),
cv.has_at_least_one_key(
ENABLED, FILENAME, FORCE_CONSOLE, LEVEL, LOG_TO_FILE
),
filename_is_present_if_logging_to_file,
),
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_update_log_config(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Update the driver log config."""
await client.driver.async_update_log_config(LogConfig(**msg[CONFIG]))
connection.send_result(
msg[ID],
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/get_log_config",
vol.Required(ENTRY_ID): str,
},
)
@websocket_api.async_response
@async_get_entry
async def websocket_get_log_config(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get log configuration for the Z-Wave JS driver."""
connection.send_result(
msg[ID],
dataclasses.asdict(client.driver.log_config),
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/update_data_collection_preference",
vol.Required(ENTRY_ID): str,
vol.Required(OPTED_IN): bool,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_update_data_collection_preference(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Update preference for data collection and enable/disable collection."""
opted_in = msg[OPTED_IN]
update_data_collection_preference(hass, entry, opted_in)
if opted_in:
await async_enable_statistics(client)
else:
await client.driver.async_disable_statistics()
connection.send_result(
msg[ID],
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/data_collection_status",
vol.Required(ENTRY_ID): str,
},
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_data_collection_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Return data collection preference and status."""
result = {
OPTED_IN: entry.data.get(CONF_DATA_COLLECTION_OPTED_IN),
ENABLED: await client.driver.async_is_statistics_enabled(),
}
connection.send_result(msg[ID], result)
class DumpView(HomeAssistantView):
"""View to dump the state of the Z-Wave JS server."""
url = "/api/zwave_js/dump/{config_entry_id}"
name = "api:zwave_js:dump"
async def get(self, request: web.Request, config_entry_id: str) -> web.Response:
"""Dump the state of Z-Wave."""
if not request["hass_user"].is_admin:
raise Unauthorized()
hass = request.app["hass"]
if config_entry_id not in hass.data[DOMAIN]:
raise web_exceptions.HTTPBadRequest
entry = hass.config_entries.async_get_entry(config_entry_id)
msgs = await dump.dump_msgs(entry.data[CONF_URL], async_get_clientsession(hass))
return web.Response(
body=json.dumps(msgs, indent=2) + "\n",
headers={
hdrs.CONTENT_TYPE: "application/json",
hdrs.CONTENT_DISPOSITION: 'attachment; filename="zwave_js_dump.json"',
},
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/version_info",
vol.Required(ENTRY_ID): str,
},
)
@websocket_api.async_response
@async_get_entry
async def websocket_version_info(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Get version info from the Z-Wave JS server."""
version_info = {
"driver_version": client.version.driver_version,
"server_version": client.version.server_version,
"min_schema_version": client.version.min_schema_version,
"max_schema_version": client.version.max_schema_version,
}
connection.send_result(
msg[ID],
version_info,
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/abort_firmware_update",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_node
async def websocket_abort_firmware_update(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Abort a firmware update."""
await node.async_abort_firmware_update()
connection.send_result(msg[ID])
def _get_firmware_update_progress_dict(
progress: FirmwareUpdateProgress,
) -> dict[str, int]:
"""Get a dictionary of firmware update progress."""
return {
"sent_fragments": progress.sent_fragments,
"total_fragments": progress.total_fragments,
}
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_firmware_update_status",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_subscribe_firmware_update_status(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Subscribe to the status of a firmware update."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_progress(event: dict) -> None:
progress: FirmwareUpdateProgress = event["firmware_update_progress"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
**_get_firmware_update_progress_dict(progress),
},
)
)
@callback
def forward_finished(event: dict) -> None:
finished: FirmwareUpdateFinished = event["firmware_update_finished"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"status": finished.status,
"wait_time": finished.wait_time,
},
)
)
msg[DATA_UNSUBSCRIBE] = unsubs = [
node.on("firmware update progress", forward_progress),
node.on("firmware update finished", forward_finished),
]
connection.subscriptions[msg["id"]] = async_cleanup
progress = node.firmware_update_progress
connection.send_result(
msg[ID], _get_firmware_update_progress_dict(progress) if progress else None
)
class FirmwareUploadView(HomeAssistantView):
"""View to upload firmware."""
url = r"/api/zwave_js/firmware/upload/{config_entry_id}/{node_id:\d+}"
name = "api:zwave_js:firmware:upload"
async def post(
self, request: web.Request, config_entry_id: str, node_id: str
) -> web.Response:
"""Handle upload."""
if not request["hass_user"].is_admin:
raise Unauthorized()
hass = request.app["hass"]
if config_entry_id not in hass.data[DOMAIN]:
raise web_exceptions.HTTPBadRequest
entry = hass.config_entries.async_get_entry(config_entry_id)
client: Client = hass.data[DOMAIN][config_entry_id][DATA_CLIENT]
node = client.driver.controller.nodes.get(int(node_id))
if not node:
raise web_exceptions.HTTPNotFound
# Increase max payload
request._client_max_size = 1024 * 1024 * 10 # pylint: disable=protected-access
data = await request.post()
if "file" not in data or not isinstance(data["file"], web_request.FileField):
raise web_exceptions.HTTPBadRequest
uploaded_file: web_request.FileField = data["file"]
try:
await begin_firmware_update(
entry.data[CONF_URL],
node,
uploaded_file.filename,
await hass.async_add_executor_job(uploaded_file.file.read),
async_get_clientsession(hass),
)
except BaseZwaveJSServerError as err:
raise web_exceptions.HTTPBadRequest from err
return self.json(None)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/check_for_config_updates",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_check_for_config_updates(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Check for config updates."""
config_update = await client.driver.async_check_for_config_updates()
connection.send_result(
msg[ID],
{
"update_available": config_update.update_available,
"new_version": config_update.new_version,
},
)
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/install_config_update",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_handle_failed_command
@async_get_entry
async def websocket_install_config_update(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Check for config updates."""
success = await client.driver.async_install_config_update()
connection.send_result(msg[ID], success)
def _get_controller_statistics_dict(
statistics: ControllerStatistics,
) -> dict[str, int]:
"""Get dictionary of controller statistics."""
return {
"messages_tx": statistics.messages_tx,
"messages_rx": statistics.messages_rx,
"messages_dropped_tx": statistics.messages_dropped_tx,
"messages_dropped_rx": statistics.messages_dropped_rx,
"nak": statistics.nak,
"can": statistics.can,
"timeout_ack": statistics.timeout_ack,
"timout_response": statistics.timeout_response,
"timeout_callback": statistics.timeout_callback,
}
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_controller_statistics",
vol.Required(ENTRY_ID): str,
}
)
@websocket_api.async_response
@async_get_entry
async def websocket_subscribe_controller_statistics(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
entry: ConfigEntry,
client: Client,
) -> None:
"""Subsribe to the statistics updates for a controller."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_stats(event: dict) -> None:
statistics: ControllerStatistics = event["statistics_updated"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"source": "controller",
**_get_controller_statistics_dict(statistics),
},
)
)
controller = client.driver.controller
msg[DATA_UNSUBSCRIBE] = unsubs = [
controller.on("statistics updated", forward_stats)
]
connection.subscriptions[msg["id"]] = async_cleanup
connection.send_result(
msg[ID], _get_controller_statistics_dict(controller.statistics)
)
def _get_node_statistics_dict(statistics: NodeStatistics) -> dict[str, int]:
"""Get dictionary of node statistics."""
return {
"commands_tx": statistics.commands_tx,
"commands_rx": statistics.commands_rx,
"commands_dropped_tx": statistics.commands_dropped_tx,
"commands_dropped_rx": statistics.commands_dropped_rx,
"timeout_response": statistics.timeout_response,
}
@websocket_api.require_admin
@websocket_api.websocket_command(
{
vol.Required(TYPE): "zwave_js/subscribe_node_statistics",
vol.Required(ENTRY_ID): str,
vol.Required(NODE_ID): int,
}
)
@websocket_api.async_response
@async_get_node
async def websocket_subscribe_node_statistics(
hass: HomeAssistant,
connection: ActiveConnection,
msg: dict,
node: Node,
) -> None:
"""Subsribe to the statistics updates for a node."""
@callback
def async_cleanup() -> None:
"""Remove signal listeners."""
for unsub in unsubs:
unsub()
@callback
def forward_stats(event: dict) -> None:
statistics: NodeStatistics = event["statistics_updated"]
connection.send_message(
websocket_api.event_message(
msg[ID],
{
"event": event["event"],
"source": "node",
"node_id": node.node_id,
**_get_node_statistics_dict(statistics),
},
)
)
msg[DATA_UNSUBSCRIBE] = unsubs = [node.on("statistics updated", forward_stats)]
connection.subscriptions[msg["id"]] = async_cleanup
connection.send_result(msg[ID], _get_node_statistics_dict(node.statistics))
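# Illustrative sketch (not part of the original module): a forwarded node
# statistics event delivered to a subscriber looks roughly like this; the id
# and counter values are hypothetical.
#
#   {
#       "id": 7,
#       "type": "event",
#       "event": {
#           "event": "statistics updated",
#           "source": "node",
#           "node_id": 12,
#           "commands_tx": 5,
#           "commands_rx": 4,
#           "commands_dropped_tx": 0,
#           "commands_dropped_rx": 0,
#           "timeout_response": 1,
#       },
#   }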
|
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of setmeta command for setting cloud object metadata."""
from __future__ import absolute_import
import time
from apitools.base.py import encoding
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import Preconditions
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.name_expansion import NameExpansionIterator
from gslib.name_expansion import SeekAheadNameExpansionIterator
from gslib.parallelism_framework_util import PutToQueueWithTimeout
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.thread_message import MetadataMessage
from gslib.translation_helper import CopyObjectMetadata
from gslib.translation_helper import ObjectMetadataFromHeaders
from gslib.translation_helper import PreconditionsFromHeaders
from gslib.util import GetCloudApiInstance
from gslib.util import InsistAsciiHeader
from gslib.util import InsistAsciiHeaderValue
from gslib.util import IsCustomMetadataHeader
from gslib.util import NO_MAX
from gslib.util import Retry
_SYNOPSIS = """
gsutil setmeta -h [header:value|header] ... url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The gsutil setmeta command allows you to set or remove the metadata on one
or more objects. It takes one or more header arguments followed by one or
more URLs, where each header argument is in one of two forms:
- if you specify header:value, it will set the given header on all
named objects.
- if you specify header (with no value), it will remove the given header
from all named objects.
For example, the following command would set the Content-Type and
Cache-Control and remove the Content-Disposition on the specified objects:
gsutil setmeta -h "Content-Type:text/html" \\
-h "Cache-Control:public, max-age=3600" \\
-h "Content-Disposition" gs://bucket/*.html
If you have a large number of objects to update you might want to use the
gsutil -m option, to perform a parallel (multi-threaded/multi-processing)
update:
gsutil -m setmeta -h "Content-Type:text/html" \\
-h "Cache-Control:public, max-age=3600" \\
-h "Content-Disposition" gs://bucket/*.html
You can also use the setmeta command to set custom metadata on an object:
gsutil setmeta -h "x-goog-meta-icecreamflavor:vanilla" gs://bucket/object
See "gsutil help metadata" for details about how you can set metadata
while uploading objects, what metadata fields can be set and the meaning of
these fields, use of custom metadata, and how to view currently set metadata.
NOTE: By default, publicly readable objects are served with a Cache-Control
header allowing such objects to be cached for 3600 seconds. For more details
about this default behavior see the CACHE-CONTROL section of
"gsutil help metadata". If you need to ensure that updates become visible
immediately, you should set a Cache-Control header of "Cache-Control:private,
max-age=0, no-transform" on such objects. You can do this with the command:
gsutil setmeta -h "Content-Type:text/html" \\
-h "Cache-Control:private, max-age=0, no-transform" gs://bucket/*.html
The setmeta command reads each object's current generation and metageneration
and uses those as preconditions unless they are otherwise specified by
top-level arguments. For example:
gsutil -h "x-goog-if-metageneration-match:2" setmeta
-h "x-goog-meta-icecreamflavor:vanilla"
will set the icecreamflavor:vanilla metadata if the current live object has a
metageneration of 2.
<B>OPTIONS</B>
-h Specifies a header:value to be added, or header to be removed,
from each named object.
""")
# Setmeta assumes a header-like model which doesn't line up with the JSON way
# of doing things. This list comes from functionality that was supported by
# gsutil3 at the time gsutil4 was released.
SETTABLE_FIELDS = ['cache-control', 'content-disposition',
'content-encoding', 'content-language',
'content-type']
def _SetMetadataExceptionHandler(cls, e):
"""Exception handler that maintains state about post-completion status."""
cls.logger.error(e)
cls.everything_set_okay = False
def _SetMetadataFuncWrapper(cls, name_expansion_result, thread_state=None):
cls.SetMetadataFunc(name_expansion_result, thread_state=thread_state)
class SetMetaCommand(Command):
"""Implementation of gsutil setmeta command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'setmeta',
command_name_aliases=['setheader'],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=NO_MAX,
supported_sub_args='h:rR',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='setmeta',
help_name_aliases=['setheader'],
help_type='command_help',
help_one_line_summary='Set metadata on already uploaded objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def RunCommand(self):
"""Command entry point for the setmeta command."""
headers = []
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-h':
if 'x-goog-acl' in a or 'x-amz-acl' in a:
raise CommandException(
'gsutil setmeta no longer allows canned ACLs. Use gsutil acl '
'set ... to set canned ACLs.')
headers.append(a)
(metadata_minus, metadata_plus) = self._ParseMetadataHeaders(headers)
self.metadata_change = metadata_plus
for header in metadata_minus:
self.metadata_change[header] = ''
if len(self.args) == 1 and not self.recursion_requested:
url = StorageUrlFromString(self.args[0])
if not (url.IsCloudUrl() and url.IsObject()):
raise CommandException('URL (%s) must name an object' % self.args[0])
# Used to track if any objects' metadata failed to be set.
self.everything_set_okay = True
self.preconditions = PreconditionsFromHeaders(self.headers)
name_expansion_iterator = NameExpansionIterator(
self.command_name, self.debug, self.logger, self.gsutil_api,
self.args, self.recursion_requested, all_versions=self.all_versions,
continue_on_error=self.parallel_operations,
bucket_listing_fields=['generation', 'metadata', 'metageneration'])
seek_ahead_iterator = SeekAheadNameExpansionIterator(
self.command_name, self.debug, self.GetSeekAheadGsutilApi(),
self.args, self.recursion_requested,
all_versions=self.all_versions, project_id=self.project_id)
try:
# Perform requests in parallel (-m) mode, if requested, using
# configured number of parallel processes and threads. Otherwise,
# perform requests with sequential function calls in current process.
self.Apply(_SetMetadataFuncWrapper, name_expansion_iterator,
_SetMetadataExceptionHandler, fail_on_error=True,
seek_ahead_iterator=seek_ahead_iterator)
except AccessDeniedException as e:
if e.status == 403:
self._WarnServiceAccounts()
raise
if not self.everything_set_okay:
raise CommandException('Metadata for some objects could not be set.')
return 0
@Retry(PreconditionException, tries=3, timeout_secs=1)
def SetMetadataFunc(self, name_expansion_result, thread_state=None):
"""Sets metadata on an object.
Args:
name_expansion_result: NameExpansionResult describing target object.
thread_state: gsutil Cloud API instance to use for the operation.
"""
gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
exp_src_url = name_expansion_result.expanded_storage_url
self.logger.info('Setting metadata on %s...', exp_src_url)
cloud_obj_metadata = encoding.JsonToMessage(
apitools_messages.Object, name_expansion_result.expanded_result)
preconditions = Preconditions(
gen_match=self.preconditions.gen_match,
meta_gen_match=self.preconditions.meta_gen_match)
if preconditions.gen_match is None:
preconditions.gen_match = cloud_obj_metadata.generation
if preconditions.meta_gen_match is None:
preconditions.meta_gen_match = cloud_obj_metadata.metageneration
# Patch handles the patch semantics for most metadata, but we need to
# merge the custom metadata field manually.
patch_obj_metadata = ObjectMetadataFromHeaders(self.metadata_change)
api = gsutil_api.GetApiSelector(provider=exp_src_url.scheme)
# For XML we only want to patch through custom metadata that has
# changed. For JSON we need to build the complete set.
if api == ApiSelector.XML:
pass
elif api == ApiSelector.JSON:
CopyObjectMetadata(patch_obj_metadata, cloud_obj_metadata,
override=True)
patch_obj_metadata = cloud_obj_metadata
# Patch body does not need the object generation and metageneration.
patch_obj_metadata.generation = None
patch_obj_metadata.metageneration = None
gsutil_api.PatchObjectMetadata(
exp_src_url.bucket_name, exp_src_url.object_name, patch_obj_metadata,
generation=exp_src_url.generation, preconditions=preconditions,
provider=exp_src_url.scheme, fields=['id'])
PutToQueueWithTimeout(gsutil_api.status_queue,
MetadataMessage(message_time=time.time()))
def _ParseMetadataHeaders(self, headers):
"""Validates and parses metadata changes from the headers argument.
Args:
headers: Header dict to validate and parse.
Returns:
      (metadata_minus, metadata_plus): Tuple of header sets to remove and add.
"""
metadata_minus = set()
cust_metadata_minus = set()
metadata_plus = {}
cust_metadata_plus = {}
# Build a count of the keys encountered from each plus and minus arg so we
# can check for dupe field specs.
num_metadata_plus_elems = 0
num_cust_metadata_plus_elems = 0
num_metadata_minus_elems = 0
num_cust_metadata_minus_elems = 0
for md_arg in headers:
# Use partition rather than split, as we should treat all characters past
# the initial : as part of the header's value.
parts = md_arg.partition(':')
(header, _, value) = parts
InsistAsciiHeader(header)
# Translate headers to lowercase to match the casing assumed by our
# sanity-checking operations.
lowercase_header = header.lower()
# This check is overly simple; it would be stronger to check, for each
# URL argument, whether the header starts with the provider
# metadata_prefix, but here we just parse the spec once, before
# processing any of the URLs. This means we will not detect if the user
      # tries to set an x-goog-meta- field on another provider's object,
# for example.
is_custom_meta = IsCustomMetadataHeader(lowercase_header)
if not is_custom_meta and lowercase_header not in SETTABLE_FIELDS:
raise CommandException(
'Invalid or disallowed header (%s).\nOnly these fields (plus '
'x-goog-meta-* fields) can be set or unset:\n%s' % (
header, sorted(list(SETTABLE_FIELDS))))
if value:
if is_custom_meta:
# Allow non-ASCII data for custom metadata fields.
cust_metadata_plus[header] = value
num_cust_metadata_plus_elems += 1
else:
# Don't unicode encode other fields because that would perturb their
# content (e.g., adding %2F's into the middle of a Cache-Control
# value).
InsistAsciiHeaderValue(header, value)
value = str(value)
metadata_plus[lowercase_header] = value
num_metadata_plus_elems += 1
else:
if is_custom_meta:
cust_metadata_minus.add(header)
num_cust_metadata_minus_elems += 1
else:
metadata_minus.add(lowercase_header)
num_metadata_minus_elems += 1
if (num_metadata_plus_elems != len(metadata_plus)
or num_cust_metadata_plus_elems != len(cust_metadata_plus)
or num_metadata_minus_elems != len(metadata_minus)
or num_cust_metadata_minus_elems != len(cust_metadata_minus)
or metadata_minus.intersection(set(metadata_plus.keys()))):
raise CommandException('Each header must appear at most once.')
metadata_plus.update(cust_metadata_plus)
metadata_minus.update(cust_metadata_minus)
return (metadata_minus, metadata_plus)
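# Illustrative sketch (not part of the original command): how
# _ParseMetadataHeaders splits header arguments; the header values are
# hypothetical.
#
#   headers = ['Content-Type:text/html', 'Content-Disposition',
#              'x-goog-meta-color:blue']
#   metadata_minus, metadata_plus = self._ParseMetadataHeaders(headers)
#   # metadata_minus == {'content-disposition'}
#   # metadata_plus == {'content-type': 'text/html', 'x-goog-meta-color': 'blue'}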
|
|
import os
import re
from Utils.release_notes_generator import (get_release_notes_dict,
generate_release_notes_summary,
get_pack_entities,
get_pack_version_from_path,
read_and_format_release_note,
merge_version_blocks,
EMPTY_LINES_REGEX,
get_new_entity_record,
construct_entities_block,
aggregate_release_notes,
aggregate_release_notes_for_marketplace)
TEST_DATA_PATH = 'Tests/scripts/infrastructure_tests/tests_data/RN_tests_data'
VERSION = 'VERSION'
ASSET_ID = 'ASSET_ID'
class TestReadAndFormatReleaseNote:
def test_sanity(self):
"""
Given
- A release note file with 2 Integrations:
- FakePack1_Integration1
- FakePack1_Integration2
When
- Formatting a release notes file.
Then
        - Ensure both integrations appear in the formatted string
"""
rn_file = os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md')
formatted_text = read_and_format_release_note(rn_file)
assert 'FakePack1_Integration1' in formatted_text
assert 'FakePack1_Integration2' in formatted_text
def test_ignored_release_notes_block(self):
"""
Given
- A release note file with an Integration and a Script:
- FakePack4_Script1
- FakePack4_Integration1 - should be ignored
When
- Formatting a release notes file.
Then
- Ensure only the script appears in the formatted string
"""
rn_file = os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_1_0.md')
formatted_text = read_and_format_release_note(rn_file)
assert 'FakePack4_Script1' in formatted_text
assert 'FakePack4_Integration1' not in formatted_text
def test_ignored_entire_release_note(self):
"""
Given
- A release note file with an Integration and a Script:
- FakePack4_Script1
- FakePack4_Integration1
When
- Formatting a release notes file.
Then
- Ensure formatted string is empty.
"""
rn_file = os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_0_1.md')
formatted_text = read_and_format_release_note(rn_file)
assert formatted_text == ''
# pylint: disable=W0201
class TestGenerateReleaseNotesSummary:
def setup(self):
self._version = VERSION
self._asset_id = ASSET_ID
self._outfile = 'temp.md'
def test_added_pack(self):
"""
Given
- A repository of two new packs:
- FakePack3 version 1.0.0
- FakePack4 version 1.0.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the release notes summary contains two packs:
- FakePack3 with version 1.0.0
- FakePack4 with version 1.0.0
"""
new_packs_rn = {
'FakePack3': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack3')),
'FakePack4': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack4')),
}
        packs_metadata_dict = {
'FakePack3': {},
'FakePack4': {}
}
rn_summary = generate_release_notes_summary(
            new_packs_rn, {}, packs_metadata_dict, self._version, self._asset_id, 'temp.md')
assert '## New: FakePack3 Pack v1.0.0' in rn_summary
assert '## New: FakePack4 Pack v1.0.0' in rn_summary
def test_added_partner_pack(self):
"""
Given
- A repository of two new packs:
- FakePack3 version 1.0.0, metadata "supports" field has value "partner"
- FakePack4 version 1.0.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the release notes summary contains two packs:
- FakePack3 with version 1.0.0 and has the string "(Partner Supported)" after the version
            - FakePack4 with version 1.0.0 does not have the string "(Partner Supported)" after the version
"""
new_packs_rn = {
'FakePack3': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack3')),
'FakePack4': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack4')),
}
        packs_metadata_dict = {
'FakePack3': {'support': 'partner'},
'FakePack4': {'support': 'xsoar'}
}
rn_summary = generate_release_notes_summary(
            new_packs_rn, {}, packs_metadata_dict, self._version, self._asset_id, 'temp.md')
assert '## New: FakePack3 Pack v1.0.0 (Partner Supported)' in rn_summary
assert '## New: FakePack4 Pack v1.0.0' in rn_summary
assert '## New: FakePack4 Pack v1.0.0 (Partner Supported)' not in rn_summary
def test_added_contribution_pack(self):
"""
Given
- A repository of two new packs:
- FakePack3 version 1.0.0, metadata "supports" field has value "contribution"
- FakePack4 version 1.0.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the release notes summary contains two packs:
- FakePack3 with version 1.0.0 and has the string "(Community Contributed)" after the version
            - FakePack4 with version 1.0.0 does not have the string "(Community Contributed)" after the version
"""
new_packs_rn = {
'FakePack3': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack3')),
'FakePack4': get_pack_entities(os.path.join(TEST_DATA_PATH, 'FakePack4')),
}
        packs_metadata_dict = {
'FakePack3': {'support': 'community'},
'FakePack4': {'support': 'xsoar'}
}
rn_summary = generate_release_notes_summary(
            new_packs_rn, {}, packs_metadata_dict, self._version, self._asset_id, 'temp.md')
assert '## New: FakePack3 Pack v1.0.0 (Community Contributed)' in rn_summary
assert '## New: FakePack4 Pack v1.0.0' in rn_summary
assert '## New: FakePack4 Pack v1.0.0 (Community Contributed)' not in rn_summary
def test_two_packs(self):
"""
Given
- A repository of two packs updates and release notes:
- FakePack1 with versions 1.1.0 and 2.0.0
- FakePack2 version 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note)).
- the release notes summary contains two packs with 3 updates:
- FakePack1 with versions 1.1.0 and 2.0.0
- FakePack2 with versions 1.1.0
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack2', 'ReleaseNotes', '1_1_0.md'),
]
rn_dict, _ = get_release_notes_dict(release_notes_files)
        packs_metadata_dict = {
'FakePack1': {},
'FakePack2': {}
}
assert '1.1.0' in rn_dict['FakePack1'].keys()
assert '2.0.0' in rn_dict['FakePack1'].keys()
assert '1.1.0' in rn_dict['FakePack2'].keys()
        rn_summary = generate_release_notes_summary(
            {}, rn_dict, packs_metadata_dict, self._version, self._asset_id, self._outfile)
assert VERSION in rn_summary and ASSET_ID in rn_summary # summary title
assert '### FakePack1 Pack v2.0.0' in rn_summary
assert '##### FakePack1_Integration1' in rn_summary
assert 'This is a fake1 minor release note.' in rn_summary
assert 'This is a fake1 major release note.' in rn_summary
assert '### FakePack2 Pack v1.1.0' in rn_summary
assert '##### FakePack2_Script1' in rn_summary
assert 'This is a fake2 major release note.' in rn_summary
def test_updated_partner_pack(self):
"""
Given
- A repository of two packs updates and release notes:
- FakePack1 with version 2.0.0 metadata "supports" field has value "partner"
- FakePack2 version 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note)).
          - the release notes summary contains two packs with the following:
- FakePack1 with version 2.0.0 and has the string "(Partner Supported)" after the version
            - FakePack2 with version 1.1.0 does not have the string "(Partner Supported)" after the version
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack2', 'ReleaseNotes', '1_1_0.md'),
]
rn_dict, _ = get_release_notes_dict(release_notes_files)
        packs_metadata_dict = {
'FakePack1': {'support': 'partner'},
'FakePack2': {'support': 'xsoar'}
}
assert '2.0.0' in rn_dict['FakePack1'].keys()
assert '1.1.0' in rn_dict['FakePack2'].keys()
        rn_summary = generate_release_notes_summary(
            {}, rn_dict, packs_metadata_dict, self._version, self._asset_id, self._outfile)
assert VERSION in rn_summary and ASSET_ID in rn_summary # summary title
assert '### FakePack1 Pack v2.0.0 (Partner Supported)' in rn_summary
assert '### FakePack2 Pack v1.1.0' in rn_summary
assert '### FakePack2 Pack v1.1.0 (Partner Supported)' not in rn_summary
def test_updated_community_pack(self):
"""
Given
- A repository of two packs updates and release notes:
- FakePack1 with version 2.0.0 metadata "supports" field has value "community"
- FakePack2 version 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note)).
- the release notes summary contains two packs with the following:
- FakePack1 with version 2.0.0 and has the string "(Community Supported)" after the version
- FakePack2 with version 1.1.0 DOES NOT have the string "(Community Supported)" after the version
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack2', 'ReleaseNotes', '1_1_0.md'),
]
rn_dict, _ = get_release_notes_dict(release_notes_files)
        packs_metadata_dict = {
'FakePack1': {'support': 'community'},
'FakePack2': {'support': 'xsoar'}
}
assert '2.0.0' in rn_dict['FakePack1'].keys()
assert '1.1.0' in rn_dict['FakePack2'].keys()
        rn_summary = generate_release_notes_summary(
            {}, rn_dict, packs_metadata_dict, self._version, self._asset_id, self._outfile)
assert VERSION in rn_summary and ASSET_ID in rn_summary # summary title
assert '### FakePack1 Pack v2.0.0 (Community Contributed)' in rn_summary
assert '### FakePack2 Pack v1.1.0' in rn_summary
assert '### FakePack2 Pack v1.1.0 (Community Contributed)' not in rn_summary
def test_release_notes_summary_with_empty_lines_in_rn(self):
"""
Given
- A repository contains a FakePack3 update with ignored release notes.
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a dict of (pack_name, dict(pack_version, release_note)).
- empty lines (with dashes) are removed from the release notes summary.
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack3', 'ReleaseNotes', '1_0_1.md')
]
        packs_metadata_dict = {
'FakePack3': {}
}
rn_dict, _ = get_release_notes_dict(release_notes_files)
assert '1.0.1' in rn_dict['FakePack3'].keys()
assert len(rn_dict) == 1
        rn_summary = generate_release_notes_summary(
            {}, rn_dict, packs_metadata_dict, self._version, self._asset_id, self._outfile)
print(rn_summary)
match = re.search(EMPTY_LINES_REGEX, rn_summary)
assert match is None
def test_release_notes_summary_with_ignored_rns(self):
"""
Given
- A repository of a packs update and release notes:
- FakePack4 with versions 1.0.1 and 1.1.0
When
- Generating a release notes summary file.
Then
- Ensure release notes generator creates a valid summary, by checking:
- the output of get_release_notes_dict() is a valid dict of (pack_name, dict(pack_version, release_note))
          - the release notes summary contains one pack with 1 update:
            - FakePack4 version 1.1.0
          - the summary does not contain the 1.0.1 release notes, because they are ignored.
"""
release_notes_files = [
os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_0_1.md'),
os.path.join(TEST_DATA_PATH, 'FakePack4', 'ReleaseNotes', '1_1_0.md'),
]
        packs_metadata_dict = {
'FakePack4': {}
}
rn_dict, _ = get_release_notes_dict(release_notes_files)
assert '1.1.0' in rn_dict['FakePack4'].keys()
assert len(rn_dict) == 1
        rn_summary = generate_release_notes_summary(
            {}, rn_dict, packs_metadata_dict, self._version, self._asset_id, self._outfile)
assert '### FakePack4 Pack v1.1.0' in rn_summary
assert '##### FakePack4_Script1' in rn_summary
class TestMergeVersionBlocks:
def test_aggregate_release_notes_for_marketplace(self):
"""
Given
- Two release notes files with content entity instance wrapped with ** and entity type contains spaces.
When
- Merging the two release notes files into one file.
Then
- Ensure that the content entity instance is wrapped with **.
- Ensure that the content entity type contains whitespace.
- Ensure that the content of both RN files appears in the result file.
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_1.md'),
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_2.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[get_pack_version_from_path(path)] = file_.read()
rn_block = aggregate_release_notes_for_marketplace(pack_versions_dict)
assert 'Incident Fields' in rn_block
assert '**XDR Alerts**' in rn_block
assert 'First' in rn_block
assert 'Second' in rn_block
assert rn_block.endswith('\n')
assert rn_block.startswith('\n')
def test_spaced_content_entity_and_old_format(self):
"""
Given
- Two release notes files with content entity instance wrapped with ** and entity type contains spaces.
When
- Merging the two release notes files into one file.
Then
- Ensure that the content entity instance is wrapped with **.
- Ensure that the content entity type contains whitespace.
- Ensure that the content of both RN files appears in the result file.
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_1.md'),
os.path.join(TEST_DATA_PATH, 'FakePack6', 'ReleaseNotes', '1_0_2.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[get_pack_version_from_path(path)] = file_.read()
rn_block, latest_version = merge_version_blocks(pack_versions_dict)
assert 'Incident Fields' in rn_block
assert '**XDR Alerts**' in rn_block
assert 'First' in rn_block
assert 'Second' in rn_block
assert latest_version == '1.0.2'
def test_sanity(self):
"""
        Given
        - Two changes in different content types.
        When
        - Merging two pack versions that modified different items.
        Then
        - Ensure the type sections appear one after the other.
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_1_0.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[get_pack_version_from_path(path)] = file_.read()
rn_block = aggregate_release_notes('FakePack', pack_versions_dict, {})
assert 'FakePack1_Playbook1' in rn_block
assert 'FakePack1_Playbook2' in rn_block
assert 'FakePack1_Integration1' in rn_block
assert 'FakePack1_Integration2' in rn_block
assert 'v2.1.0' in rn_block
assert 'v1.1.0' not in rn_block
    def test_similar_entities(self):
"""
        Given
        - Two changes in the same content entities.
        When
        - Merging two pack versions that modified the same items.
        Then
        - Ensure only one Integrations section appears.
        - Ensure each entity title appears once, with the comments from both versions under it.
"""
release_notes_paths = [
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '1_1_0.md'),
os.path.join(TEST_DATA_PATH, 'FakePack1', 'ReleaseNotes', '2_0_0.md'),
]
pack_versions_dict = {}
for path in release_notes_paths:
with open(path) as file_:
pack_versions_dict[get_pack_version_from_path(path)] = file_.read()
rn_block = aggregate_release_notes('FakePack', pack_versions_dict, {})
assert rn_block.count('Integrations') == 1
assert rn_block.count('FakePack1_Integration1') == 1
assert rn_block.count('FakePack1_Integration2') == 1
assert 'v2.0.0' in rn_block
assert 'v1.1.0' not in rn_block
assert 'fake1 minor' in rn_block
assert 'fake2 minor' in rn_block
assert 'fake1 major' in rn_block
assert 'fake2 major' in rn_block
def test_get_new_entity_record_integration(self):
"""
Given
fake integration path.
When
getting entity record for integration.
Then
Ensure the method is valid and returns the integration name and description.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Integrations', 'fake_integration.yml'))
assert name == 'fake_integration'
assert description == 'Use the Zoom integration manage your Zoom users and meetings'
def test_get_new_entity_record_layoutscontainer(self):
"""
Given
fake layoutscontainer path.
When
getting entity record for layoutscontainer.
Then
Ensure the method is valid and returns the layoutscontainer name and the fromversion.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Layouts', 'layoutscontainer-fake.json'))
assert name == 'layoutscontainer-fake'
assert description == '(Available from Cortex XSOAR 6.0.0)'
def test_get_new_entity_record_layout(self):
"""
Given
fake layout path.
When
getting entity record for layout.
Then
Ensure the method is valid and returns the layout name and description.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Layouts', 'fake_layout.json'))
assert name == 'Fake layout - Close'
assert description == ''
def test_get_new_entity_record_classifier(self):
"""
Given
fake classifier path.
When
getting entity record for classifier.
Then
Ensure the method is valid and returns the classifier name and description.
"""
name, description = get_new_entity_record(os.path.join(TEST_DATA_PATH,
'FakePack5', 'Classifiers', 'fake_classifier.json'))
assert name == 'Fake classifier'
assert description == 'Maps incoming Prisma Cloud event fields.'
def test_construct_entities_block_integration(self):
"""
Given
integration entities_data.
When
generates pack release note block for integration.
Then
Ensure the method is valid and the release note block contains Tanium integration.
"""
entities_data = {'Integrations': {'Tanium': 'Tanium endpoint security and systems management'}}
rn = construct_entities_block(entities_data)
assert '### Integrations' in rn
assert '##### Tanium' in rn
assert 'Tanium endpoint security and systems management' in rn
def test_construct_entities_block_indicator_types(self):
"""
Given
indicator type entities_data.
When
generates pack release note block for indicator type.
Then
Ensure the method is valid and the release note block contains accountRep indicator.
"""
entities_data = {'IndicatorTypes': {'accountRep': ''}}
rn = construct_entities_block(entities_data)
assert '### Indicator Types' in rn
assert '- **accountRep**' in rn
|
|
#!/usr/bin/env python
"""
Blast
Genome library
Classes to handle Blast analysis against a local database
"""
import logging
import os
import subprocess
import sys
if sys.version_info[0] < 3:
from StringIO import StringIO # Python 2
else:
from io import StringIO # Python 3
__author__ = "Marco Galardini"
################################################################################
# Log setup
logger = logging.getLogger('ductape.blast')
################################################################################
# Classes
# Useful class for parsing
class BlastHit:
def __init__(self,query,align,hsp):
'''
Query, Alignment and Hsp are all Biopython objects derived from
Blast results parsing
'''
self.query = query.query
self.query_id = query.query.split(' ')[0]
self.query_len = int(query.query_length)
self.hit = align.hit_id
self.hit_desc = align.hit_def
self.hit_len = int(align.length)
self.identity = float(hsp.identities) / float(hsp.align_length)
self.align_len = int(hsp.align_length)
self.mismatches = int(hsp.align_length - hsp.identities - hsp.gaps)
self.gaps = int(hsp.gaps)
self.query_start = int(hsp.query_start)
self.query_end = int(hsp.query_end)
self.subjct_start = int(hsp.sbjct_start)
self.subjct_end = int(hsp.sbjct_end)
self.evalue = float(hsp.expect)
self.bits = float(hsp.bits)
def getHomologyIndex(self):
'''
        Get an index useful for assessing the quality of the homology measure
'''
import math
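        # Assumed reading of the expression below (a sketch, not original
        # documentation): reward high identity (squared), scaled by hit length
        # and alignment coverage relative to the query:
        #   HI = identity^2 * (hit_len / query_len) * (align_len / query_len)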
        HI = (math.pow(self.identity, 2) *
              (float(self.hit_len) / float(self.query_len)) *
              (float(self.align_len) / float(self.query_len)))
return HI
def getHitCoverage(self):
'''
Get the hit coverage
'''
return float(float(self.align_len)/float(self.hit_len))
def getQueryCoverage(self):
'''
Get the query coverage
'''
return float(float(self.align_len)/float(self.query_len))
def getKO(self):
'''
Assuming that this hit derives from a KEGG DB
Returns the KO ID
'''
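        # Illustrative example (not from the original code): a hit_desc such
        # as "...; K00845 glucokinase" would yield "K00845".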
import re
a=re.search("K[0-9]{1,}",
self.hit_desc)
if a is not None:
return a.group()
else:
return None
class Blaster(object):
def __init__(self, useDisk=False):
self._hits = None
self._out = ''
# No-disk
self._useDisk = bool(useDisk)
self.retrieved = ''
self.query = ''
self.out = ''
def createDB(self,seqFile,dbType,outFile='BlastDB',parseIDs=True,
title='Generic Blast DB'):
'''Generation of a Blast DB'''
cmd = ('makeblastdb -in %s -dbtype %s -out %s -title "%s"')
cmd = cmd%(seqFile,dbType,outFile,title)
if parseIDs:
cmd = cmd+' -parse_seqids'
logger.debug('Create Blast DB cmd: %s'%cmd)
proc = subprocess.Popen(cmd,shell=(sys.platform!="win32"),
stdin=subprocess.PIPE,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = proc.communicate()
return_code = proc.returncode
if return_code != 0:
logger.warning('Blast DB creation failed with error %d'
%return_code)
logger.warning('%s'%str(out[1]))
return bool(not return_code)
def retrieveFromDB(self, db, accession, out='out.fsa', isFile=False):
'''Retrieve the desired sequence(s) from a Blast DB'''
if not isFile:
cmd=('blastdbcmd -db %s -entry "%s" -long_seqids'
%(db,accession))
else:
cmd=('blastdbcmd -db %s -entry_batch "%s" -long_seqids'
%(db,accession))
if self._useDisk:
cmd += ' > %s'%out
logger.debug('BlastDBcmd cmd: %s'%cmd)
proc = subprocess.Popen(cmd,shell=(sys.platform!="win32"),
stdin=subprocess.PIPE,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = proc.communicate()
if not self._useDisk:
self.retrieved = out[0]
return_code = proc.returncode
if return_code != 0:
logger.warning('BlastDBcmd failed with error %d'
%return_code)
logger.warning('%s'%str(out[1]))
return bool(not return_code)
def runBlast(self, queryFile, db, outFile='', evalue = 10,
task = '', ncpus = 1, additional = '', outfmt='5'):
'''Run Blast with the desired parameters'''
# Create the command line
from Bio.Blast.Applications import NcbiblastpCommandline
self._out = outFile
cmd = NcbiblastpCommandline(db=db,
evalue=float(evalue),
outfmt=outfmt,
num_threads=ncpus)
if self._useDisk:
cmd.set_parameter('query', queryFile)
if outFile != '':
cmd.set_parameter('out', outFile)
if task != '':
cmd.set_parameter('task', task)
if additional !='':
cmd = str(cmd)+' '+additional
cmd=str(cmd)
logger.debug('Run Blast cmd: %s'%cmd)
# Run Blast and check the return code
proc = subprocess.Popen(cmd,shell=(sys.platform!="win32"),
stdin=subprocess.PIPE,stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if not self._useDisk:
if isinstance(self.query, str):
proc.stdin.write(self.query.encode())
else:
proc.stdin.write(self.query)
out = proc.communicate()
if not self._useDisk:
self.out = out[0]
return_code = proc.returncode
if return_code != 0:
logger.warning('Run Blast failed with error %d'
%return_code)
logger.warning('%s'%str(out[1]))
return bool(not return_code)
def parseBlast(self, fileOut):
'''Parse the xml blast output -- default file is self._out'''
from Bio.Blast import NCBIXML
if self._useDisk:
self._out = fileOut
handle = open(fileOut)
else:
handle = StringIO(self.out.decode('utf-8'))
self._hits = NCBIXML.parse(handle)
def getHits(self,expect=10.0):
'''Returns a Generator query -> BlastObj'''
        if self._hits is None:
self.parseBlast(self._out)
for BlastQuery in self._hits:
hits = []
for alignment in BlastQuery.alignments:
for hsp in alignment.hsps:
                    if float(hsp.expect) > expect:
                        continue
# Save the hit details
h=BlastHit(BlastQuery,alignment,hsp)
hits.append(h)
yield hits
class RunBBH(object):
def __init__(self, query, queryid,
source, target, targetorg,
evalue, matrix, short = False, uniqueid = 1,
kegg = False, ko_entry = None, ko_id = None, useDisk=True):
for x in [query, queryid, source, target, targetorg,
evalue, matrix, short, uniqueid,
kegg, ko_entry, ko_id, useDisk]:
if isinstance(x, bytes):
x = x.decode('utf-8')
self.query = query
self.queryid = queryid
self.source = source
self.target = target
self.targetorg = targetorg
self.evalue = evalue
self.matrix = matrix
self.short = short
self.uniqueid = uniqueid
self.kegg = kegg
self.ko_entry = ko_entry
self.ko_id = ko_id
self.useDisk = bool(useDisk)
self.out = self.queryid + '_' + str(self.uniqueid) +'.xml'
self.blaster = Blaster(useDisk=self.useDisk)
self.additional = (' -soft_masking true -dbsize 500000000 '+
'-use_sw_tback -max_target_seqs 1 -matrix %s'%self.matrix)
if not self.useDisk:
self.blaster.query = self.query
self.queryreturn = ''
else:
self.queryreturn = self.query + '_' + str(self.uniqueid) + '_return'
def _firstRun(self):
if self.short:
res = self.blaster.runBlast(self.query, self.target, self.out,
evalue = self.evalue,
task='blastp-short',
additional=self.additional)
else:
res = self.blaster.runBlast(self.query, self.target, self.out,
evalue = self.evalue,
additional = self.additional)
return res
def _secondRun(self, hit_len = None):
# Second Blast run
if not hit_len:
if self.short:
hit_len = 29
else:
hit_len = 100
if not self.useDisk:
self.blaster.query = self.blaster.retrieved
if hit_len < 30:
res = self.blaster.runBlast(self.queryreturn, self.source, self.out,
evalue = self.evalue,
task='blastp-short',
additional=self.additional)
else:
res = self.blaster.runBlast(self.queryreturn, self.source, self.out,
evalue = self.evalue,
additional=self.additional)
return res
def __call__(self):
if not self.kegg:
# First Blast run
res = self._firstRun()
if not res:
if self.useDisk:
try:
os.remove(self.out)
                except:
                    pass
return [None, self.targetorg, False]
self.blaster.parseBlast(self.out)
for hits in self.blaster.getHits(self.evalue):
if len(hits) == 0:
break
targethit = hits[0]
if not self.blaster.retrieveFromDB(self.target, targethit.hit,
out=self.queryreturn):
if self.useDisk:
try:
os.remove(self.out)
                    except:
                        pass
return [None, self.targetorg, False]
# Second Blast run
res = self._secondRun(targethit.hit_len)
break
else:
if not self.blaster.retrieveFromDB(self.target, self.ko_entry,
out=self.queryreturn):
if self.useDisk:
try:
os.remove(self.out)
except:
pass
return [None, self.targetorg, False]
res = self._secondRun()
if not res:
if self.useDisk:
try:
os.remove(self.out)
os.remove(self.queryreturn)
            except:
                pass
return [None, self.targetorg, False]
self.blaster.parseBlast(self.out)
for hits in self.blaster.getHits(self.evalue):
if len(hits) == 0:
return [None, self.targetorg, True]
sourcehit = hits[0]
if self.queryid == sourcehit.hit:
if self.useDisk:
os.remove(self.out)
os.remove(self.queryreturn)
if self.kegg:
return [self.ko_id,self.queryid, True]
else:
return [sourcehit.query_id.replace('lcl|',''),
self.targetorg, True]
else:
if self.useDisk:
os.remove(self.out)
os.remove(self.queryreturn)
return [None, self.targetorg, True]
if self.useDisk:
os.remove(self.out)
return [None, self.targetorg, True]
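# Illustrative usage sketch (not part of the library): a minimal run against a
# local database; the file names and e-value are hypothetical.
#
#   blaster = Blaster(useDisk=True)
#   blaster.createDB('proteins.faa', 'prot', outFile='MyDB')
#   blaster.runBlast('query.faa', 'MyDB', outFile='hits.xml', evalue=1e-5)
#   blaster.parseBlast('hits.xml')
#   for hits in blaster.getHits(expect=1e-5):
#       for hit in hits:
#           print(hit.query_id, hit.hit, hit.identity)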
|
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import fnmatch
import json
import logging
import os
import re
import subprocess
import sys
from presubmit_canned_checks import _ReportErrorFileAndLine
class MockCannedChecks(object):
def _FindNewViolationsOfRule(self, callable_rule, input_api,
source_file_filter=None,
error_formatter=_ReportErrorFileAndLine):
"""Find all newly introduced violations of a per-line rule (a callable).
Arguments:
callable_rule: a callable taking a file extension and line of input and
returning True if the rule is satisfied and False if there was a
problem.
input_api: object to enumerate the affected files.
source_file_filter: a filter to be passed to the input api.
error_formatter: a callable taking (filename, line_number, line) and
returning a formatted error string.
Returns:
A list of the newly-introduced violations reported by the rule.
"""
errors = []
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=source_file_filter):
# For speed, we do two passes, checking first the full file. Shelling out
# to the SCM to determine the changed region can be quite expensive on
# Win32. Assuming that most files will be kept problem-free, we can
# skip the SCM operations most of the time.
extension = str(f.LocalPath()).rsplit('.', 1)[-1]
if all(callable_rule(extension, line) for line in f.NewContents()):
continue # No violation found in full text: can skip considering diff.
for line_num, line in f.ChangedContents():
if not callable_rule(extension, line):
errors.append(error_formatter(f.LocalPath(), line_num, line))
return errors
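# Illustrative sketch (not part of this module): a per-line rule suitable for
# _FindNewViolationsOfRule above; the no-tabs check is a hypothetical example.
#
#   def no_tabs_rule(extension, line):
#       # Satisfied (True) when the line contains no tab characters.
#       return '\t' not in line
#
#   errors = MockCannedChecks()._FindNewViolationsOfRule(no_tabs_rule, input_api)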
class MockInputApi(object):
"""Mock class for the InputApi class.
This class can be used for unittests for presubmit by initializing the files
attribute as the list of changed files.
"""
DEFAULT_FILES_TO_SKIP = ()
def __init__(self):
self.canned_checks = MockCannedChecks()
self.fnmatch = fnmatch
self.json = json
self.re = re
self.os_path = os.path
self.platform = sys.platform
self.python_executable = sys.executable
self.subprocess = subprocess
self.sys = sys
self.files = []
self.is_committing = False
self.change = MockChange([])
self.presubmit_local_path = os.path.dirname(__file__)
self.logging = logging.getLogger('PRESUBMIT')
def CreateMockFileInPath(self, f_list):
self.os_path.exists = lambda x: x in f_list
def AffectedFiles(self, file_filter=None, include_deletes=False):
for file in self.files: # pylint: disable=redefined-builtin
if file_filter and not file_filter(file):
continue
if not include_deletes and file.Action() == 'D':
continue
yield file
def AffectedSourceFiles(self, file_filter=None):
return self.AffectedFiles(file_filter=file_filter)
def FilterSourceFile(self, file, # pylint: disable=redefined-builtin
files_to_check=(), files_to_skip=()):
local_path = file.LocalPath()
found_in_files_to_check = not files_to_check
if files_to_check:
if isinstance(files_to_check, str):
raise TypeError('files_to_check should be an iterable of strings')
for pattern in files_to_check:
compiled_pattern = re.compile(pattern)
if compiled_pattern.search(local_path):
found_in_files_to_check = True
break
if files_to_skip:
if isinstance(files_to_skip, str):
raise TypeError('files_to_skip should be an iterable of strings')
for pattern in files_to_skip:
compiled_pattern = re.compile(pattern)
if compiled_pattern.search(local_path):
return False
return found_in_files_to_check
def LocalPaths(self):
return [file.LocalPath() for file in self.files] # pylint: disable=redefined-builtin
def PresubmitLocalPath(self):
return self.presubmit_local_path
def ReadFile(self, filename, mode='rU'):
if hasattr(filename, 'AbsoluteLocalPath'):
filename = filename.AbsoluteLocalPath()
for file_ in self.files:
if file_.LocalPath() == filename:
return '\n'.join(file_.NewContents())
# Otherwise, file is not in our mock API.
raise IOError("No such file or directory: '%s'" % filename)
class MockOutputApi(object):
"""Mock class for the OutputApi class.
    An instance of this class can be passed to presubmit unittests for outputting
various types of results.
"""
class PresubmitResult(object):
def __init__(self, message, items=None, long_text=''):
self.message = message
self.items = items
self.long_text = long_text
def __repr__(self):
return self.message
class PresubmitError(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'error'
class PresubmitPromptWarning(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'warning'
class PresubmitNotifyResult(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'notify'
class PresubmitPromptOrNotify(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'promptOrNotify'
def __init__(self):
self.more_cc = []
def AppendCC(self, more_cc):
self.more_cc.extend(more_cc)
class MockFile(object):
"""Mock class for the File class.
This class can be used to form the mock list of changed files in
MockInputApi for presubmit unittests.
"""
def __init__(self, local_path, new_contents, old_contents=None, action='A',
scm_diff=None):
self._local_path = local_path
self._new_contents = new_contents
self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
self._action = action
if scm_diff:
self._scm_diff = scm_diff
else:
self._scm_diff = (
"--- /dev/null\n+++ %s\n@@ -0,0 +1,%d @@\n" %
(local_path, len(new_contents)))
for l in new_contents:
self._scm_diff += "+%s\n" % l
self._old_contents = old_contents
def Action(self):
return self._action
def ChangedContents(self):
return self._changed_contents
def NewContents(self):
return self._new_contents
def LocalPath(self):
return self._local_path
def AbsoluteLocalPath(self):
return self._local_path
def GenerateScmDiff(self):
return self._scm_diff
def OldContents(self):
return self._old_contents
def rfind(self, p):
"""os.path.basename is called on MockFile so we need an rfind method."""
return self._local_path.rfind(p)
def __getitem__(self, i):
"""os.path.basename is called on MockFile so we need a get method."""
return self._local_path[i]
def __len__(self):
"""os.path.basename is called on MockFile so we need a len method."""
return len(self._local_path)
def replace(self, altsep, sep):
"""os.path.basename is called on MockFile so we need a replace method."""
return self._local_path.replace(altsep, sep)
class MockAffectedFile(MockFile):
def AbsoluteLocalPath(self):
return self._local_path
class MockChange(object):
"""Mock class for Change class.
This class can be used in presubmit unittests to mock the query of the
current change.
"""
def __init__(self, changed_files, description=''):
self._changed_files = changed_files
self.footers = defaultdict(list)
self._description = description
def LocalPaths(self):
return self._changed_files
def AffectedFiles(self, include_dirs=False, include_deletes=True,
file_filter=None):
return self._changed_files
def GitFootersFromDescription(self):
return self.footers
def DescriptionText(self):
return self._description
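

# A minimal, hypothetical usage sketch: none of the names below come from the
# real presubmit API. It shows how MockInputApi, MockAffectedFile and
# MockOutputApi are typically wired together to unit-test a check.
import unittest


def _CheckNoTabsExample(input_api, output_api):
  """Hypothetical presubmit check, used only for illustration."""
  errors = []
  for f in input_api.AffectedFiles():
    for line_num, line in f.ChangedContents():
      if '\t' in line:
        errors.append(output_api.PresubmitError(
            '%s:%d contains a tab character' % (f.LocalPath(), line_num)))
  return errors


class _ExampleCheckTest(unittest.TestCase):
  def testFlagsTabCharacters(self):
    input_api = MockInputApi()
    input_api.files = [
        MockAffectedFile('foo/bar.cc', ['int main() {', '\treturn 0;', '}']),
    ]
    results = _CheckNoTabsExample(input_api, MockOutputApi())
    self.assertEqual(1, len(results))
    self.assertEqual('error', results[0].type)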
|
|
"""Utility file for utterance generator to work with CX resources."""
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
import pandas as pd
from typing import List, Dict
from google.oauth2 import service_account
from dfcx_scrapi.core import scrapi_base
from dfcx_scrapi.core import intents
from dfcx_scrapi.core_ml import utterance_generator
# logging config
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
class UtteranceGeneratorUtils(scrapi_base.ScrapiBase):
"""Wrapper for utterance generator that creates new training phrases.
Can be used to create independent test sets and net-new training phrases
for intents.
"""
def __init__(
self,
creds_path: str = None,
creds_dict: Dict[str,str] = None,
creds: service_account.Credentials = None,
scope=False,
):
super().__init__(
creds_path=creds_path,
creds_dict=creds_dict,
creds=creds,
scope=scope,
)
logging.info("setting up utils....")
self.intents = intents.Intents(creds_path, creds_dict)
logging.info("downloading model....")
self.utterance_generator = utterance_generator.UtteranceGenerator()
logging.info("utterance generator utils setup")
@staticmethod
def _progress_bar(
current: int,
total: int,
bar_length: int = 50,
type_: str = "Progress"
):
"""Display progress bar for processing.
Args:
current: number for current iteration.
total: number for total iterations.
bar_length: number of spaces to make the progress bar,
default 50.
type_: label for the bar, default 'Progress'.
"""
percent = float(current) * 100 / total
arrow = "-" * int(percent / 100 * bar_length - 1) + ">"
spaces = " " * (bar_length - len(arrow))
print(f"{type_}({current}/{total})" + f"[{arrow}{spaces}] {percent}%",
end="\r")
@staticmethod
def _clean_string(string_raw: str) -> str:
"""Cleans a string for comparison.
        Applies the same normalization used when checking whether generated
        text already exists, so that phrases which differ only by
        -case,
        -punctuation, or
        -leading and trailing spaces
        compare as equal.
Args:
string_raw: phrase to clean
Returns:
cleaned string
"""
return string_raw.translate(str.maketrans("", "", string.punctuation)
).lower().strip()
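
    # Illustrative sketch (not part of the original module): the normalization
    # above makes these two surface variants compare equal, since case,
    # punctuation, and padding are all stripped.
    @staticmethod
    def _clean_string_example():  # pragma: no cover - illustration only
        variant_a = UtteranceGeneratorUtils._clean_string("  Hello, World! ")
        variant_b = UtteranceGeneratorUtils._clean_string("hello world")
        assert variant_a == variant_b == "hello world"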
def _remove_training(
self,
synthetic_intent_dataset: pd.DataFrame,
existing_phrases: List[str]
) -> pd.DataFrame:
"""Removes generated phrases that already exist as intent TPs.
Internal function for removing generated phrases which already
exist within intents as training phrases. This is done after applying
clean_string to both.
Args:
synthetic_intent_dataset: dataframe containing generated training
phrases.
existing_phrases: list of phrases that already exist as intent
training phrases.
Returns:
a dataframe of new only generated phrases.
"""
existing_phrases_cleaned = [
self._clean_string(phrase) for phrase in existing_phrases
]
synthetic_intent_dataset.insert(
0,
"cleaned_synthetic_phrase",
synthetic_intent_dataset["synthetic_phrases"]
.apply(self._clean_string),
)
synthetic_intent_dataset = synthetic_intent_dataset.drop_duplicates(
subset=["training_phrase", "cleaned_synthetic_phrase"]
)
synthetic_intent_dataset.insert(
0,
"synthetic_in_training",
synthetic_intent_dataset.apply(
lambda x:
x["cleaned_synthetic_phrase"] in existing_phrases_cleaned,
axis=1,
),
)
synthetic_intent_dataset = (
synthetic_intent_dataset[
~(synthetic_intent_dataset["synthetic_in_training"])]
.drop(columns=["cleaned_synthetic_phrase", "synthetic_in_training"])
.reset_index(drop=True)
)
return synthetic_intent_dataset
def _generate_phrases_intent(
self,
training_phrases_one_intent: pd.DataFrame,
synthetic_phrases_per_intent: int,
) -> pd.DataFrame:
"""Generates new synthetic phrases.
        Main internal function for generating new synthetic phrases from
the existing training phrases within an intent. The synthetic phrases
are only as good as the training phrases in the intent.
Args:
training_phrases_one_intent: input phrases from which to generate
new training phrases.
synthetic_phrases_per_intent: number of phrases to generate.
Returns:
a DataFrame containing synthetic training phrases.
"""
        synthetic_instances = (synthetic_phrases_per_intent
                               // len(training_phrases_one_intent)) + 1
existing_phrases = list(set(
training_phrases_one_intent["training_phrase"]))
if synthetic_instances == 1:
training_phrases_one_intent = training_phrases_one_intent.sample(
frac=1
).reset_index(drop=True)
training_phrases_one_intent = training_phrases_one_intent.iloc[
:synthetic_phrases_per_intent
]
attempts = 0
while True:
            synthetic_intent_dataset = (
                self.utterance_generator.generate_utterances(
                    training_phrases_one_intent,
                    synthetic_instances=synthetic_instances,
                )
            )
            # Drop generated phrases that already exist as training phrases.
synthetic_intent_dataset = self._remove_training(
synthetic_intent_dataset, existing_phrases
)
            # Stop once we have (nearly) enough examples; otherwise retry,
            # asking the generator for one more instance per phrase.
            if (len(synthetic_intent_dataset)
                    >= synthetic_phrases_per_intent - 1):
                break
            synthetic_instances += 1
attempts += 1
if attempts > 3:
break
synthetic_intent_dataset = synthetic_intent_dataset.sample(frac=1).iloc[
:synthetic_phrases_per_intent
]
return synthetic_intent_dataset
def _generate_phrases(
self,
training_phrases: pd.DataFrame,
dataset_size: int
) -> pd.DataFrame:
"""Generates phrases for all user-specified intents.
Internal function for running _generate_phrases_intent for all the
user-specified intents.
Args:
training_phrases: df of training phrases for multiple intents with
an Intent "display_name" column.
dataset_size: number of requested phrases to generate over all
specified intents.
Returns:
a DataFrame of generated training phrases.
"""
synthetic_dataset = pd.DataFrame()
intents_list = list(set(training_phrases["display_name"]))
unique_intents_count = len(intents_list)
synthetic_phrases_per_intent = dataset_size // unique_intents_count + 1
i = 0
for intent in intents_list:
training_phrases_one_intent = training_phrases.copy()[
training_phrases["display_name"] == intent
].reset_index(drop=True)
intent_set = self._generate_phrases_intent(
training_phrases_one_intent, synthetic_phrases_per_intent
)
            synthetic_dataset = pd.concat([synthetic_dataset, intent_set])
i += 1
self._progress_bar(i, len(intents_list))
return synthetic_dataset
def create_synthetic_dataset(
self,
agent_id: str,
intent_subset: List[str],
dataset_size: int = 100) -> pd.DataFrame:
"""Creates a synthetic test dataset.
        Creates a test dataset in which none of the utterances appear among
        the existing training phrases of the selected intents.
Args:
agent_id: ID of the DFCX agent.
intent_subset: intents to generate a test dataset for.
dataset_size: number of synthetic phrases to generate, default
100.
Returns:
a DataFrame containing synthetic test dataset utterances
columns:
id: IDs of original utterances.
                synthetic_instances: number of synthetic phrases per
original phrase.
utterance: original utterances.
synthetic_phrases: generated phrases.
                display_name: intent the utterance was generated from.
"""
training_phrases = self.intents.bulk_intent_to_df(
agent_id=agent_id, intent_subset=intent_subset
)
training_phrases = training_phrases.copy().rename(
columns={"tp": "utterance"})
test_dataset = self._generate_phrases(training_phrases, dataset_size)
test_dataset = test_dataset[:dataset_size]
return test_dataset.reset_index(drop=True)
def create_test_dataset(
self,
agent_id: str,
intent_subset: List[str],
dataset_size: int = 100) -> pd.DataFrame:
"""Creates a test dataset for a given list of intents.
The phrases in this dataset will not be exact string match phrases that
exist in the training phrases but will be close semantically. This set
is automatically labeled by the intent whose training was used to
generate the new phrase. This can be used to run through the
core.conversations run_intent_detection function. You may need to
specify a flow_display_name and page_display_name in the dataframe to
run the set at the correct location.
Args:
agent_id: name parameter of the agent to pull intents from
full path to agent
intent_subset: display names of the intents to create a test
for, base phrases come from the training in the intent.
            dataset_size: overall target size of the test set to create; the
                result may be smaller, depending on whether enough new
                independent phrases can be generated from the data. The
                function tries to generate an even number of entries per
                intent.
Returns:
Dataframe with columns:
utterance: synthesized phrases.
display_name: Display name of the intent the utterance was
generated from; also true label.
"""
synthetic_dataset = self.create_synthetic_dataset(
agent_id, intent_subset,
dataset_size)
test_dataset = (
synthetic_dataset.copy()[["synthetic_phrases", "display_name"]]
.rename(columns={"synthetic_phrases": "utterance"})
.reset_index(drop=True)
)
return test_dataset
def create_new_training_phrases(
self,
agent_id: str,
intent_subset: List[str],
new_phrases: int = 100
) -> pd.DataFrame:
"""Creates new training phrases for a given list of intents.
Generates phrases that are semantically similar to the input training
phrases and returns them in a dataframe. The resulting dataframe can
be used with the core.intents.modify_training_phrase_df method to
create the appropriately formatted training phrase dataframe that will
be ready to update to a CX agent. Using this newly formatted dataframe
(and optionally a Parameters dataframe), the
tools.dataframe_functions.bulk_update_intents_from_dataframe method can
be used to make the final updates to the CX agent.
Args:
agent_id: name parameter of the agent to pull intents from - full
path to agent.
intent_subset: display names of the intents to create a new phrases
for, base phrases come from the training in the intent.
            new_phrases: overall target size of new phrases to create; the
                result may be smaller, depending on whether enough new
                independent phrases can be generated from the data. The
                function tries to generate an even number of entries per
                intent.
Returns:
Dataframe with columns:
display_name: intent to add the phrase to.
phrase: new phrase.
action: "add".
"""
synthetic_dataset = self.create_synthetic_dataset(
agent_id, intent_subset, new_phrases
)
new_training = (
synthetic_dataset.copy()[["display_name", "synthetic_phrases"]]
.rename(columns={"synthetic_phrases": "phrase"})
.reset_index(drop=True)
)
new_training.insert(len(new_training.columns), "action", "add")
return new_training
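
# Illustrative usage sketch (not part of the original module): the credentials
# path, agent ID, and intent names below are placeholders; constructing the
# utils object downloads the generator model, so this is best run manually.
if __name__ == "__main__":
    utils = UtteranceGeneratorUtils(creds_path="/path/to/creds.json")
    test_df = utils.create_test_dataset(
        agent_id="projects/<project>/locations/<region>/agents/<agent-id>",
        intent_subset=["billing.cancel", "billing.refund"],
        dataset_size=50,
    )
    print(test_df.head())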
|
|
"""Internal utilties; not for external use
"""
import contextlib
import datetime
import functools
import itertools
import re
import warnings
try:  # Python 3.3+
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2
    from collections import Mapping, MutableMapping
import numpy as np
import pandas as pd
from . import ops
from .pycompat import iteritems, OrderedDict
def alias_warning(old_name, new_name, stacklevel=3): # pragma: no cover
warnings.warn('%s has been deprecated and renamed to %s'
% (old_name, new_name),
FutureWarning, stacklevel=stacklevel)
def function_alias(obj, old_name): # pragma: no cover
@functools.wraps(obj)
def wrapper(*args, **kwargs):
alias_warning(old_name, obj.__name__)
return obj(*args, **kwargs)
return wrapper
def class_alias(obj, old_name): # pragma: no cover
class Wrapper(obj):
def __new__(cls, *args, **kwargs):
alias_warning(old_name, obj.__name__)
return super(Wrapper, cls).__new__(cls, *args, **kwargs)
Wrapper.__name__ = obj.__name__
return Wrapper
def safe_cast_to_index(array):
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
if isinstance(array, pd.Index):
index = array
elif hasattr(array, 'to_index'):
index = array.to_index()
else:
kwargs = {}
if hasattr(array, 'dtype') and array.dtype.kind == 'O':
kwargs['dtype'] = object
index = pd.Index(np.asarray(array), **kwargs)
return index
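
# Illustrative sketch (not part of the original module): object-dtype input is
# kept as dtype=object rather than being coerced by pandas.
def _example_safe_cast_to_index():  # pragma: no cover - illustration only
    idx = safe_cast_to_index(np.array(['a', 1], dtype=object))
    assert idx.dtype == object
    assert list(idx) == ['a', 1]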
def maybe_wrap_array(original, new_array):
"""Wrap a transformed array with __array_wrap__ is it can be done safely.
This lets us treat arbitrary functions that take and return ndarray objects
like ufuncs, as long as they return an array with the same shape.
"""
# in case func lost array's metadata
if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:
return original.__array_wrap__(new_array)
else:
return new_array
def equivalent(first, second):
"""Compare two objects for equivalence (identity or equality), using
array_equiv if either object is an ndarray
"""
if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
return ops.array_equiv(first, second)
else:
return first is second or first == second
def peek_at(iterable):
"""Returns the first value from iterable, as well as a new iterable with
the same content as the original iterable
"""
gen = iter(iterable)
peek = next(gen)
return peek, itertools.chain([peek], gen)
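
# Illustrative sketch (not part of the original module): the peeked value is
# not consumed from the iterable that peek_at returns.
def _example_peek_at():  # pragma: no cover - illustration only
    first, rest = peek_at(iter([1, 2, 3]))
    assert first == 1
    assert list(rest) == [1, 2, 3]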
def update_safety_check(first_dict, second_dict, compat=equivalent):
"""Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
        All items in the second dictionary are checked for conflicts against
        items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k, v in iteritems(second_dict):
if k in first_dict and not compat(v, first_dict[k]):
raise ValueError('unsafe to merge dictionaries without '
'overriding values; conflicting key %r' % k)
def remove_incompatible_items(first_dict, second_dict, compat=equivalent):
"""Remove incompatible items from the first dictionary in-place.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k in list(first_dict):
if (k not in second_dict or
(k in second_dict and
not compat(first_dict[k], second_dict[k]))):
del first_dict[k]
def is_dict_like(value):
return hasattr(value, '__getitem__') and hasattr(value, 'keys')
def is_full_slice(value):
return isinstance(value, slice) and value == slice(None)
def combine_pos_and_kw_args(pos_kwargs, kw_kwargs, func_name):
if pos_kwargs is not None:
if not is_dict_like(pos_kwargs):
raise ValueError('the first argument to .%s must be a dictionary'
% func_name)
if kw_kwargs:
raise ValueError('cannot specify both keyword and positional '
'arguments to .%s' % func_name)
return pos_kwargs
else:
return kw_kwargs
_SCALAR_TYPES = (datetime.datetime, datetime.date, datetime.timedelta)
def is_scalar(value):
"""np.isscalar only works on primitive numeric types and (bizarrely)
excludes 0-d ndarrays; this version does more comprehensive checks
"""
if hasattr(value, 'ndim'):
return value.ndim == 0
return (np.isscalar(value) or
isinstance(value, _SCALAR_TYPES) or
value is None)
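
# Illustrative sketch (not part of the original module): edge cases that plain
# np.isscalar misses but is_scalar handles.
def _example_is_scalar():  # pragma: no cover - illustration only
    assert is_scalar(np.array(0))  # 0-d ndarray; np.isscalar says False
    assert is_scalar(datetime.date(2000, 1, 1))
    assert is_scalar(None)
    assert not is_scalar(np.array([0]))  # 1-d, even with a single element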
def is_valid_numpy_dtype(dtype):
try:
np.dtype(dtype)
except (TypeError, ValueError):
return False
else:
return True
def tuple_to_0darray(value):
result = np.empty((1,), dtype=object)
result[:] = [value]
result.shape = ()
return result
def dict_equiv(first, second, compat=equivalent):
"""Test equivalence of two dict-like objects. If any of the values are
numpy arrays, compare them correctly.
Parameters
----------
first, second : dict-like
Dictionaries to compare for equality
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
equals : bool
True if the dictionaries are equal
"""
for k in first:
if k not in second or not compat(first[k], second[k]):
return False
for k in second:
if k not in first:
return False
return True
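
# Illustrative sketch (not part of the original module): plain `==` between
# dicts holding ndarrays is ambiguous; dict_equiv compares array values
# element-wise via the default `equivalent` compat function.
def _example_dict_equiv():  # pragma: no cover - illustration only
    first = {'x': np.array([1, 2])}
    assert dict_equiv(first, {'x': np.array([1, 2])})
    assert not dict_equiv(first, {'x': np.array([1, 3])})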
def ordered_dict_intersection(first_dict, second_dict, compat=equivalent):
"""Return the intersection of two dictionaries as a new OrderedDict.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
intersection : OrderedDict
Intersection of the contents.
"""
new_dict = OrderedDict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
class SingleSlotPickleMixin(object):
"""Mixin class to add the ability to pickle objects whose state is defined
by a single __slots__ attribute. Only necessary under Python 2.
"""
def __getstate__(self):
return getattr(self, self.__slots__[0])
def __setstate__(self, state):
setattr(self, self.__slots__[0], state)
class Frozen(Mapping, SingleSlotPickleMixin):
"""Wrapper around an object implementing the mapping interface to make it
immutable. If you really want to modify the mapping, the mutable version is
saved under the `mapping` attribute.
"""
__slots__ = ['mapping']
def __init__(self, mapping):
self.mapping = mapping
def __getitem__(self, key):
return self.mapping[key]
def __iter__(self):
return iter(self.mapping)
def __len__(self):
return len(self.mapping)
def __contains__(self, key):
return key in self.mapping
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.mapping)
def FrozenOrderedDict(*args, **kwargs):
return Frozen(OrderedDict(*args, **kwargs))
class SortedKeysDict(MutableMapping, SingleSlotPickleMixin):
"""An wrapper for dictionary-like objects that always iterates over its
items in sorted order by key but is otherwise equivalent to the underlying
mapping.
"""
__slots__ = ['mapping']
def __init__(self, mapping=None):
self.mapping = {} if mapping is None else mapping
def __getitem__(self, key):
return self.mapping[key]
def __setitem__(self, key, value):
self.mapping[key] = value
def __delitem__(self, key):
del self.mapping[key]
def __iter__(self):
return iter(sorted(self.mapping))
def __len__(self):
return len(self.mapping)
def __contains__(self, key):
return key in self.mapping
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.mapping)
def copy(self):
return type(self)(self.mapping.copy())
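
# Illustrative sketch (not part of the original module): iteration order is
# sorted by key regardless of insertion order.
def _example_sorted_keys_dict():  # pragma: no cover - illustration only
    d = SortedKeysDict({'b': 2, 'a': 1})
    assert list(d) == ['a', 'b']
    d['c'] = 3
    assert list(d) == ['a', 'b', 'c']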
class ChainMap(MutableMapping, SingleSlotPickleMixin):
"""Partial backport of collections.ChainMap from Python>=3.3
Don't return this from any public APIs, since some of the public methods
for a MutableMapping are missing (they will raise a NotImplementedError)
"""
__slots__ = ['maps']
def __init__(self, *maps):
self.maps = maps
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, value): # pragma: no cover
raise NotImplementedError
def __iter__(self):
seen = set()
for mapping in self.maps:
for item in mapping:
if item not in seen:
yield item
seen.add(item)
def __len__(self):
        return sum(1 for _ in self)
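
# Illustrative sketch (not part of the original module): earlier maps shadow
# later ones on lookup, and writes always go to the first map.
def _example_chain_map():  # pragma: no cover - illustration only
    cm = ChainMap({'a': 1}, {'a': 2, 'b': 3})
    assert cm['a'] == 1 and cm['b'] == 3
    cm['c'] = 4
    assert cm.maps[0]['c'] == 4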
class NdimSizeLenMixin(object):
"""Mixin class that extends a class that defines a ``shape`` property to
one that also defines ``ndim``, ``size`` and ``__len__``.
"""
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
# cast to int so that shape = () gives size = 1
return int(np.prod(self.shape))
def __len__(self):
try:
return self.shape[0]
except IndexError:
raise TypeError('len() of unsized object')
class NDArrayMixin(NdimSizeLenMixin):
"""Mixin class for making wrappers of N-dimensional arrays that conform to
the ndarray interface required for the data argument to Variable objects.
A subclass should set the `array` property and override one or more of
`dtype`, `shape` and `__getitem__`.
"""
@property
def dtype(self):
return self.array.dtype
@property
def shape(self):
return self.array.shape
def __array__(self, dtype=None):
return np.asarray(self[...], dtype=dtype)
def __getitem__(self, key):
return self.array[key]
def __repr__(self):
return '%s(array=%r)' % (type(self).__name__, self.array)
@contextlib.contextmanager
def close_on_error(f):
"""Context manager to ensure that a file opened by xarray is closed if an
exception is raised before the user sees the file object.
"""
try:
yield
except Exception:
f.close()
raise
def is_remote_uri(path):
    return bool(re.search(r'^https?://', path))
def is_uniform_spaced(arr, **kwargs):
"""Return True if values of an array are uniformly spaced and sorted.
>>> is_uniform_spaced(range(5))
True
>>> is_uniform_spaced([-4, 0, 100])
False
kwargs are additional arguments to ``np.isclose``
"""
arr = np.array(arr, dtype=float)
diffs = np.diff(arr)
return np.isclose(diffs.min(), diffs.max(), **kwargs)
def hashable(v):
"""Determine whether `v` can be hashed."""
try:
hash(v)
except TypeError:
return False
return True
|
|
import unittest
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestMax(unittest.TestCase):
def test_max_int32_empty(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_completed(250)]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max())
res = scheduler.start(create=create).messages
self.assertEqual(1, len(res))
assert res[0].value.kind == "E" and res[0].value.exception is not None
assert res[0].time == 250
def test_max_int32_return(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_next(210, 2), on_completed(250)]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max())
res = scheduler.start(create=create).messages
assert res == [on_next(250, 2), on_completed(250)]
def test_max_int32_some(self):
scheduler = TestScheduler()
msgs = [
on_next(150, 1),
on_next(210, 3),
on_next(220, 4),
on_next(230, 2),
on_completed(250),
]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max())
res = scheduler.start(create=create).messages
assert res == [on_next(250, 4), on_completed(250)]
def test_max_int32_on_error(self):
ex = "ex"
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_error(210, ex)]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max())
res = scheduler.start(create=create).messages
assert res == [on_error(210, ex)]
def test_max_int32_never(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1)]
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max())
res = scheduler.start(create=create).messages
assert res == []
def test_max_of_t_comparer_empty(self):
scheduler = TestScheduler()
msgs = [on_next(150, 1), on_completed(250)]
def reverse_comparer(a, b):
if a > b:
return -1
if a < b:
return 1
return 0
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max(reverse_comparer))
res = scheduler.start(create=create).messages
self.assertEqual(1, len(res))
assert res[0].value.kind == "E" and res[0].value.exception is not None
assert res[0].time == 250
def test_max_of_t_comparer_return(self):
scheduler = TestScheduler()
msgs = [on_next(150, "z"), on_next(210, "a"), on_completed(250)]
def reverse_comparer(a, b):
if a > b:
return -1
if a < b:
return 1
return 0
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max(reverse_comparer))
res = scheduler.start(create=create).messages
assert res == [on_next(250, "a"), on_completed(250)]
def test_max_of_t_comparer_some(self):
scheduler = TestScheduler()
msgs = [
on_next(150, "z"),
on_next(210, "b"),
on_next(220, "c"),
on_next(230, "a"),
on_completed(250),
]
def reverse_comparer(a, b):
if a > b:
return -1
if a < b:
return 1
return 0
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max(reverse_comparer))
res = scheduler.start(create=create).messages
assert res == [on_next(250, "a"), on_completed(250)]
def test_max_of_t_comparer_on_error(self):
ex = "ex"
scheduler = TestScheduler()
msgs = [on_next(150, "z"), on_error(210, ex)]
def reverse_comparer(a, b):
if a > b:
return -1
if a < b:
return 1
return 0
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max(reverse_comparer))
res = scheduler.start(create=create).messages
assert res == [on_error(210, ex)]
def test_max_of_t_comparer_never(self):
scheduler = TestScheduler()
msgs = [on_next(150, "z")]
def reverse_comparer(a, b):
if a > b:
return -1
if a < b:
return 1
return 0
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max(reverse_comparer))
res = scheduler.start(create=create).messages
assert res == []
def test_max_of_t_comparer_throws(self):
ex = "ex"
scheduler = TestScheduler()
msgs = [
on_next(150, "z"),
on_next(210, "b"),
on_next(220, "c"),
on_next(230, "a"),
on_completed(250),
]
def reverse_comparer(a, b):
raise Exception(ex)
xs = scheduler.create_hot_observable(msgs)
def create():
return xs.pipe(ops.max(reverse_comparer))
res = scheduler.start(create=create).messages
assert res == [on_error(220, ex)]
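
# Illustrative sketch (not part of the original tests; assumes the reactivex
# 4.x API, where Observable.run() blocks and returns the final value): the
# same reverse comparer exercised outside the virtual-time scheduler.
def _example_max_with_reverse_comparer():  # pragma: no cover - illustration
    import reactivex

    def reverse_comparer(a, b):
        return (a < b) - (a > b)

    result = reactivex.of(3, 1, 2).pipe(ops.max(reverse_comparer)).run()
    assert result == 1  # "max" under the reversed ordering is the smallest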
|