max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
misago/misago/markup/pipeline.py
|
vascoalramos/misago-deployment
| 2
|
6625951
|
from importlib import import_module
from bs4 import BeautifulSoup
from .. import hooks
from ..conf import settings
class MarkupPipeline:
"""small framework for extending parser"""
def extend_markdown(self, md):
for extension in settings.MISAGO_MARKUP_EXTENSIONS:
module = import_module(extension)
if hasattr(module, "extend_markdown"):
hook = getattr(module, "extend_markdown")
hook.extend_markdown(md)
for extension in hooks.markdown_extensions:
extension(md)
return md
def process_result(self, result):
soup = BeautifulSoup(result["parsed_text"], "html5lib")
for extension in settings.MISAGO_MARKUP_EXTENSIONS:
module = import_module(extension)
if hasattr(module, "clean_parsed"):
hook = getattr(module, "clean_parsed")
hook.process_result(result, soup)
for extension in hooks.parsing_result_processors:
extension(result, soup)
souped_text = str(soup.body).strip()[6:-7]
result["parsed_text"] = souped_text.strip()
return result
pipeline = MarkupPipeline()
|
from importlib import import_module
from bs4 import BeautifulSoup
from .. import hooks
from ..conf import settings
class MarkupPipeline:
"""small framework for extending parser"""
def extend_markdown(self, md):
for extension in settings.MISAGO_MARKUP_EXTENSIONS:
module = import_module(extension)
if hasattr(module, "extend_markdown"):
hook = getattr(module, "extend_markdown")
hook.extend_markdown(md)
for extension in hooks.markdown_extensions:
extension(md)
return md
def process_result(self, result):
soup = BeautifulSoup(result["parsed_text"], "html5lib")
for extension in settings.MISAGO_MARKUP_EXTENSIONS:
module = import_module(extension)
if hasattr(module, "clean_parsed"):
hook = getattr(module, "clean_parsed")
hook.process_result(result, soup)
for extension in hooks.parsing_result_processors:
extension(result, soup)
souped_text = str(soup.body).strip()[6:-7]
result["parsed_text"] = souped_text.strip()
return result
pipeline = MarkupPipeline()
|
en
| 0.617929
|
small framework for extending parser
| 2.58963
| 3
|
ebnmpy/point_laplace.py
|
kclamar/ebnmpy
| 0
|
6625952
|
import numpy as np
from numpy import exp, inf, log, mean, sqrt
from scipy.stats import bernoulli
from .ashr import my_e2truncnorm, my_etruncnorm
from .output import result_in_output
from .r_utils import length, numeric, pmax, pmin, rep, stop, unlist
from .r_utils.stats import dnorm, pnorm, rtruncnorm
from .workhorse_parametric import check_g_init
def laplacemix(pi, mean, scale):
return dict(pi=pi, mean=mean, scale=scale)
def pl_checkg(g_init, fix_g, mode, scale, pointmass):
return check_g_init(
g_init=g_init,
fix_g=fix_g,
mode=mode,
scale=scale,
pointmass=pointmass,
class_name="laplacemix",
scale_name="scale",
)
def pl_initpar(g_init, mode, scale, pointmass, x, s):
if g_init is not None and length(g_init["pi"]) == 1:
par = dict(alpha=inf, beta=-log(g_init["scale"]), mu=g_init["mean"])
elif g_init is not None and length(g_init["pi"]) == 2:
par = dict(
alpha=log(1 / g_init["pi"][0] - 1) if g_init["pi"][0] != 0 else inf,
beta=-log(g_init["scale"][1]),
mu=g_init["mean"][0],
)
else:
par = dict()
if not pointmass:
par["alpha"] = inf
else:
par["alpha"] = 0
if scale != "estimate":
if length(scale) != 1:
stop("Argument 'scale' must be either 'estimate' or a scalar.")
par["beta"] = -log(scale)
else:
par["beta"] = -0.5 * log(mean(x ** 2) / 2)
if mode != "estimate":
par["mu"] = mode
else:
par["mu"] = mean(x)
return par
def pl_scalepar(par, scale_factor):
if par["beta"] is not None:
par["beta"] = par["beta"] - log(scale_factor)
if par["mu"] is not None:
par["mu"] = scale_factor * par["mu"]
return par
def pl_precomp(x, s, par_init, fix_par):
fix_mu = fix_par[2]
if not fix_mu and np.any(s == 0):
stop("The mode cannot be estimated if any SE is zero (the gradient does not exist).")
return dict()
def pl_nllik(par, x, s, par_init, fix_par, calc_grad, calc_hess):
fix_pi0, fix_a, fix_mu = fix_par
p = unlist(par_init)
p[~np.array(fix_par)] = par
w = 1 - 1 / (1 + exp(p[0]))
a = exp(p[1])
mu = p[2]
lf = -0.5 * log(2 * np.pi * s ** 2) - 0.5 * (x - mu) ** 2 / s ** 2
xleft = (x - mu) / s + s * a
lpnormleft = pnorm(xleft, log_p=True, lower_tail=False)
lgleft = log(a / 2) + s ** 2 * a ** 2 / 2 + a * (x - mu) + lpnormleft
xright = (x - mu) / s - s * a
lpnormright = pnorm(xright, log_p=True)
lgright = log(a / 2) + s ** 2 * a ** 2 / 2 - a * (x - mu) + lpnormright
lg = logscale_add(lgleft, lgright)
llik = logscale_add(log(1 - w) + lf, log(w) + lg)
nllik = -np.nansum(llik)
if calc_grad or calc_hess:
grad = numeric(len(par))
i = 0
if not fix_pi0:
f = exp(lf - llik)
g = exp(lg - llik)
dnllik_dw = f - g
dw_dalpha = w * (1 - w)
dnllik_dalpha = dnllik_dw * dw_dalpha
grad[i] = np.nansum(dnllik_dalpha)
i += 1
if not fix_a or not fix_mu:
dlogpnorm_left = -exp(-log(2 * np.pi) / 2 - xleft ** 2 / 2 - lpnormleft)
dlogpnorm_right = exp(-log(2 * np.pi) / 2 - xright ** 2 / 2 - lpnormright)
if not fix_a:
dgleft_da = exp(lgleft - llik) * (1 / a + a * s ** 2 + (x - mu) + s * dlogpnorm_left)
dgright_da = exp(lgright - llik) * (1 / a + a * s ** 2 - (x - mu) - s * dlogpnorm_right)
dg_da = dgleft_da + dgright_da
dnllik_da = -w * dg_da
da_dbeta = a
dnllik_dbeta = dnllik_da * da_dbeta
grad[i] = np.nansum(dnllik_dbeta)
i += 1
if not fix_mu:
df_dmu = exp(lf - llik) * ((x - mu) / s ** 2)
dgleft_dmu = exp(lgleft - llik) * (-a - dlogpnorm_left / s)
dgright_dmu = exp(lgright - llik) * (a - dlogpnorm_right / s)
dg_dmu = dgleft_dmu + dgright_dmu
dnllik_dmu = -(1 - w) * df_dmu - w * dg_dmu
grad[i] = np.nansum(dnllik_dmu)
return grad
if calc_hess:
# TODO
raise NotImplementedError
return nllik
def logscale_add(log_x, log_y):
C = pmax(log_x, log_y)
return log(exp(log_x - C) + exp(log_y - C)) + C
def pl_postcomp(optpar, optval, x, s, par_init, fix_par, scale_factor):
llik = -optval
retlist = dict(par=optpar, val=llik)
fix_pi0 = fix_par[0]
fix_mu = fix_par[2]
if not fix_pi0 and fix_mu:
pi0_llik = sum(-0.5 * log(2 * np.pi * s ** 2) - 0.5 * (x - par_init["mu"]) ** 2 / s ** 2)
pi0_llik += sum(np.isfinite(x)) * log(scale_factor)
if pi0_llik > llik:
retlist["par"]["alpha"] = -inf
retlist["par"]["beta"] = 0
retlist["val"] = pi0_llik
return retlist
def pl_summres(x, s, optpar, output):
w = 1 - 1 / (exp(optpar["alpha"]) + 1)
a = exp(optpar["beta"])
mu = optpar["mu"]
return pl_summres_untransformed(x, s, w, a, mu, output)
def pl_summres_untransformed(x, s, w, a, mu, output):
x = x - mu
wpost = wpost_laplace(x, s, w, a)
lm = lambda_(x, s, a)
post = dict()
if result_in_output(output):
post["mean"] = wpost * (
lm * my_etruncnorm(0, inf, x - s ** 2 * a, s)
+ (1 - lm) * my_etruncnorm(-inf, 0, x + s ** 2 * a, s)
)
post["mean2"] = wpost * (
lm * my_e2truncnorm(0, inf, x - s ** 2 * a, s)
+ (1 - lm) * my_e2truncnorm(-inf, 0, x + s ** 2 * a, s)
)
if np.any(np.isinf(s)):
post["mean"][np.isinf(s)] = 0
post["mean2"][np.isinf(s)] = 2 * w / a ** 2
post["sd"] = sqrt(pmax(0, post["mean2"] - post["mean"] ** 2))
post["mean2"] = post["mean2"] + mu ** 2 + 2 * mu * post["mean"]
post["mean"] = post["mean"] + mu
if "lfsr" in output:
post["lfsr"] = (1 - wpost) + wpost * pmin(lm, 1 - lm)
if np.any(np.isinf(s)):
post["lfsr"][np.isinf(s)] = 1 - w / 2
return post
def wpost_laplace(x, s, w, a):
if w == 0:
return np.zeros(len(x))
if w == 1:
return np.ones(len(x))
lf = dnorm(x, 0, s, log=True)
lg = logg_laplace(x, s, a)
wpost = w / (w + (1 - w) * exp(lf - lg))
return wpost
def logg_laplace(x, s, a):
lg1 = -a * x + pnorm((x - s ** 2 * a) / s, log_p=True)
lg2 = a * x + pnorm((x + s ** 2 * a) / s, log_p=True, lower_tail=False)
lfac = pmax(lg1, lg2)
return log(a / 2) + s ** 2 * a ** 2 / 2 + lfac + log(exp(lg1 - lfac) + exp(lg2 - lfac))
def lambda_(x, s, a):
lm1 = -a * x + pnorm(x / s - s * a, log_p=True)
lm2 = a * x + pnorm(x / s + s * a, log_p=True, lower_tail=False)
lm = 1 / (1 + exp(lm2 - lm1))
return lm
def pl_partog(par):
pi0 = 1 / (exp(par["alpha"]) + 1)
scale = exp(-par["beta"])
mean = par["mu"]
if pi0 == 0:
g = laplacemix(pi=1, mean=mean, scale=scale)
else:
g = laplacemix(pi=(pi0, 1 - pi0), mean=(mean,) * 2, scale=(0, scale))
return g
def pl_postsamp(x, s, optpar, nsamp):
w = 1 - 1 / (exp(optpar["alpha"]) + 1)
a = exp(optpar["beta"])
mu = optpar["mu"]
return pl_postsamp_untransformed(x, s, w, a, mu, nsamp)
def pl_postsamp_untransformed(x, s, w, a, mu, nsamp):
x = x - mu
wpost = wpost_laplace(x, s, w, a)
lam = lambda_(x, s, a)
nobs = len(wpost)
is_nonnull = bernoulli.rvs(wpost, size=(nsamp, nobs)) != 0
is_positive = bernoulli.rvs(lam, size=(nsamp, nobs)) != 0
if len(s) == 1:
s = rep(s, nobs)
negative_samp = np.array(
[rtruncnorm(nsamp, -inf, 0, mi, si) for mi, si in zip(x + s ** 2 * a, s)]
).T
positive_samp = np.array(
[rtruncnorm(nsamp, 0, inf, mi, si) for mi, si in zip(x - s ** 2 * a, s)]
).T
samp = np.zeros((nsamp, nobs))
samp[is_nonnull & is_positive] = positive_samp[is_nonnull & is_positive]
samp[is_nonnull & ~is_positive] = negative_samp[is_nonnull & ~is_positive]
samp = samp + mu
return samp
|
import numpy as np
from numpy import exp, inf, log, mean, sqrt
from scipy.stats import bernoulli
from .ashr import my_e2truncnorm, my_etruncnorm
from .output import result_in_output
from .r_utils import length, numeric, pmax, pmin, rep, stop, unlist
from .r_utils.stats import dnorm, pnorm, rtruncnorm
from .workhorse_parametric import check_g_init
def laplacemix(pi, mean, scale):
return dict(pi=pi, mean=mean, scale=scale)
def pl_checkg(g_init, fix_g, mode, scale, pointmass):
return check_g_init(
g_init=g_init,
fix_g=fix_g,
mode=mode,
scale=scale,
pointmass=pointmass,
class_name="laplacemix",
scale_name="scale",
)
def pl_initpar(g_init, mode, scale, pointmass, x, s):
if g_init is not None and length(g_init["pi"]) == 1:
par = dict(alpha=inf, beta=-log(g_init["scale"]), mu=g_init["mean"])
elif g_init is not None and length(g_init["pi"]) == 2:
par = dict(
alpha=log(1 / g_init["pi"][0] - 1) if g_init["pi"][0] != 0 else inf,
beta=-log(g_init["scale"][1]),
mu=g_init["mean"][0],
)
else:
par = dict()
if not pointmass:
par["alpha"] = inf
else:
par["alpha"] = 0
if scale != "estimate":
if length(scale) != 1:
stop("Argument 'scale' must be either 'estimate' or a scalar.")
par["beta"] = -log(scale)
else:
par["beta"] = -0.5 * log(mean(x ** 2) / 2)
if mode != "estimate":
par["mu"] = mode
else:
par["mu"] = mean(x)
return par
def pl_scalepar(par, scale_factor):
if par["beta"] is not None:
par["beta"] = par["beta"] - log(scale_factor)
if par["mu"] is not None:
par["mu"] = scale_factor * par["mu"]
return par
def pl_precomp(x, s, par_init, fix_par):
fix_mu = fix_par[2]
if not fix_mu and np.any(s == 0):
stop("The mode cannot be estimated if any SE is zero (the gradient does not exist).")
return dict()
def pl_nllik(par, x, s, par_init, fix_par, calc_grad, calc_hess):
fix_pi0, fix_a, fix_mu = fix_par
p = unlist(par_init)
p[~np.array(fix_par)] = par
w = 1 - 1 / (1 + exp(p[0]))
a = exp(p[1])
mu = p[2]
lf = -0.5 * log(2 * np.pi * s ** 2) - 0.5 * (x - mu) ** 2 / s ** 2
xleft = (x - mu) / s + s * a
lpnormleft = pnorm(xleft, log_p=True, lower_tail=False)
lgleft = log(a / 2) + s ** 2 * a ** 2 / 2 + a * (x - mu) + lpnormleft
xright = (x - mu) / s - s * a
lpnormright = pnorm(xright, log_p=True)
lgright = log(a / 2) + s ** 2 * a ** 2 / 2 - a * (x - mu) + lpnormright
lg = logscale_add(lgleft, lgright)
llik = logscale_add(log(1 - w) + lf, log(w) + lg)
nllik = -np.nansum(llik)
if calc_grad or calc_hess:
grad = numeric(len(par))
i = 0
if not fix_pi0:
f = exp(lf - llik)
g = exp(lg - llik)
dnllik_dw = f - g
dw_dalpha = w * (1 - w)
dnllik_dalpha = dnllik_dw * dw_dalpha
grad[i] = np.nansum(dnllik_dalpha)
i += 1
if not fix_a or not fix_mu:
dlogpnorm_left = -exp(-log(2 * np.pi) / 2 - xleft ** 2 / 2 - lpnormleft)
dlogpnorm_right = exp(-log(2 * np.pi) / 2 - xright ** 2 / 2 - lpnormright)
if not fix_a:
dgleft_da = exp(lgleft - llik) * (1 / a + a * s ** 2 + (x - mu) + s * dlogpnorm_left)
dgright_da = exp(lgright - llik) * (1 / a + a * s ** 2 - (x - mu) - s * dlogpnorm_right)
dg_da = dgleft_da + dgright_da
dnllik_da = -w * dg_da
da_dbeta = a
dnllik_dbeta = dnllik_da * da_dbeta
grad[i] = np.nansum(dnllik_dbeta)
i += 1
if not fix_mu:
df_dmu = exp(lf - llik) * ((x - mu) / s ** 2)
dgleft_dmu = exp(lgleft - llik) * (-a - dlogpnorm_left / s)
dgright_dmu = exp(lgright - llik) * (a - dlogpnorm_right / s)
dg_dmu = dgleft_dmu + dgright_dmu
dnllik_dmu = -(1 - w) * df_dmu - w * dg_dmu
grad[i] = np.nansum(dnllik_dmu)
return grad
if calc_hess:
# TODO
raise NotImplementedError
return nllik
def logscale_add(log_x, log_y):
C = pmax(log_x, log_y)
return log(exp(log_x - C) + exp(log_y - C)) + C
def pl_postcomp(optpar, optval, x, s, par_init, fix_par, scale_factor):
llik = -optval
retlist = dict(par=optpar, val=llik)
fix_pi0 = fix_par[0]
fix_mu = fix_par[2]
if not fix_pi0 and fix_mu:
pi0_llik = sum(-0.5 * log(2 * np.pi * s ** 2) - 0.5 * (x - par_init["mu"]) ** 2 / s ** 2)
pi0_llik += sum(np.isfinite(x)) * log(scale_factor)
if pi0_llik > llik:
retlist["par"]["alpha"] = -inf
retlist["par"]["beta"] = 0
retlist["val"] = pi0_llik
return retlist
def pl_summres(x, s, optpar, output):
w = 1 - 1 / (exp(optpar["alpha"]) + 1)
a = exp(optpar["beta"])
mu = optpar["mu"]
return pl_summres_untransformed(x, s, w, a, mu, output)
def pl_summres_untransformed(x, s, w, a, mu, output):
x = x - mu
wpost = wpost_laplace(x, s, w, a)
lm = lambda_(x, s, a)
post = dict()
if result_in_output(output):
post["mean"] = wpost * (
lm * my_etruncnorm(0, inf, x - s ** 2 * a, s)
+ (1 - lm) * my_etruncnorm(-inf, 0, x + s ** 2 * a, s)
)
post["mean2"] = wpost * (
lm * my_e2truncnorm(0, inf, x - s ** 2 * a, s)
+ (1 - lm) * my_e2truncnorm(-inf, 0, x + s ** 2 * a, s)
)
if np.any(np.isinf(s)):
post["mean"][np.isinf(s)] = 0
post["mean2"][np.isinf(s)] = 2 * w / a ** 2
post["sd"] = sqrt(pmax(0, post["mean2"] - post["mean"] ** 2))
post["mean2"] = post["mean2"] + mu ** 2 + 2 * mu * post["mean"]
post["mean"] = post["mean"] + mu
if "lfsr" in output:
post["lfsr"] = (1 - wpost) + wpost * pmin(lm, 1 - lm)
if np.any(np.isinf(s)):
post["lfsr"][np.isinf(s)] = 1 - w / 2
return post
def wpost_laplace(x, s, w, a):
if w == 0:
return np.zeros(len(x))
if w == 1:
return np.ones(len(x))
lf = dnorm(x, 0, s, log=True)
lg = logg_laplace(x, s, a)
wpost = w / (w + (1 - w) * exp(lf - lg))
return wpost
def logg_laplace(x, s, a):
lg1 = -a * x + pnorm((x - s ** 2 * a) / s, log_p=True)
lg2 = a * x + pnorm((x + s ** 2 * a) / s, log_p=True, lower_tail=False)
lfac = pmax(lg1, lg2)
return log(a / 2) + s ** 2 * a ** 2 / 2 + lfac + log(exp(lg1 - lfac) + exp(lg2 - lfac))
def lambda_(x, s, a):
lm1 = -a * x + pnorm(x / s - s * a, log_p=True)
lm2 = a * x + pnorm(x / s + s * a, log_p=True, lower_tail=False)
lm = 1 / (1 + exp(lm2 - lm1))
return lm
def pl_partog(par):
pi0 = 1 / (exp(par["alpha"]) + 1)
scale = exp(-par["beta"])
mean = par["mu"]
if pi0 == 0:
g = laplacemix(pi=1, mean=mean, scale=scale)
else:
g = laplacemix(pi=(pi0, 1 - pi0), mean=(mean,) * 2, scale=(0, scale))
return g
def pl_postsamp(x, s, optpar, nsamp):
w = 1 - 1 / (exp(optpar["alpha"]) + 1)
a = exp(optpar["beta"])
mu = optpar["mu"]
return pl_postsamp_untransformed(x, s, w, a, mu, nsamp)
def pl_postsamp_untransformed(x, s, w, a, mu, nsamp):
x = x - mu
wpost = wpost_laplace(x, s, w, a)
lam = lambda_(x, s, a)
nobs = len(wpost)
is_nonnull = bernoulli.rvs(wpost, size=(nsamp, nobs)) != 0
is_positive = bernoulli.rvs(lam, size=(nsamp, nobs)) != 0
if len(s) == 1:
s = rep(s, nobs)
negative_samp = np.array(
[rtruncnorm(nsamp, -inf, 0, mi, si) for mi, si in zip(x + s ** 2 * a, s)]
).T
positive_samp = np.array(
[rtruncnorm(nsamp, 0, inf, mi, si) for mi, si in zip(x - s ** 2 * a, s)]
).T
samp = np.zeros((nsamp, nobs))
samp[is_nonnull & is_positive] = positive_samp[is_nonnull & is_positive]
samp[is_nonnull & ~is_positive] = negative_samp[is_nonnull & ~is_positive]
samp = samp + mu
return samp
|
none
| 1
| 2.16533
| 2
|
|
encoder-decoder-train_1.py
|
kapitsa2811/uTAB
| 0
|
6625953
|
import numpy as np
np.random.seed(1000) # for reproducibility
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
from keras.layers import Activation
from keras.layers import MaxPooling2D,UpSampling2D
from keras.layers import Dropout,Dense,Flatten,BatchNormalization
from keras.optimizers import *
from keras.models import load_model
from keras import regularizers
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
import os
import cv2
import sys
cwd=os.getcwd()+"//"
oldFiles=os.listdir(cwd+"results//")
for old in oldFiles:
try:
os.remove("/home/kapitsa/PycharmProjects/segmentation//Convolutional-Encoder-Decoder-for-Hand-Segmentation-master/results/"+old)
except Exception as e:
print "\n\t cant delete=",old
pass
'''
this code is modified for new segmentaion
'''
def showImage(name,image):
print "\n\t image=",image.shape
cv2.imshow(name,image)
cv2.waitKey()
'''
angles = range(-2,3)
shifts = [[0,0],[0,1],[1,0],[1,1],[0,2],[2,0],[1,2],[2,1],[2,2],
[0,-1],[-1,0],[-1,-1],[0,-2],[-2,0],[-1,-2],[-2,-1],[-2,-2],
[1,-1],[1,-2],[2,-1],[2,-2],
[-1,1],[-1,2],[-2,1],[-2,2]]
multiplier = len(angles)*len(shifts)
'''
# path_x = cwd+'/newData/X1/' #only hands
# path_y = cwd+'/newData/segment11/' #segmented data
# path_x = cwd+'/newData/image/' #only hands
# path_y = cwd+'/newData/segment/' #segmented data
path_x = cwd+'/newData/imageText1/' #only hands
path_y = cwd+'/newData/segmentText1/' #segmented data
total = 0
dump=os.listdir(path_x)
dumpLen=len(dump)
print("\n\t dumpLen1=",dumpLen)
dump=os.listdir(path_y)
dumpLen=len(dump)
print("\n\t dumpLen2=",dumpLen)
maxImageProcess=dumpLen
#for pos in range(len(path_x)):
noException=0
blackOnWhite=0
X_train=np.zeros((maxImageProcess,128,128,3))
y_train=np.zeros((maxImageProcess,128,128,3))
for indxImg,img in enumerate(sorted(dump)):
print("\n\t img=",img,"\t ",os.path.isfile(path_x+img),"\t ",os.path.isdir(path_x))
continue
if indxImg %100==0:
print "\n\tindxImg=",indxImg,"\t dumpLen=",dumpLen
if indxImg>maxImageProcess:
break
try:
originalIm = cv2.imread(path_x+img)
#print "\n\t indxImg=",indxImg,"\t image shape=",originalIm.shape
segmentedIm = cv2.imread(path_y+img)
print("\n\t isFile=",os.path.isfile(path_y+img))
print "\n\t indxImg=",indxImg,"\t image shape=",segmentedIm.shape
X_train[indxImg] = cv2.resize(originalIm, (128, 128)) #originalIm
y_train[indxImg] = cv2.resize(segmentedIm, (128, 128))
'''
for indxAngle,angle in enumerate(angles):
for indxShift,shift in enumerate(shifts):
M = cv2.getRotationMatrix2D((128/2,128/2),angle,1)
shiftM = np.float32([[1,0,shift[0]],[0,1,shift[1]]])
rotatedIm = cv2.warpAffine(originalIm,M,(128,128))
rotatedSegmentedIm = cv2.warpAffine(segmentedIm,M,(128,128))
rotatedShiftedIm = cv2.warpAffine(rotatedIm,shiftM,(128,128))
rotatedSegmentedShiftedIm = cv2.warpAffine(rotatedSegmentedIm,shiftM,(128,128))
X_train[total]=rotatedShiftedIm
y_train[total]=rotatedSegmentedShiftedIm
cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_shift.jpg",rotatedShiftedIm)
cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_segment.jpg",rotatedSegmentedShiftedIm)
total+=1
'''
# showImage("train",originalIm)
# showImage("test",segmentedIm)
except Exception as e:
noException+=1
print "\n\t e=",e
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("\n\t line no", exc_tb.tb_lineno)
#input("check exception")
print "\n\t noException=",noException
tests = os.listdir(cwd+'/newData/test/')#["A-train0101.jpg","A-train0102.jpg","A-train0103.jpg","A-train0104.jpg","A-train0105.jpg"]
noTestImages=len(tests)
print "\n\t noTestImages=",noTestImages
X_test = np.zeros((noTestImages,128,128,3))
X_test1 =[] #np.zeros((noTestImages,512,512,3)) # original images
testException=0
for pos in range(len(tests)):
try:
temp=cv2.imread(cwd+'/newData/test/'+tests[pos])
#print "\n\t test size",temp.shape
#showImage(str(pos),temp)
im = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
ret2, th2 = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
if blackOnWhite == 1:
temp = (255 - temp)
X_test[pos] = cv2.resize(temp,(128,128))
X_test1.append(temp)
except Exception as e:
print "\n\t file name =",tests[pos]
testException+=1
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("\n\t line no in test images=", exc_tb.tb_lineno)
print "\n\t testException=",testException
X_train-=128.0
X_train/=128.0
y_train-=128.0
y_train/=128.0
X_test-=128.0
X_test/=128.0
print "1.X_train shape=",X_train.shape
print "2.y_train shape=",X_train.shape
print "3.X_test shape=",X_test.shape
#
# meen = np.mean(X_train,axis=(0,1,2))
# std = np.std(X_train,axis=(0,1,2))
# X_train-=meen
# X_train/=std
#
# #y_train-=meen
# y_train/=255
#
def createModel():
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
clf = Sequential()
clf.add(Convolution2D(filters=64,kernel_size=(5,3),input_shape=(128,128,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3),padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(1,1)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#clf.add(MaxPooling2D(pool_size=(2,2),, strides=(1,1))
clf.add(Convolution2D(filters=512*2,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024*2,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512*2,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=256*2,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(3, (3, 3), padding='same'))
clf.add(Activation('tanh'))
clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
#clf.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['mae'])
return clf
def createModelOriginal():
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
clf = Sequential()
clf.add(Convolution2D(filters=64,kernel_size=(3,3),input_shape=(128,128,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2))) # 1
clf.add(Convolution2D(filters=128,kernel_size=(3,3),padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))# 32 2
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(1,1))) # 3
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2))) # 4
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(3, (3, 3), padding='same'))
clf.add(Activation('tanh'))
clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
return clf
def createModel1():
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
clf = Sequential()
clf.add(Convolution2D(filters=64,kernel_size=(3,3),input_shape=(128,128,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
#clf.add()
'''
clf.add(Convolution2D(filters=128,kernel_size=(7,3),padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(7,5), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(1,1)))
clf.add(Convolution2D(filters=256,kernel_size=(10,10), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(MaxPooling2D(pool_size=(2,2)))
clf.add(Convolution2D(filters=512,kernel_size=(10,5), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
#writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
clf.add(UpSampling2D((2,2)))
clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same'))
clf.add(BatchNormalization())
clf.add(Activation('relu'))
'''
clf.add(Convolution2D(3, (3, 3), padding='same'))
clf.add(Activation('tanh'))
#clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
clf.compile(optimizer=adam,loss='mse',metrics=['mae'])
return clf
#base CV structure
def get_callbacks(filepath, patience=10):
es = EarlyStopping('val_loss', patience=patience, mode="min")
#msave = ModelCheckpoint(filepath, save_best_only=True)
msave =ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=True, mode='auto', period=1)
return [es, msave]
file_path = cwd+"//models//model_weights.hdf5"
callbacks = get_callbacks(filepath=file_path, patience=10)
clf=createModel()
#clf=createModelOriginal()
model_json=clf.to_json()
with open(cwd+"modelArch.json", "w") as json_file:
json_file.write(model_json)
print clf.summary()
#keras.callbacks.ModelCheckpoint(cwd+'//models//', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
clf.fit(X_train,y_train,batch_size=30, epochs=200,validation_split=0.2,callbacks=callbacks,shuffle=True,verbose=2)
#clf.save(cwd+'//models//model-10.h5')
sys.stdout.flush()
y_out = clf.predict(X_test)
y_out*=128.0
y_out+=128.0
for y in range(y_out.shape[0]):
h,w=X_test1[y].shape[0],X_test1[y].shape[1]
tmp= cv2.resize(y_out[y], (h, w)) #originalIm
cv2.imwrite(cwd+"//results//"+'y'+str(y)+'t.jpg',X_test1[y])
cv2.imwrite(cwd+"//results//"+'y'+str(y)+'s1gray.jpg',tmp)
|
import numpy as np
np.random.seed(1000) # for reproducibility
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
from keras.layers import Activation
from keras.layers import MaxPooling2D,UpSampling2D
from keras.layers import Dropout,Dense,Flatten,BatchNormalization
from keras.optimizers import *
from keras.models import load_model
from keras import regularizers
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
import os
import cv2
import sys
cwd=os.getcwd()+"//"
oldFiles=os.listdir(cwd+"results//")
for old in oldFiles:
try:
os.remove("/home/kapitsa/PycharmProjects/segmentation//Convolutional-Encoder-Decoder-for-Hand-Segmentation-master/results/"+old)
except Exception as e:
print "\n\t cant delete=",old
pass
'''
this code is modified for new segmentaion
'''
def showImage(name,image):
    # Debug helper: print the array shape, then display the image in a
    # window titled *name* and block until any key is pressed.
    print "\n\t image=",image.shape
    cv2.imshow(name,image)
    cv2.waitKey()
'''
angles = range(-2,3)
shifts = [[0,0],[0,1],[1,0],[1,1],[0,2],[2,0],[1,2],[2,1],[2,2],
[0,-1],[-1,0],[-1,-1],[0,-2],[-2,0],[-1,-2],[-2,-1],[-2,-2],
[1,-1],[1,-2],[2,-1],[2,-2],
[-1,1],[-1,2],[-2,1],[-2,2]]
multiplier = len(angles)*len(shifts)
'''
# Alternative dataset locations kept for reference:
# path_x = cwd+'/newData/X1/' #only hands
# path_y = cwd+'/newData/segment11/' #segmented data
# path_x = cwd+'/newData/image/' #only hands
# path_y = cwd+'/newData/segment/' #segmented data
path_x = cwd+'/newData/imageText1/' #only hands (input images)
path_y = cwd+'/newData/segmentText1/' #segmented data (target masks)
total = 0
# Sanity-check both directories; `dump` keeps the LAST listing (path_y),
# so pairing below assumes both dirs contain identically named files.
dump = os.listdir(path_x)
dumpLen = len(dump)
print("\n\t dumpLen1=",dumpLen)
dump = os.listdir(path_y)
dumpLen = len(dump)
print("\n\t dumpLen2=",dumpLen)
maxImageProcess = dumpLen
#for pos in range(len(path_x)):
noException = 0
# blackOnWhite toggles polarity inversion of test images (0 = disabled).
blackOnWhite = 0
# Pre-allocated training tensors, one 128x128 RGB slot per listed file.
X_train = np.zeros((maxImageProcess,128,128,3))
y_train = np.zeros((maxImageProcess,128,128,3))
for indxImg,img in enumerate(sorted(dump)):
print("\n\t img=",img,"\t ",os.path.isfile(path_x+img),"\t ",os.path.isdir(path_x))
continue
if indxImg %100==0:
print "\n\tindxImg=",indxImg,"\t dumpLen=",dumpLen
if indxImg>maxImageProcess:
break
try:
originalIm = cv2.imread(path_x+img)
#print "\n\t indxImg=",indxImg,"\t image shape=",originalIm.shape
segmentedIm = cv2.imread(path_y+img)
print("\n\t isFile=",os.path.isfile(path_y+img))
print "\n\t indxImg=",indxImg,"\t image shape=",segmentedIm.shape
X_train[indxImg] = cv2.resize(originalIm, (128, 128)) #originalIm
y_train[indxImg] = cv2.resize(segmentedIm, (128, 128))
'''
for indxAngle,angle in enumerate(angles):
for indxShift,shift in enumerate(shifts):
M = cv2.getRotationMatrix2D((128/2,128/2),angle,1)
shiftM = np.float32([[1,0,shift[0]],[0,1,shift[1]]])
rotatedIm = cv2.warpAffine(originalIm,M,(128,128))
rotatedSegmentedIm = cv2.warpAffine(segmentedIm,M,(128,128))
rotatedShiftedIm = cv2.warpAffine(rotatedIm,shiftM,(128,128))
rotatedSegmentedShiftedIm = cv2.warpAffine(rotatedSegmentedIm,shiftM,(128,128))
X_train[total]=rotatedShiftedIm
y_train[total]=rotatedSegmentedShiftedIm
cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_shift.jpg",rotatedShiftedIm)
cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_segment.jpg",rotatedSegmentedShiftedIm)
total+=1
'''
# showImage("train",originalIm)
# showImage("test",segmentedIm)
except Exception as e:
noException+=1
print "\n\t e=",e
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print("\n\t line no", exc_tb.tb_lineno)
#input("check exception")
print "\n\t noException=",noException
# --- test-set loading -------------------------------------------------
tests = os.listdir(cwd+'/newData/test/')#["A-train0101.jpg","A-train0102.jpg","A-train0103.jpg","A-train0104.jpg","A-train0105.jpg"]
noTestImages = len(tests)
print "\n\t noTestImages=",noTestImages
# X_test: network-sized copies; X_test1: originals kept for final resize.
X_test = np.zeros((noTestImages,128,128,3))
X_test1 = [] #np.zeros((noTestImages,512,512,3)) # original images
testException = 0
for pos in range(len(tests)):
    try:
        temp = cv2.imread(cwd+'/newData/test/'+tests[pos])
        # Otsu threshold of the grayscale image.
        # NOTE(review): ret2/th2 are never used afterwards — dead work,
        # or the threshold was meant to drive the inversion below; confirm.
        im = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
        ret2, th2 = cv2.threshold(im, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # blackOnWhite is 0 above, so this inversion never fires.
        if blackOnWhite == 1:
            temp = (255 - temp)
        X_test[pos] = cv2.resize(temp,(128,128))
        X_test1.append(temp)
    except Exception as e:
        print "\n\t file name =",tests[pos]
        testException += 1
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print("\n\t line no in test images=", exc_tb.tb_lineno)
print "\n\t testException=",testException
# Normalize all pixel data into [-1, 1] to match the tanh output layer.
X_train -= 128.0
X_train /= 128.0
y_train -= 128.0
y_train /= 128.0
X_test -= 128.0
X_test /= 128.0
print "1.X_train shape=",X_train.shape
# NOTE(review): label says y_train but prints X_train.shape — likely a
# copy-paste slip; shapes happen to match, so output is the same.
print "2.y_train shape=",X_train.shape
print "3.X_test shape=",X_test.shape
#
# meen = np.mean(X_train,axis=(0,1,2))
# std = np.std(X_train,axis=(0,1,2))
# X_train-=meen
# X_train/=std
#
# #y_train-=meen
# y_train/=255
#
def createModel():
    """Build the encoder-decoder segmentation network.

    Input and output are 128x128x3; the final tanh layer matches the
    [-1, 1] normalization applied to the training data.
    """
    opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model = Sequential()

    def conv_bn_relu(filters, kernel=(3, 3), **kw):
        # The repeated unit of this net: Convolution -> BatchNorm -> ReLU.
        model.add(Convolution2D(filters=filters, kernel_size=kernel,
                                padding='same', **kw))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # --- encoder ---
    conv_bn_relu(64, (5, 3), input_shape=(128, 128, 3))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    conv_bn_relu(128)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    conv_bn_relu(256)
    model.add(MaxPooling2D(pool_size=(1, 1)))
    conv_bn_relu(256)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # --- bottleneck ---
    conv_bn_relu(512)
    conv_bn_relu(512 * 2)
    conv_bn_relu(1024 * 2)
    conv_bn_relu(1024)
    conv_bn_relu(1024)
    conv_bn_relu(2048)
    conv_bn_relu(2048)
    conv_bn_relu(512)
    conv_bn_relu(512 * 2)
    # --- decoder: upsample back to 128x128 ---
    model.add(UpSampling2D((2, 2)))
    conv_bn_relu(256 * 2)
    model.add(UpSampling2D((2, 2)))
    conv_bn_relu(128)
    model.add(UpSampling2D((2, 2)))
    conv_bn_relu(64)
    # 3-channel tanh head producing the segmentation image.
    model.add(Convolution2D(3, (3, 3), padding='same'))
    model.add(Activation('tanh'))
    model.compile(optimizer=opt, loss='mse', metrics=['mae'])
    return model
def createModelOriginal():
    """Build the original encoder-decoder variant (all 3x3 kernels).

    Same 128x128x3 in/out contract and tanh head as createModel.
    """
    opt = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model = Sequential()

    def conv_bn_relu(filters, **kw):
        # Convolution -> BatchNorm -> ReLU, the repeated unit of this net.
        model.add(Convolution2D(filters=filters, kernel_size=(3, 3),
                                padding='same', **kw))
        model.add(BatchNormalization())
        model.add(Activation('relu'))

    # --- encoder ---
    conv_bn_relu(64, input_shape=(128, 128, 3))
    model.add(MaxPooling2D(pool_size=(2, 2)))   # 1
    conv_bn_relu(128)
    model.add(MaxPooling2D(pool_size=(2, 2)))   # 2
    conv_bn_relu(256)
    model.add(MaxPooling2D(pool_size=(1, 1)))   # 3
    conv_bn_relu(256)
    model.add(MaxPooling2D(pool_size=(2, 2)))   # 4
    # --- bottleneck ---
    for width in (512, 512, 1024, 1024, 1024, 2048, 2048, 512, 512):
        conv_bn_relu(width)
    # --- decoder: upsample back to 128x128 ---
    for width in (256, 128, 64):
        model.add(UpSampling2D((2, 2)))
        conv_bn_relu(width)
    # 3-channel tanh head producing the segmentation image.
    model.add(Convolution2D(3, (3, 3), padding='same'))
    model.add(Activation('tanh'))
    model.compile(optimizer=opt, loss='mse', metrics=['mae'])
    return model
def createModel1():
    """Build a minimal debug model: one conv/BN/ReLU/pool stage followed
    directly by the 3-channel tanh head.

    CLEANUP: the original body contained a ~60-line disabled network in a
    bare triple-quoted string (a no-op statement); it has been removed —
    runtime behavior is unchanged. See createModel/createModelOriginal
    for the full architectures.
    """
    adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    clf = Sequential()
    clf.add(Convolution2D(filters=64, kernel_size=(3, 3),
                          input_shape=(128, 128, 3), padding='same'))
    clf.add(BatchNormalization())
    clf.add(Activation('relu'))
    clf.add(MaxPooling2D(pool_size=(2, 2)))
    clf.add(Convolution2D(3, (3, 3), padding='same'))
    clf.add(Activation('tanh'))
    clf.compile(optimizer=adam, loss='mse', metrics=['mae'])
    return clf
#base CV structure
def get_callbacks(filepath, patience=10):
    """Build the training callbacks: early stopping on val_loss plus
    best-only weight checkpointing to *filepath*."""
    stopper = EarlyStopping('val_loss', patience=patience, mode="min")
    checkpoint = ModelCheckpoint(
        filepath,
        monitor='val_loss',
        verbose=0,
        save_best_only=True,
        save_weights_only=True,
        mode='auto',
        period=1,
    )
    return [stopper, checkpoint]
# --- training driver --------------------------------------------------
# Best-val_loss weights are checkpointed to file_path by get_callbacks.
file_path = cwd+"//models//model_weights.hdf5"
callbacks = get_callbacks(filepath=file_path, patience=10)
clf=createModel()
#clf=createModelOriginal()
# Persist the architecture separately from the weights.
model_json=clf.to_json()
with open(cwd+"modelArch.json", "w") as json_file:
    json_file.write(model_json)
print clf.summary()
#keras.callbacks.ModelCheckpoint(cwd+'//models//', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
# 20% of the training data is held out for validation each epoch.
clf.fit(X_train,y_train,batch_size=30, epochs=200,validation_split=0.2,callbacks=callbacks,shuffle=True,verbose=2)
#clf.save(cwd+'//models//model-10.h5')
sys.stdout.flush()
# Undo the (x - 128) / 128 input normalization on the predictions so they
# are back in pixel range before being written out as images.
y_out = clf.predict(X_test)
y_out*=128.0
y_out+=128.0
# Write each prediction next to its source image, resized back to the
# source resolution.
# NOTE(review): cv2.resize expects (width, height); passing (h, w) may
# swap the axes for non-square images — confirm.
for y in range(y_out.shape[0]):
    h,w=X_test1[y].shape[0],X_test1[y].shape[1]
    tmp= cv2.resize(y_out[y], (h, w)) #originalIm
    cv2.imwrite(cwd+"//results//"+'y'+str(y)+'t.jpg',X_test1[y])
    cv2.imwrite(cwd+"//results//"+'y'+str(y)+'s1gray.jpg',tmp)
|
en
| 0.211887
|
# for reproducibility this code is modified for new segmentaion angles = range(-2,3) shifts = [[0,0],[0,1],[1,0],[1,1],[0,2],[2,0],[1,2],[2,1],[2,2], [0,-1],[-1,0],[-1,-1],[0,-2],[-2,0],[-1,-2],[-2,-1],[-2,-2], [1,-1],[1,-2],[2,-1],[2,-2], [-1,1],[-1,2],[-2,1],[-2,2]] multiplier = len(angles)*len(shifts) # path_x = cwd+'/newData/X1/' #only hands # path_y = cwd+'/newData/segment11/' #segmented data # path_x = cwd+'/newData/image/' #only hands # path_y = cwd+'/newData/segment/' #segmented data #only hands #segmented data #for pos in range(len(path_x)): #print "\n\t indxImg=",indxImg,"\t image shape=",originalIm.shape #originalIm for indxAngle,angle in enumerate(angles): for indxShift,shift in enumerate(shifts): M = cv2.getRotationMatrix2D((128/2,128/2),angle,1) shiftM = np.float32([[1,0,shift[0]],[0,1,shift[1]]]) rotatedIm = cv2.warpAffine(originalIm,M,(128,128)) rotatedSegmentedIm = cv2.warpAffine(segmentedIm,M,(128,128)) rotatedShiftedIm = cv2.warpAffine(rotatedIm,shiftM,(128,128)) rotatedSegmentedShiftedIm = cv2.warpAffine(rotatedSegmentedIm,shiftM,(128,128)) X_train[total]=rotatedShiftedIm y_train[total]=rotatedSegmentedShiftedIm cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_shift.jpg",rotatedShiftedIm) cv2.imwrite(cwd+"//newData//"+str(indxImg)+"_"+str(indxAngle)+"_"+str(indxShift)+"_segment.jpg",rotatedSegmentedShiftedIm) total+=1 # showImage("train",originalIm) # showImage("test",segmentedIm) #input("check exception") #["A-train0101.jpg","A-train0102.jpg","A-train0103.jpg","A-train0104.jpg","A-train0105.jpg"] #np.zeros((noTestImages,512,512,3)) # original images #print "\n\t test size",temp.shape #showImage(str(pos),temp) # # meen = np.mean(X_train,axis=(0,1,2)) # std = np.std(X_train,axis=(0,1,2)) # X_train-=meen # X_train/=std # # #y_train-=meen # y_train/=255 # #clf.add(MaxPooling2D(pool_size=(2,2),, strides=(1,1)) #writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name 
#clf.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['mae']) # 1 # 32 2 # 3 # 4 #writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name #clf.add() clf.add(Convolution2D(filters=128,kernel_size=(7,3),padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(MaxPooling2D(pool_size=(2,2))) clf.add(Convolution2D(filters=256,kernel_size=(7,5), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(MaxPooling2D(pool_size=(1,1))) clf.add(Convolution2D(filters=256,kernel_size=(10,10), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(MaxPooling2D(pool_size=(2,2))) clf.add(Convolution2D(filters=512,kernel_size=(10,5), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=1024,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=2048,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) 
clf.add(Convolution2D(filters=512,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(UpSampling2D((2,2))) clf.add(Convolution2D(filters=256,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) #writeName = "fusion_" + str(j) + "_" + str(i) + "_" + str(hitIndx) # this is image name clf.add(UpSampling2D((2,2))) clf.add(Convolution2D(filters=128,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) clf.add(UpSampling2D((2,2))) clf.add(Convolution2D(filters=64,kernel_size=(3,3), padding='same')) clf.add(BatchNormalization()) clf.add(Activation('relu')) #clf.compile(optimizer=adam,loss='mse',metrics=['mae']) #base CV structure #msave = ModelCheckpoint(filepath, save_best_only=True) #clf=createModelOriginal() #keras.callbacks.ModelCheckpoint(cwd+'//models//', monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1) #clf.save(cwd+'//models//model-10.h5') #originalIm
| 2.333805
| 2
|
test/mapreduce/data/stream_data.py
|
chuyqa/pydoop
| 0
|
6625954
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import pydoop.mapreduce.streams as streams
# Job configuration as a flat (key1, value1, key2, value2, ...) tuple —
# the argument layout that follows a SET_JOB_CONF command.
JOB_CONF = (
    'k', 'v',
    'mapreduce.job.inputformat.class', 'foo',
    'mapreduce.pipes.isjavarecordreader', 'true',
    'mapreduce.pipes.isjavarecordwriter', 'true',
)
# Bare map-phase stream: three MAP_ITEM records, then CLOSE.  The record
# after CLOSE checks that a well-behaved reader stops at CLOSE.
STREAM_1_DATA = [
    (streams.MAP_ITEM, 'key1', 'val1'),
    (streams.MAP_ITEM, 'key2', 'val2'),
    (streams.MAP_ITEM, 'key3', 'val3'),
    (streams.CLOSE,),
    (streams.MAP_ITEM, 'key3', 'val3'),  # should not get here
]
# Bare reduce-phase stream: two keys with three values each, then CLOSE;
# again one extra value after CLOSE that must never be consumed.
STREAM_2_DATA = [
    (streams.REDUCE_KEY, 'key1'),
    (streams.REDUCE_VALUE, 'val11'),
    (streams.REDUCE_VALUE, 'val12'),
    (streams.REDUCE_VALUE, 'val13'),
    (streams.REDUCE_KEY, 'key2'),
    (streams.REDUCE_VALUE, 'val21'),
    (streams.REDUCE_VALUE, 'val22'),
    (streams.REDUCE_VALUE, 'val23'),
    (streams.CLOSE,),
    (streams.REDUCE_VALUE, 'val24'),  # should not get here
]
# Full session: start message, job conf, a map run with its input records,
# then a reduce run with grouped keys/values, then CLOSE.
STREAM_3_DATA = [
    (streams.START_MESSAGE, 0),
    (streams.SET_JOB_CONF,) + JOB_CONF,
    (streams.RUN_MAP, 'input_split', 0, 1),
    (streams.SET_INPUT_TYPES, 'key_type', 'value_type'),
    (streams.MAP_ITEM, 'key1', 'the blue fox jumps on the table'),
    (streams.MAP_ITEM, 'key1', 'a yellow fox turns around'),
    (streams.MAP_ITEM, 'key2', 'a blue yellow fox sits on the table'),
    (streams.RUN_REDUCE, 0, 0),
    (streams.REDUCE_KEY, 'key1'),
    (streams.REDUCE_VALUE, 'val1'),
    (streams.REDUCE_VALUE, 'val2'),
    (streams.REDUCE_KEY, 'key2'),
    (streams.REDUCE_VALUE, 'val3'),
    (streams.CLOSE,),
]
# Upstream (task -> framework) commands: emitted output, partitioned
# output, status/progress reports, completion, and counter operations.
STREAM_4_DATA = [
    (streams.OUTPUT, 'key1', 'val1'),
    (streams.PARTITIONED_OUTPUT, 22, 'key2', 'val2'),
    (streams.STATUS, 'jolly good'),
    (streams.PROGRESS, 0.99),
    (streams.DONE,),
    (streams.REGISTER_COUNTER, 22, 'cgroup', 'cname'),
    (streams.INCREMENT_COUNTER, 22, 123),
]
# Map-only session (no RUN_REDUCE) ending in CLOSE.
STREAM_5_DATA = [
    (streams.START_MESSAGE, 0),
    (streams.SET_JOB_CONF,) + JOB_CONF,
    (streams.RUN_MAP, 'input_split', 0, 1),
    (streams.SET_INPUT_TYPES, 'key_type', 'value_type'),
    (streams.MAP_ITEM, 'key1', 'the blue fox jumps on the table'),
    (streams.MAP_ITEM, 'key1', 'a yellow fox turns around'),
    (streams.MAP_ITEM, 'key2', 'a blue yellow fox sits on the table'),
    (streams.CLOSE,),
]
# Same as STREAM_5_DATA except for the second RUN_MAP argument (1 vs 0;
# presumably the number of reduces — confirm against the streams module).
STREAM_6_DATA = [
    (streams.START_MESSAGE, 0),
    (streams.SET_JOB_CONF,) + JOB_CONF,
    (streams.RUN_MAP, 'input_split', 1, 1),
    (streams.SET_INPUT_TYPES, 'key_type', 'value_type'),
    (streams.MAP_ITEM, 'key1', 'the blue fox jumps on the table'),
    (streams.MAP_ITEM, 'key1', 'a yellow fox turns around'),
    (streams.MAP_ITEM, 'key2', 'a blue yellow fox sits on the table'),
    (streams.CLOSE,),
]
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import pydoop.mapreduce.streams as streams
# Job configuration as a flat (key1, value1, key2, value2, ...) tuple —
# the argument layout that follows a SET_JOB_CONF command.
JOB_CONF = (
    'k', 'v',
    'mapreduce.job.inputformat.class', 'foo',
    'mapreduce.pipes.isjavarecordreader', 'true',
    'mapreduce.pipes.isjavarecordwriter', 'true',
)
# Bare map-phase stream: three MAP_ITEM records, then CLOSE.  The record
# after CLOSE checks that a well-behaved reader stops at CLOSE.
STREAM_1_DATA = [
    (streams.MAP_ITEM, 'key1', 'val1'),
    (streams.MAP_ITEM, 'key2', 'val2'),
    (streams.MAP_ITEM, 'key3', 'val3'),
    (streams.CLOSE,),
    (streams.MAP_ITEM, 'key3', 'val3'),  # should not get here
]
# Bare reduce-phase stream: two keys with three values each, then CLOSE;
# again one extra value after CLOSE that must never be consumed.
STREAM_2_DATA = [
    (streams.REDUCE_KEY, 'key1'),
    (streams.REDUCE_VALUE, 'val11'),
    (streams.REDUCE_VALUE, 'val12'),
    (streams.REDUCE_VALUE, 'val13'),
    (streams.REDUCE_KEY, 'key2'),
    (streams.REDUCE_VALUE, 'val21'),
    (streams.REDUCE_VALUE, 'val22'),
    (streams.REDUCE_VALUE, 'val23'),
    (streams.CLOSE,),
    (streams.REDUCE_VALUE, 'val24'),  # should not get here
]
# Full session: start message, job conf, a map run with its input records,
# then a reduce run with grouped keys/values, then CLOSE.
STREAM_3_DATA = [
    (streams.START_MESSAGE, 0),
    (streams.SET_JOB_CONF,) + JOB_CONF,
    (streams.RUN_MAP, 'input_split', 0, 1),
    (streams.SET_INPUT_TYPES, 'key_type', 'value_type'),
    (streams.MAP_ITEM, 'key1', 'the blue fox jumps on the table'),
    (streams.MAP_ITEM, 'key1', 'a yellow fox turns around'),
    (streams.MAP_ITEM, 'key2', 'a blue yellow fox sits on the table'),
    (streams.RUN_REDUCE, 0, 0),
    (streams.REDUCE_KEY, 'key1'),
    (streams.REDUCE_VALUE, 'val1'),
    (streams.REDUCE_VALUE, 'val2'),
    (streams.REDUCE_KEY, 'key2'),
    (streams.REDUCE_VALUE, 'val3'),
    (streams.CLOSE,),
]
# Upstream (task -> framework) commands: emitted output, partitioned
# output, status/progress reports, completion, and counter operations.
STREAM_4_DATA = [
    (streams.OUTPUT, 'key1', 'val1'),
    (streams.PARTITIONED_OUTPUT, 22, 'key2', 'val2'),
    (streams.STATUS, 'jolly good'),
    (streams.PROGRESS, 0.99),
    (streams.DONE,),
    (streams.REGISTER_COUNTER, 22, 'cgroup', 'cname'),
    (streams.INCREMENT_COUNTER, 22, 123),
]
# Map-only session (no RUN_REDUCE) ending in CLOSE.
STREAM_5_DATA = [
    (streams.START_MESSAGE, 0),
    (streams.SET_JOB_CONF,) + JOB_CONF,
    (streams.RUN_MAP, 'input_split', 0, 1),
    (streams.SET_INPUT_TYPES, 'key_type', 'value_type'),
    (streams.MAP_ITEM, 'key1', 'the blue fox jumps on the table'),
    (streams.MAP_ITEM, 'key1', 'a yellow fox turns around'),
    (streams.MAP_ITEM, 'key2', 'a blue yellow fox sits on the table'),
    (streams.CLOSE,),
]
# Same as STREAM_5_DATA except for the second RUN_MAP argument (1 vs 0;
# presumably the number of reduces — confirm against the streams module).
STREAM_6_DATA = [
    (streams.START_MESSAGE, 0),
    (streams.SET_JOB_CONF,) + JOB_CONF,
    (streams.RUN_MAP, 'input_split', 1, 1),
    (streams.SET_INPUT_TYPES, 'key_type', 'value_type'),
    (streams.MAP_ITEM, 'key1', 'the blue fox jumps on the table'),
    (streams.MAP_ITEM, 'key1', 'a yellow fox turns around'),
    (streams.MAP_ITEM, 'key2', 'a blue yellow fox sits on the table'),
    (streams.CLOSE,),
]
|
en
| 0.823592
|
# BEGIN_COPYRIGHT # # Copyright 2009-2018 CRS4. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # END_COPYRIGHT # should not get here # should not get here
| 1.669763
| 2
|
src/decimal_to_octal/main.py
|
pranshuag9/my-hackerblocks-codes
| 0
|
6625955
|
'''
@author: <NAME>
@problem: https://hack.codingblocks.com/app/practice/1/217/problem
'''


def decimal_to_octal(n):
    """Print the octal representation of non-negative integer *n*.

    The octal digits are packed into a base-10 integer (e.g. 8 -> 10,
    255 -> 377), which is printed and also returned for callers/tests.

    BUG FIXES: the original used `int(n / BASE)` (float true-division,
    which loses precision for large n) and shadowed the builtin `sum`.
    """
    BASE = 8
    result, place = 0, 1
    while n > 0:
        n, digit = divmod(n, BASE)  # exact integer division
        result += digit * place
        place *= 10
    print(result)
    return result


if __name__ == "__main__":
    n = int(input().strip())
    decimal_to_octal(n)
|
'''
@author: <NAME>
@problem: https://hack.codingblocks.com/app/practice/1/217/problem
'''


def decimal_to_octal(n):
    """Print the octal representation of non-negative integer *n*.

    The octal digits are packed into a base-10 integer (e.g. 8 -> 10,
    255 -> 377), which is printed and also returned for callers/tests.

    BUG FIXES: the original used `int(n / BASE)` (float true-division,
    which loses precision for large n) and shadowed the builtin `sum`.
    """
    BASE = 8
    result, place = 0, 1
    while n > 0:
        n, digit = divmod(n, BASE)  # exact integer division
        result += digit * place
        place *= 10
    print(result)
    return result


if __name__ == "__main__":
    n = int(input().strip())
    decimal_to_octal(n)
|
en
| 0.584632
|
@author: <NAME>
@problem: https://hack.codingblocks.com/app/practice/1/217/problem
| 3.844155
| 4
|
src/compute.client/python_client/compute_rhino3d/Curve.py
|
tt-acm/compute.rhino3d
| 1
|
6625956
|
from . import Util
def GetConicSectionType(thisCurve, multiple=False):
    """Remote call to the getconicsectiontype endpoint; with
    multiple=True the argument is a sequence zipped element-wise."""
    url = "rhino/geometry/curve/getconicsectiontype-curve"
    if multiple:
        url += "?multiple=true"
    args = zip(thisCurve) if multiple else [thisCurve]
    return Util.ComputeFetch(url, args)
def CreateInterpolatedCurve(points, degree, multiple=False):
    """Remote call to the createinterpolatedcurve (points, degree)
    endpoint; with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createinterpolatedcurve-point3darray_int"
    if multiple:
        url += "?multiple=true"
    args = zip(points, degree) if multiple else [points, degree]
    return Util.ComputeFetch(url, args)
def CreateInterpolatedCurve1(points, degree, knots, multiple=False):
    """Remote call to the createinterpolatedcurve (points, degree, knots)
    endpoint; with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createinterpolatedcurve-point3darray_int_curveknotstyle"
    if multiple:
        url += "?multiple=true"
    args = zip(points, degree, knots) if multiple else [points, degree, knots]
    return Util.ComputeFetch(url, args)
def CreateInterpolatedCurve2(points, degree, knots, startTangent, endTangent, multiple=False):
    """Remote call to the createinterpolatedcurve endpoint with end
    tangents; with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createinterpolatedcurve-point3darray_int_curveknotstyle_vector3d_vector3d"
    if multiple:
        url += "?multiple=true"
    params = (points, degree, knots, startTangent, endTangent)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateSoftEditCurve(curve, t, delta, length, fixEnds, multiple=False):
    """Remote call to the createsofteditcurve endpoint; with
    multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createsofteditcurve-curve_double_vector3d_double_bool"
    if multiple:
        url += "?multiple=true"
    params = (curve, t, delta, length, fixEnds)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateFilletCornersCurve(curve, radius, tolerance, angleTolerance, multiple=False):
    """Remote call to the createfilletcornerscurve endpoint; with
    multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createfilletcornerscurve-curve_double_double_double"
    if multiple:
        url += "?multiple=true"
    params = (curve, radius, tolerance, angleTolerance)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateArcBlend(startPt, startDir, endPt, endDir, controlPointLengthRatio, multiple=False):
    """Remote call to the createarcblend endpoint; with multiple=True
    the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createarcblend-point3d_vector3d_point3d_vector3d_double"
    if multiple:
        url += "?multiple=true"
    params = (startPt, startDir, endPt, endDir, controlPointLengthRatio)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateMeanCurve(curveA, curveB, angleToleranceRadians, multiple=False):
    """Remote call to the createmeancurve endpoint with an angle
    tolerance; with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createmeancurve-curve_curve_double"
    if multiple:
        url += "?multiple=true"
    args = zip(curveA, curveB, angleToleranceRadians) if multiple \
        else [curveA, curveB, angleToleranceRadians]
    return Util.ComputeFetch(url, args)
def CreateMeanCurve1(curveA, curveB, multiple=False):
    """Remote call to the createmeancurve endpoint; with multiple=True
    the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createmeancurve-curve_curve"
    if multiple:
        url += "?multiple=true"
    args = zip(curveA, curveB) if multiple else [curveA, curveB]
    return Util.ComputeFetch(url, args)
def CreateBlendCurve(curveA, curveB, continuity, multiple=False):
    """Remote call to the createblendcurve endpoint; with multiple=True
    the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createblendcurve-curve_curve_blendcontinuity"
    if multiple:
        url += "?multiple=true"
    args = zip(curveA, curveB, continuity) if multiple \
        else [curveA, curveB, continuity]
    return Util.ComputeFetch(url, args)
def CreateBlendCurve1(curveA, curveB, continuity, bulgeA, bulgeB, multiple=False):
    """Remote call to the createblendcurve endpoint with bulge factors;
    with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createblendcurve-curve_curve_blendcontinuity_double_double"
    if multiple:
        url += "?multiple=true"
    params = (curveA, curveB, continuity, bulgeA, bulgeB)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateBlendCurve2(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1, multiple=False):
    """Remote call to the per-end parameterized createblendcurve
    endpoint; with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createblendcurve-curve_double_bool_blendcontinuity_curve_double_bool_blendcontinuity"
    if multiple:
        url += "?multiple=true"
    params = (curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateTweenCurves(curve0, curve1, numCurves, multiple=False):
    """Remote call to the createtweencurves endpoint; with multiple=True
    the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createtweencurves-curve_curve_int"
    if multiple:
        url += "?multiple=true"
    args = zip(curve0, curve1, numCurves) if multiple \
        else [curve0, curve1, numCurves]
    return Util.ComputeFetch(url, args)
def CreateTweenCurves1(curve0, curve1, numCurves, tolerance, multiple=False):
    """Remote call to the createtweencurves endpoint with a tolerance;
    with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createtweencurves-curve_curve_int_double"
    if multiple:
        url += "?multiple=true"
    params = (curve0, curve1, numCurves, tolerance)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateTweenCurvesWithMatching(curve0, curve1, numCurves, multiple=False):
    """Remote call to the createtweencurveswithmatching endpoint; with
    multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createtweencurveswithmatching-curve_curve_int"
    if multiple:
        url += "?multiple=true"
    args = zip(curve0, curve1, numCurves) if multiple \
        else [curve0, curve1, numCurves]
    return Util.ComputeFetch(url, args)
def CreateTweenCurvesWithMatching1(curve0, curve1, numCurves, tolerance, multiple=False):
    """Remote call to the createtweencurveswithmatching endpoint with a
    tolerance; with multiple=True the arguments are zipped element-wise."""
    url = "rhino/geometry/curve/createtweencurveswithmatching-curve_curve_int_double"
    if multiple:
        url += "?multiple=true"
    params = (curve0, curve1, numCurves, tolerance)
    args = zip(*params) if multiple else list(params)
    return Util.ComputeFetch(url, args)
def CreateTweenCurvesWithSampling(curve0, curve1, numCurves, numSamples, multiple=False):
url = "rhino/geometry/curve/createtweencurveswithsampling-curve_curve_int_int"
if multiple: url += "?multiple=true"
args = [curve0, curve1, numCurves, numSamples]
if multiple: args = zip(curve0, curve1, numCurves, numSamples)
response = Util.ComputeFetch(url, args)
return response
def CreateTweenCurvesWithSampling1(curve0, curve1, numCurves, numSamples, tolerance, multiple=False):
url = "rhino/geometry/curve/createtweencurveswithsampling-curve_curve_int_int_double"
if multiple: url += "?multiple=true"
args = [curve0, curve1, numCurves, numSamples, tolerance]
if multiple: args = zip(curve0, curve1, numCurves, numSamples, tolerance)
response = Util.ComputeFetch(url, args)
return response
def JoinCurves(inputCurves, multiple=False):
url = "rhino/geometry/curve/joincurves-curvearray"
if multiple: url += "?multiple=true"
args = [inputCurves]
if multiple: args = zip(inputCurves)
response = Util.ComputeFetch(url, args)
return response
def JoinCurves1(inputCurves, joinTolerance, multiple=False):
url = "rhino/geometry/curve/joincurves-curvearray_double"
if multiple: url += "?multiple=true"
args = [inputCurves, joinTolerance]
if multiple: args = zip(inputCurves, joinTolerance)
response = Util.ComputeFetch(url, args)
return response
def JoinCurves2(inputCurves, joinTolerance, preserveDirection, multiple=False):
url = "rhino/geometry/curve/joincurves-curvearray_double_bool"
if multiple: url += "?multiple=true"
args = [inputCurves, joinTolerance, preserveDirection]
if multiple: args = zip(inputCurves, joinTolerance, preserveDirection)
response = Util.ComputeFetch(url, args)
return response
def MakeEndsMeet(curveA, adjustStartCurveA, curveB, adjustStartCurveB, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/makeendsmeet-curve_bool_curve_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, adjustStartCurveA, curveB, adjustStartCurveB)
    else:
        args = [curveA, adjustStartCurveA, curveB, adjustStartCurveB]
    return Util.ComputeFetch(url, args)
def CreateFillet(curve0, curve1, radius, t0Base, t1Base, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createfillet-curve_curve_double_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve0, curve1, radius, t0Base, t1Base)
    else:
        args = [curve0, curve1, radius, t0Base, t1Base]
    return Util.ComputeFetch(url, args)
def CreateFilletCurves(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createfilletcurves-curve_point3d_curve_point3d_double_bool_bool_bool_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance)
    else:
        args = [curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance]
    return Util.ComputeFetch(url, args)
def CreateBooleanUnion(curves, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleanunion-curvearray"
    if multiple:
        url += "?multiple=true"
        args = zip(curves)
    else:
        args = [curves]
    return Util.ComputeFetch(url, args)
def CreateBooleanUnion1(curves, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleanunion-curvearray_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curves, tolerance)
    else:
        args = [curves, tolerance]
    return Util.ComputeFetch(url, args)
def CreateBooleanIntersection(curveA, curveB, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleanintersection-curve_curve"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB)
    else:
        args = [curveA, curveB]
    return Util.ComputeFetch(url, args)
def CreateBooleanIntersection1(curveA, curveB, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleanintersection-curve_curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB, tolerance)
    else:
        args = [curveA, curveB, tolerance]
    return Util.ComputeFetch(url, args)
def CreateBooleanDifference(curveA, curveB, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curve"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB)
    else:
        args = [curveA, curveB]
    return Util.ComputeFetch(url, args)
def CreateBooleanDifference1(curveA, curveB, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB, tolerance)
    else:
        args = [curveA, curveB, tolerance]
    return Util.ComputeFetch(url, args)
def CreateBooleanDifference2(curveA, subtractors, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curvearray"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, subtractors)
    else:
        args = [curveA, subtractors]
    return Util.ComputeFetch(url, args)
def CreateBooleanDifference3(curveA, subtractors, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curvearray_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, subtractors, tolerance)
    else:
        args = [curveA, subtractors, tolerance]
    return Util.ComputeFetch(url, args)
def CreateTextOutlines(text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createtextoutlines-string_string_double_int_bool_plane_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance)
    else:
        args = [text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance]
    return Util.ComputeFetch(url, args)
def CreateCurve2View(curveA, curveB, vectorA, vectorB, tolerance, angleTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createcurve2view-curve_curve_vector3d_vector3d_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB, vectorA, vectorB, tolerance, angleTolerance)
    else:
        args = [curveA, curveB, vectorA, vectorB, tolerance, angleTolerance]
    return Util.ComputeFetch(url, args)
def DoDirectionsMatch(curveA, curveB, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dodirectionsmatch-curve_curve"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB)
    else:
        args = [curveA, curveB]
    return Util.ComputeFetch(url, args)
def ProjectToMesh(curve, mesh, direction, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttomesh-curve_mesh_vector3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, mesh, direction, tolerance)
    else:
        args = [curve, mesh, direction, tolerance]
    return Util.ComputeFetch(url, args)
def ProjectToMesh1(curve, meshes, direction, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttomesh-curve_mesharray_vector3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, meshes, direction, tolerance)
    else:
        args = [curve, meshes, direction, tolerance]
    return Util.ComputeFetch(url, args)
def ProjectToMesh2(curves, meshes, direction, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttomesh-curvearray_mesharray_vector3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curves, meshes, direction, tolerance)
    else:
        args = [curves, meshes, direction, tolerance]
    return Util.ComputeFetch(url, args)
def ProjectToBrep(curve, brep, direction, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttobrep-curve_brep_vector3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, brep, direction, tolerance)
    else:
        args = [curve, brep, direction, tolerance]
    return Util.ComputeFetch(url, args)
def ProjectToBrep1(curve, breps, direction, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttobrep-curve_breparray_vector3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, breps, direction, tolerance)
    else:
        args = [curve, breps, direction, tolerance]
    return Util.ComputeFetch(url, args)
def ProjectToBrep2(curve, breps, direction, tolerance, brepIndices, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttobrep-curve_breparray_vector3d_double_intarray"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, breps, direction, tolerance, brepIndices)
    else:
        args = [curve, breps, direction, tolerance, brepIndices]
    return Util.ComputeFetch(url, args)
def ProjectToBrep3(curves, breps, direction, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttobrep-curvearray_breparray_vector3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curves, breps, direction, tolerance)
    else:
        args = [curves, breps, direction, tolerance]
    return Util.ComputeFetch(url, args)
def ProjectToPlane(curve, plane, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/projecttoplane-curve_plane"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, plane)
    else:
        args = [curve, plane]
    return Util.ComputeFetch(url, args)
def PullToBrepFace(curve, face, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/pulltobrepface-curve_brepface_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, face, tolerance)
    else:
        args = [curve, face, tolerance]
    return Util.ComputeFetch(url, args)
def PlanarClosedCurveRelationship(curveA, curveB, testPlane, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/planarclosedcurverelationship-curve_curve_plane_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB, testPlane, tolerance)
    else:
        args = [curveA, curveB, testPlane, tolerance]
    return Util.ComputeFetch(url, args)
def PlanarCurveCollision(curveA, curveB, testPlane, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/planarcurvecollision-curve_curve_plane_double"
    if multiple:
        url += "?multiple=true"
        args = zip(curveA, curveB, testPlane, tolerance)
    else:
        args = [curveA, curveB, testPlane, tolerance]
    return Util.ComputeFetch(url, args)
def DuplicateSegments(thisCurve, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/duplicatesegments-curve"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve)
    else:
        args = [thisCurve]
    return Util.ComputeFetch(url, args)
def Smooth(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/smooth-curve_double_bool_bool_bool_bool_smoothingcoordinatesystem"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem)
    else:
        args = [thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem]
    return Util.ComputeFetch(url, args)
def Smooth1(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, plane, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/smooth-curve_double_bool_bool_bool_bool_smoothingcoordinatesystem_plane"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, plane)
    else:
        args = [thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, plane]
    return Util.ComputeFetch(url, args)
def MakeClosed(thisCurve, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/makeclosed-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, tolerance)
    else:
        args = [thisCurve, tolerance]
    return Util.ComputeFetch(url, args)
def LcoalClosestPoint(thisCurve, testPoint, seed, t, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise.

    NOTE(review): the misspelled name matches the server route string below,
    so it is kept as-is for API compatibility — renaming would break callers.
    """
    url = "rhino/geometry/curve/lcoalclosestpoint-curve_point3d_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, testPoint, seed, t)
    else:
        args = [thisCurve, testPoint, seed, t]
    return Util.ComputeFetch(url, args)
def ClosestPoint(thisCurve, testPoint, t, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/closestpoint-curve_point3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, testPoint, t)
    else:
        args = [thisCurve, testPoint, t]
    return Util.ComputeFetch(url, args)
def ClosestPoint1(thisCurve, testPoint, t, maximumDistance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/closestpoint-curve_point3d_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, testPoint, t, maximumDistance)
    else:
        args = [thisCurve, testPoint, t, maximumDistance]
    return Util.ComputeFetch(url, args)
def Contains(thisCurve, testPoint, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/contains-curve_point3d"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, testPoint)
    else:
        args = [thisCurve, testPoint]
    return Util.ComputeFetch(url, args)
def Contains1(thisCurve, testPoint, plane, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/contains-curve_point3d_plane"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, testPoint, plane)
    else:
        args = [thisCurve, testPoint, plane]
    return Util.ComputeFetch(url, args)
def Contains2(thisCurve, testPoint, plane, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/contains-curve_point3d_plane_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, testPoint, plane, tolerance)
    else:
        args = [thisCurve, testPoint, plane, tolerance]
    return Util.ComputeFetch(url, args)
def ExtremeParameters(thisCurve, direction, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extremeparameters-curve_vector3d"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, direction)
    else:
        args = [thisCurve, direction]
    return Util.ComputeFetch(url, args)
def CreatePeriodicCurve(curve, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createperiodiccurve-curve"
    if multiple:
        url += "?multiple=true"
        args = zip(curve)
    else:
        args = [curve]
    return Util.ComputeFetch(url, args)
def CreatePeriodicCurve1(curve, smooth, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/createperiodiccurve-curve_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(curve, smooth)
    else:
        args = [curve, smooth]
    return Util.ComputeFetch(url, args)
def PointAtLength(thisCurve, length, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/pointatlength-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, length)
    else:
        args = [thisCurve, length]
    return Util.ComputeFetch(url, args)
def PointAtNormalizedLength(thisCurve, length, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/pointatnormalizedlength-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, length)
    else:
        args = [thisCurve, length]
    return Util.ComputeFetch(url, args)
def PerpendicularFrameAt(thisCurve, t, plane, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/perpendicularframeat-curve_double_plane"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, t, plane)
    else:
        args = [thisCurve, t, plane]
    return Util.ComputeFetch(url, args)
def GetPerpendicularFrames(thisCurve, parameters, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/getperpendicularframes-curve_doublearray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, parameters)
    else:
        args = [thisCurve, parameters]
    return Util.ComputeFetch(url, args)
def GetLength(thisCurve, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/getlength-curve"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve)
    else:
        args = [thisCurve]
    return Util.ComputeFetch(url, args)
def GetLength1(thisCurve, fractionalTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/getlength-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, fractionalTolerance)
    else:
        args = [thisCurve, fractionalTolerance]
    return Util.ComputeFetch(url, args)
def GetLength2(thisCurve, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/getlength-curve_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, subdomain)
    else:
        args = [thisCurve, subdomain]
    return Util.ComputeFetch(url, args)
def GetLength3(thisCurve, fractionalTolerance, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/getlength-curve_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, fractionalTolerance, subdomain)
    else:
        args = [thisCurve, fractionalTolerance, subdomain]
    return Util.ComputeFetch(url, args)
def IsShort(thisCurve, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/isshort-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, tolerance)
    else:
        args = [thisCurve, tolerance]
    return Util.ComputeFetch(url, args)
def IsShort1(thisCurve, tolerance, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/isshort-curve_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, tolerance, subdomain)
    else:
        args = [thisCurve, tolerance, subdomain]
    return Util.ComputeFetch(url, args)
def RemoveShortSegments(thisCurve, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/removeshortsegments-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, tolerance)
    else:
        args = [thisCurve, tolerance]
    return Util.ComputeFetch(url, args)
def LengthParameter(thisCurve, segmentLength, t, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/lengthparameter-curve_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, t)
    else:
        args = [thisCurve, segmentLength, t]
    return Util.ComputeFetch(url, args)
def LengthParameter1(thisCurve, segmentLength, t, fractionalTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/lengthparameter-curve_double_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, t, fractionalTolerance)
    else:
        args = [thisCurve, segmentLength, t, fractionalTolerance]
    return Util.ComputeFetch(url, args)
def LengthParameter2(thisCurve, segmentLength, t, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/lengthparameter-curve_double_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, t, subdomain)
    else:
        args = [thisCurve, segmentLength, t, subdomain]
    return Util.ComputeFetch(url, args)
def LengthParameter3(thisCurve, segmentLength, t, fractionalTolerance, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/lengthparameter-curve_double_double_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, t, fractionalTolerance, subdomain)
    else:
        args = [thisCurve, segmentLength, t, fractionalTolerance, subdomain]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameter(thisCurve, s, t, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, t)
    else:
        args = [thisCurve, s, t]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameter1(thisCurve, s, t, fractionalTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, t, fractionalTolerance)
    else:
        args = [thisCurve, s, t, fractionalTolerance]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameter2(thisCurve, s, t, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, t, subdomain)
    else:
        args = [thisCurve, s, t, subdomain]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameter3(thisCurve, s, t, fractionalTolerance, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, t, fractionalTolerance, subdomain)
    else:
        args = [thisCurve, s, t, fractionalTolerance, subdomain]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameters(thisCurve, s, absoluteTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, absoluteTolerance)
    else:
        args = [thisCurve, s, absoluteTolerance]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameters1(thisCurve, s, absoluteTolerance, fractionalTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, absoluteTolerance, fractionalTolerance)
    else:
        args = [thisCurve, s, absoluteTolerance, fractionalTolerance]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameters2(thisCurve, s, absoluteTolerance, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, absoluteTolerance, subdomain)
    else:
        args = [thisCurve, s, absoluteTolerance, subdomain]
    return Util.ComputeFetch(url, args)
def NormalizedLengthParameters3(thisCurve, s, absoluteTolerance, fractionalTolerance, subdomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double_double_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, s, absoluteTolerance, fractionalTolerance, subdomain)
    else:
        args = [thisCurve, s, absoluteTolerance, fractionalTolerance, subdomain]
    return Util.ComputeFetch(url, args)
def DivideByCount(thisCurve, segmentCount, includeEnds, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dividebycount-curve_int_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentCount, includeEnds)
    else:
        args = [thisCurve, segmentCount, includeEnds]
    return Util.ComputeFetch(url, args)
def DivideByCount1(thisCurve, segmentCount, includeEnds, points, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dividebycount-curve_int_bool_point3darray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentCount, includeEnds, points)
    else:
        args = [thisCurve, segmentCount, includeEnds, points]
    return Util.ComputeFetch(url, args)
def DivideByLength(thisCurve, segmentLength, includeEnds, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, includeEnds)
    else:
        args = [thisCurve, segmentLength, includeEnds]
    return Util.ComputeFetch(url, args)
def DivideByLength1(thisCurve, segmentLength, includeEnds, reverse, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, includeEnds, reverse)
    else:
        args = [thisCurve, segmentLength, includeEnds, reverse]
    return Util.ComputeFetch(url, args)
def DivideByLength2(thisCurve, segmentLength, includeEnds, points, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool_point3darray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, includeEnds, points)
    else:
        args = [thisCurve, segmentLength, includeEnds, points]
    return Util.ComputeFetch(url, args)
def DivideByLength3(thisCurve, segmentLength, includeEnds, reverse, points, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool_bool_point3darray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, segmentLength, includeEnds, reverse, points)
    else:
        args = [thisCurve, segmentLength, includeEnds, reverse, points]
    return Util.ComputeFetch(url, args)
def DivideEquidistant(thisCurve, distance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/divideequidistant-curve_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, distance)
    else:
        args = [thisCurve, distance]
    return Util.ComputeFetch(url, args)
def DivideAsContour(thisCurve, contourStart, contourEnd, interval, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/divideascontour-curve_point3d_point3d_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, contourStart, contourEnd, interval)
    else:
        args = [thisCurve, contourStart, contourEnd, interval]
    return Util.ComputeFetch(url, args)
def Trim(thisCurve, side, length, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/trim-curve_curveend_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, length)
    else:
        args = [thisCurve, side, length]
    return Util.ComputeFetch(url, args)
def Split(thisCurve, cutter, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/split-curve_brep_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, cutter, tolerance)
    else:
        args = [thisCurve, cutter, tolerance]
    return Util.ComputeFetch(url, args)
def Split1(thisCurve, cutter, tolerance, angleToleranceRadians, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/split-curve_brep_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, cutter, tolerance, angleToleranceRadians)
    else:
        args = [thisCurve, cutter, tolerance, angleToleranceRadians]
    return Util.ComputeFetch(url, args)
def Split2(thisCurve, cutter, tolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/split-curve_surface_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, cutter, tolerance)
    else:
        args = [thisCurve, cutter, tolerance]
    return Util.ComputeFetch(url, args)
def Split3(thisCurve, cutter, tolerance, angleToleranceRadians, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/split-curve_surface_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, cutter, tolerance, angleToleranceRadians)
    else:
        args = [thisCurve, cutter, tolerance, angleToleranceRadians]
    return Util.ComputeFetch(url, args)
def Extend(thisCurve, t0, t1, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extend-curve_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, t0, t1)
    else:
        args = [thisCurve, t0, t1]
    return Util.ComputeFetch(url, args)
def Extend1(thisCurve, domain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extend-curve_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, domain)
    else:
        args = [thisCurve, domain]
    return Util.ComputeFetch(url, args)
def Extend2(thisCurve, side, length, style, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extend-curve_curveend_double_curveextensionstyle"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, length, style)
    else:
        args = [thisCurve, side, length, style]
    return Util.ComputeFetch(url, args)
def Extend3(thisCurve, side, style, geometry, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extend-curve_curveend_curveextensionstyle_geometrybasearray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, style, geometry)
    else:
        args = [thisCurve, side, style, geometry]
    return Util.ComputeFetch(url, args)
def Extend4(thisCurve, side, style, endPoint, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extend-curve_curveend_curveextensionstyle_point3d"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, style, endPoint)
    else:
        args = [thisCurve, side, style, endPoint]
    return Util.ComputeFetch(url, args)
def ExtendOnSurface(thisCurve, side, surface, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extendonsurface-curve_curveend_surface"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, surface)
    else:
        args = [thisCurve, side, surface]
    return Util.ComputeFetch(url, args)
def ExtendOnSurface1(thisCurve, side, face, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extendonsurface-curve_curveend_brepface"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, face)
    else:
        args = [thisCurve, side, face]
    return Util.ComputeFetch(url, args)
def ExtendByLine(thisCurve, side, geometry, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extendbyline-curve_curveend_geometrybasearray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, geometry)
    else:
        args = [thisCurve, side, geometry]
    return Util.ComputeFetch(url, args)
def ExtendByArc(thisCurve, side, geometry, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/extendbyarc-curve_curveend_geometrybasearray"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, side, geometry)
    else:
        args = [thisCurve, side, geometry]
    return Util.ComputeFetch(url, args)
def Simplify(thisCurve, options, distanceTolerance, angleToleranceRadians, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/simplify-curve_curvesimplifyoptions_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, options, distanceTolerance, angleToleranceRadians)
    else:
        args = [thisCurve, options, distanceTolerance, angleToleranceRadians]
    return Util.ComputeFetch(url, args)
def SimplifyEnd(thisCurve, end, options, distanceTolerance, angleToleranceRadians, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/simplifyend-curve_curveend_curvesimplifyoptions_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, end, options, distanceTolerance, angleToleranceRadians)
    else:
        args = [thisCurve, end, options, distanceTolerance, angleToleranceRadians]
    return Util.ComputeFetch(url, args)
def Fair(thisCurve, distanceTolerance, angleTolerance, clampStart, clampEnd, iterations, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/fair-curve_double_double_int_int_int"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, distanceTolerance, angleTolerance, clampStart, clampEnd, iterations)
    else:
        args = [thisCurve, distanceTolerance, angleTolerance, clampStart, clampEnd, iterations]
    return Util.ComputeFetch(url, args)
def Fit(thisCurve, degree, fitTolerance, angleTolerance, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/fit-curve_int_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, degree, fitTolerance, angleTolerance)
    else:
        args = [thisCurve, degree, fitTolerance, angleTolerance]
    return Util.ComputeFetch(url, args)
def Rebuild(thisCurve, pointCount, degree, preserveTangents, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/rebuild-curve_int_int_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, pointCount, degree, preserveTangents)
    else:
        args = [thisCurve, pointCount, degree, preserveTangents]
    return Util.ComputeFetch(url, args)
def ToPolyline(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/topolyline-curve_int_int_double_double_double_double_double_double_bool"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint)
    else:
        args = [thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint]
    return Util.ComputeFetch(url, args)
def ToPolyline1(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, curveDomain, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/topolyline-curve_int_int_double_double_double_double_double_double_bool_interval"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, curveDomain)
    else:
        args = [thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, curveDomain]
    return Util.ComputeFetch(url, args)
def ToPolyline2(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength, multiple=False):
    """Rhino Compute proxy; *multiple* batches parallel argument sequences element-wise."""
    url = "rhino/geometry/curve/topolyline-curve_double_double_double_double"
    if multiple:
        url += "?multiple=true"
        args = zip(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength)
    else:
        args = [thisCurve, tolerance, angleTolerance, minimumLength, maximumLength]
    return Util.ComputeFetch(url, args)
def ToArcsAndLines(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength, multiple=False):
url = "rhino/geometry/curve/toarcsandlines-curve_double_double_double_double"
if multiple: url += "?multiple=true"
args = [thisCurve, tolerance, angleTolerance, minimumLength, maximumLength]
if multiple: args = zip(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength)
response = Util.ComputeFetch(url, args)
return response
def PullToMesh(thisCurve, mesh, tolerance, multiple=False):
url = "rhino/geometry/curve/pulltomesh-curve_mesh_double"
if multiple: url += "?multiple=true"
args = [thisCurve, mesh, tolerance]
if multiple: args = zip(thisCurve, mesh, tolerance)
response = Util.ComputeFetch(url, args)
return response
def Offset(thisCurve, plane, distance, tolerance, cornerStyle, multiple=False):
    """Offset the curve in a plane by a distance, with the given corner style.

    Generated Rhino Compute stub: multiple=True switches to batch mode by
    zipping the argument sequences and appending ``?multiple=true``.
    """
    url = "rhino/geometry/curve/offset-curve_plane_double_double_curveoffsetcornerstyle"
    if multiple: url += "?multiple=true"
    args = [thisCurve, plane, distance, tolerance, cornerStyle]
    if multiple: args = zip(thisCurve, plane, distance, tolerance, cornerStyle)
    response = Util.ComputeFetch(url, args)
    return response
def Offset1(thisCurve, directionPoint, normal, distance, tolerance, cornerStyle, multiple=False):
    """Offset the curve using a direction point and normal vector."""
    url = "rhino/geometry/curve/offset-curve_point3d_vector3d_double_double_curveoffsetcornerstyle"
    if multiple: url += "?multiple=true"
    args = [thisCurve, directionPoint, normal, distance, tolerance, cornerStyle]
    if multiple: args = zip(thisCurve, directionPoint, normal, distance, tolerance, cornerStyle)
    response = Util.ComputeFetch(url, args)
    return response
def RibbonOffset(thisCurve, distance, blendRadius, directionPoint, normal, tolerance, multiple=False):
    """Create a ribbon offset of the curve with a blend radius."""
    url = "rhino/geometry/curve/ribbonoffset-curve_double_double_point3d_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, distance, blendRadius, directionPoint, normal, tolerance]
    if multiple: args = zip(thisCurve, distance, blendRadius, directionPoint, normal, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetOnSurface(thisCurve, face, distance, fittingTolerance, multiple=False):
    """Offset a curve lying on a brep face by a constant distance."""
    url = "rhino/geometry/curve/offsetonsurface-curve_brepface_double_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, face, distance, fittingTolerance]
    if multiple: args = zip(thisCurve, face, distance, fittingTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetOnSurface1(thisCurve, face, throughPoint, fittingTolerance, multiple=False):
    """Offset a curve on a brep face so it passes through a 2D point."""
    url = "rhino/geometry/curve/offsetonsurface-curve_brepface_point2d_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, face, throughPoint, fittingTolerance]
    if multiple: args = zip(thisCurve, face, throughPoint, fittingTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetOnSurface2(thisCurve, face, curveParameters, offsetDistances, fittingTolerance, multiple=False):
    """Offset a curve on a brep face with variable distances at given parameters."""
    url = "rhino/geometry/curve/offsetonsurface-curve_brepface_doublearray_doublearray_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, face, curveParameters, offsetDistances, fittingTolerance]
    if multiple: args = zip(thisCurve, face, curveParameters, offsetDistances, fittingTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetOnSurface3(thisCurve, surface, distance, fittingTolerance, multiple=False):
    """Offset a curve lying on a surface by a constant distance."""
    url = "rhino/geometry/curve/offsetonsurface-curve_surface_double_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, surface, distance, fittingTolerance]
    if multiple: args = zip(thisCurve, surface, distance, fittingTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetOnSurface4(thisCurve, surface, throughPoint, fittingTolerance, multiple=False):
    """Offset a curve on a surface so it passes through a 2D point."""
    url = "rhino/geometry/curve/offsetonsurface-curve_surface_point2d_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, surface, throughPoint, fittingTolerance]
    if multiple: args = zip(thisCurve, surface, throughPoint, fittingTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetOnSurface5(thisCurve, surface, curveParameters, offsetDistances, fittingTolerance, multiple=False):
    """Offset a curve on a surface with variable distances at given parameters."""
    url = "rhino/geometry/curve/offsetonsurface-curve_surface_doublearray_doublearray_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, surface, curveParameters, offsetDistances, fittingTolerance]
    if multiple: args = zip(thisCurve, surface, curveParameters, offsetDistances, fittingTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def PullToBrepFace(thisCurve, face, tolerance, multiple=False):
    """Pull the curve to a brep face within tolerance.

    NOTE(review): a second, equivalent PullToBrepFace wrapper (same endpoint)
    is defined later in this generated file and shadows this one at import time.
    """
    url = "rhino/geometry/curve/pulltobrepface-curve_brepface_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, face, tolerance]
    if multiple: args = zip(thisCurve, face, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def OffsetNormalToSurface(thisCurve, surface, height, multiple=False):
    """Offset the curve normal to a surface by the given height."""
    url = "rhino/geometry/curve/offsetnormaltosurface-curve_surface_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, surface, height]
    if multiple: args = zip(thisCurve, surface, height)
    response = Util.ComputeFetch(url, args)
    return response
|
from . import Util
def GetConicSectionType(thisCurve, multiple=False):
    """Return the conic-section classification of the curve.

    Generated Rhino Compute stub: multiple=True batches zipped argument
    sequences and appends ``?multiple=true`` to the endpoint URL.
    """
    url = "rhino/geometry/curve/getconicsectiontype-curve"
    if multiple: url += "?multiple=true"
    args = [thisCurve]
    if multiple: args = zip(thisCurve)
    response = Util.ComputeFetch(url, args)
    return response
def CreateInterpolatedCurve(points, degree, multiple=False):
    """Create a curve of the given degree interpolating the points."""
    url = "rhino/geometry/curve/createinterpolatedcurve-point3darray_int"
    if multiple: url += "?multiple=true"
    args = [points, degree]
    if multiple: args = zip(points, degree)
    response = Util.ComputeFetch(url, args)
    return response
def CreateInterpolatedCurve1(points, degree, knots, multiple=False):
    """Create an interpolated curve with an explicit knot style."""
    url = "rhino/geometry/curve/createinterpolatedcurve-point3darray_int_curveknotstyle"
    if multiple: url += "?multiple=true"
    args = [points, degree, knots]
    if multiple: args = zip(points, degree, knots)
    response = Util.ComputeFetch(url, args)
    return response
def CreateInterpolatedCurve2(points, degree, knots, startTangent, endTangent, multiple=False):
    """Create an interpolated curve with knot style and end tangent vectors."""
    url = "rhino/geometry/curve/createinterpolatedcurve-point3darray_int_curveknotstyle_vector3d_vector3d"
    if multiple: url += "?multiple=true"
    args = [points, degree, knots, startTangent, endTangent]
    if multiple: args = zip(points, degree, knots, startTangent, endTangent)
    response = Util.ComputeFetch(url, args)
    return response
def CreateSoftEditCurve(curve, t, delta, length, fixEnds, multiple=False):
    """Soft-edit the curve by moving the point at parameter t by delta."""
    url = "rhino/geometry/curve/createsofteditcurve-curve_double_vector3d_double_bool"
    if multiple: url += "?multiple=true"
    args = [curve, t, delta, length, fixEnds]
    if multiple: args = zip(curve, t, delta, length, fixEnds)
    response = Util.ComputeFetch(url, args)
    return response
def CreateFilletCornersCurve(curve, radius, tolerance, angleTolerance, multiple=False):
    """Fillet the sharp corners of a curve with arcs of the given radius."""
    url = "rhino/geometry/curve/createfilletcornerscurve-curve_double_double_double"
    if multiple: url += "?multiple=true"
    args = [curve, radius, tolerance, angleTolerance]
    if multiple: args = zip(curve, radius, tolerance, angleTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateArcBlend(startPt, startDir, endPt, endDir, controlPointLengthRatio, multiple=False):
    """Create an arc blend between two point/direction pairs."""
    url = "rhino/geometry/curve/createarcblend-point3d_vector3d_point3d_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [startPt, startDir, endPt, endDir, controlPointLengthRatio]
    if multiple: args = zip(startPt, startDir, endPt, endDir, controlPointLengthRatio)
    response = Util.ComputeFetch(url, args)
    return response
def CreateMeanCurve(curveA, curveB, angleToleranceRadians, multiple=False):
    """Create the mean (average) curve of two curves, within an angle tolerance."""
    url = "rhino/geometry/curve/createmeancurve-curve_curve_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, angleToleranceRadians]
    if multiple: args = zip(curveA, curveB, angleToleranceRadians)
    response = Util.ComputeFetch(url, args)
    return response
def CreateMeanCurve1(curveA, curveB, multiple=False):
    """Create the mean (average) curve of two curves with default tolerance."""
    url = "rhino/geometry/curve/createmeancurve-curve_curve"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB]
    if multiple: args = zip(curveA, curveB)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBlendCurve(curveA, curveB, continuity, multiple=False):
    """Create a blend curve between two curves with the given continuity."""
    url = "rhino/geometry/curve/createblendcurve-curve_curve_blendcontinuity"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, continuity]
    if multiple: args = zip(curveA, curveB, continuity)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBlendCurve1(curveA, curveB, continuity, bulgeA, bulgeB, multiple=False):
    """Create a blend curve with continuity and per-end bulge factors."""
    url = "rhino/geometry/curve/createblendcurve-curve_curve_blendcontinuity_double_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, continuity, bulgeA, bulgeB]
    if multiple: args = zip(curveA, curveB, continuity, bulgeA, bulgeB)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBlendCurve2(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1, multiple=False):
    """Create a blend between two curves at specific parameters with per-end continuity."""
    url = "rhino/geometry/curve/createblendcurve-curve_double_bool_blendcontinuity_curve_double_bool_blendcontinuity"
    if multiple: url += "?multiple=true"
    args = [curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1]
    if multiple: args = zip(curve0, t0, reverse0, continuity0, curve1, t1, reverse1, continuity1)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTweenCurves(curve0, curve1, numCurves, multiple=False):
    """Create numCurves tween (in-between) curves between two curves."""
    url = "rhino/geometry/curve/createtweencurves-curve_curve_int"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, numCurves]
    if multiple: args = zip(curve0, curve1, numCurves)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTweenCurves1(curve0, curve1, numCurves, tolerance, multiple=False):
    """Create tween curves between two curves within a tolerance."""
    url = "rhino/geometry/curve/createtweencurves-curve_curve_int_double"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, numCurves, tolerance]
    if multiple: args = zip(curve0, curve1, numCurves, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTweenCurvesWithMatching(curve0, curve1, numCurves, multiple=False):
    """Create tween curves using matched curve parameterization."""
    url = "rhino/geometry/curve/createtweencurveswithmatching-curve_curve_int"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, numCurves]
    if multiple: args = zip(curve0, curve1, numCurves)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTweenCurvesWithMatching1(curve0, curve1, numCurves, tolerance, multiple=False):
    """Create matched tween curves within a tolerance."""
    url = "rhino/geometry/curve/createtweencurveswithmatching-curve_curve_int_double"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, numCurves, tolerance]
    if multiple: args = zip(curve0, curve1, numCurves, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTweenCurvesWithSampling(curve0, curve1, numCurves, numSamples, multiple=False):
    """Create tween curves using point sampling along the input curves."""
    url = "rhino/geometry/curve/createtweencurveswithsampling-curve_curve_int_int"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, numCurves, numSamples]
    if multiple: args = zip(curve0, curve1, numCurves, numSamples)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTweenCurvesWithSampling1(curve0, curve1, numCurves, numSamples, tolerance, multiple=False):
    """Create sampled tween curves within a tolerance."""
    url = "rhino/geometry/curve/createtweencurveswithsampling-curve_curve_int_int_double"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, numCurves, numSamples, tolerance]
    if multiple: args = zip(curve0, curve1, numCurves, numSamples, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def JoinCurves(inputCurves, multiple=False):
    """Join a collection of curves into one or more longer curves."""
    url = "rhino/geometry/curve/joincurves-curvearray"
    if multiple: url += "?multiple=true"
    args = [inputCurves]
    if multiple: args = zip(inputCurves)
    response = Util.ComputeFetch(url, args)
    return response
def JoinCurves1(inputCurves, joinTolerance, multiple=False):
    """Join curves whose ends are within joinTolerance of each other."""
    url = "rhino/geometry/curve/joincurves-curvearray_double"
    if multiple: url += "?multiple=true"
    args = [inputCurves, joinTolerance]
    if multiple: args = zip(inputCurves, joinTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def JoinCurves2(inputCurves, joinTolerance, preserveDirection, multiple=False):
    """Join curves within tolerance, optionally preserving curve direction."""
    url = "rhino/geometry/curve/joincurves-curvearray_double_bool"
    if multiple: url += "?multiple=true"
    args = [inputCurves, joinTolerance, preserveDirection]
    if multiple: args = zip(inputCurves, joinTolerance, preserveDirection)
    response = Util.ComputeFetch(url, args)
    return response
def MakeEndsMeet(curveA, adjustStartCurveA, curveB, adjustStartCurveB, multiple=False):
    """Adjust the chosen ends of two curves so that they meet."""
    url = "rhino/geometry/curve/makeendsmeet-curve_bool_curve_bool"
    if multiple: url += "?multiple=true"
    args = [curveA, adjustStartCurveA, curveB, adjustStartCurveB]
    if multiple: args = zip(curveA, adjustStartCurveA, curveB, adjustStartCurveB)
    response = Util.ComputeFetch(url, args)
    return response
def CreateFillet(curve0, curve1, radius, t0Base, t1Base, multiple=False):
    """Create a fillet arc of the given radius between two curves near base parameters."""
    url = "rhino/geometry/curve/createfillet-curve_curve_double_double_double"
    if multiple: url += "?multiple=true"
    args = [curve0, curve1, radius, t0Base, t1Base]
    if multiple: args = zip(curve0, curve1, radius, t0Base, t1Base)
    response = Util.ComputeFetch(url, args)
    return response
def CreateFilletCurves(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance, multiple=False):
    """Fillet two curves picked near the given points, with trim/join/extension options."""
    url = "rhino/geometry/curve/createfilletcurves-curve_point3d_curve_point3d_double_bool_bool_bool_double_double"
    if multiple: url += "?multiple=true"
    args = [curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance]
    if multiple: args = zip(curve0, point0, curve1, point1, radius, join, trim, arcExtension, tolerance, angleTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanUnion(curves, multiple=False):
    """Boolean union of the regions bounded by a set of closed planar curves."""
    url = "rhino/geometry/curve/createbooleanunion-curvearray"
    if multiple: url += "?multiple=true"
    args = [curves]
    if multiple: args = zip(curves)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanUnion1(curves, tolerance, multiple=False):
    """Boolean union of closed planar curves, within a tolerance."""
    url = "rhino/geometry/curve/createbooleanunion-curvearray_double"
    if multiple: url += "?multiple=true"
    args = [curves, tolerance]
    if multiple: args = zip(curves, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanIntersection(curveA, curveB, multiple=False):
    """Boolean intersection of the regions bounded by two closed planar curves."""
    url = "rhino/geometry/curve/createbooleanintersection-curve_curve"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB]
    if multiple: args = zip(curveA, curveB)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanIntersection1(curveA, curveB, tolerance, multiple=False):
    """Boolean intersection of two closed planar curves, within a tolerance."""
    url = "rhino/geometry/curve/createbooleanintersection-curve_curve_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, tolerance]
    if multiple: args = zip(curveA, curveB, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanDifference(curveA, curveB, multiple=False):
    """Boolean difference (A minus B) of two closed planar curves."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curve"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB]
    if multiple: args = zip(curveA, curveB)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanDifference1(curveA, curveB, tolerance, multiple=False):
    """Boolean difference of two closed planar curves, within a tolerance."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curve_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, tolerance]
    if multiple: args = zip(curveA, curveB, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanDifference2(curveA, subtractors, multiple=False):
    """Boolean difference of curveA minus a set of subtractor curves."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curvearray"
    if multiple: url += "?multiple=true"
    args = [curveA, subtractors]
    if multiple: args = zip(curveA, subtractors)
    response = Util.ComputeFetch(url, args)
    return response
def CreateBooleanDifference3(curveA, subtractors, tolerance, multiple=False):
    """Boolean difference of curveA minus subtractor curves, within a tolerance."""
    url = "rhino/geometry/curve/createbooleandifference-curve_curvearray_double"
    if multiple: url += "?multiple=true"
    args = [curveA, subtractors, tolerance]
    if multiple: args = zip(curveA, subtractors, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateTextOutlines(text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance, multiple=False):
    """Create the outline curves of a text string in the given font and plane."""
    url = "rhino/geometry/curve/createtextoutlines-string_string_double_int_bool_plane_double_double"
    if multiple: url += "?multiple=true"
    args = [text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance]
    if multiple: args = zip(text, font, textHeight, textStyle, closeLoops, plane, smallCapsScale, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def CreateCurve2View(curveA, curveB, vectorA, vectorB, tolerance, angleTolerance, multiple=False):
    """Create a 3D curve from two planar curves viewed along two directions."""
    url = "rhino/geometry/curve/createcurve2view-curve_curve_vector3d_vector3d_double_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, vectorA, vectorB, tolerance, angleTolerance]
    if multiple: args = zip(curveA, curveB, vectorA, vectorB, tolerance, angleTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def DoDirectionsMatch(curveA, curveB, multiple=False):
    """Test whether two curves travel in generally the same direction."""
    url = "rhino/geometry/curve/dodirectionsmatch-curve_curve"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB]
    if multiple: args = zip(curveA, curveB)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToMesh(curve, mesh, direction, tolerance, multiple=False):
    """Project a curve onto a mesh along a direction vector."""
    url = "rhino/geometry/curve/projecttomesh-curve_mesh_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [curve, mesh, direction, tolerance]
    if multiple: args = zip(curve, mesh, direction, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToMesh1(curve, meshes, direction, tolerance, multiple=False):
    """Project a curve onto several meshes along a direction vector."""
    url = "rhino/geometry/curve/projecttomesh-curve_mesharray_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [curve, meshes, direction, tolerance]
    if multiple: args = zip(curve, meshes, direction, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToMesh2(curves, meshes, direction, tolerance, multiple=False):
    """Project several curves onto several meshes along a direction vector."""
    url = "rhino/geometry/curve/projecttomesh-curvearray_mesharray_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [curves, meshes, direction, tolerance]
    if multiple: args = zip(curves, meshes, direction, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToBrep(curve, brep, direction, tolerance, multiple=False):
    """Project a curve onto a brep along a direction vector."""
    url = "rhino/geometry/curve/projecttobrep-curve_brep_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [curve, brep, direction, tolerance]
    if multiple: args = zip(curve, brep, direction, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToBrep1(curve, breps, direction, tolerance, multiple=False):
    """Project a curve onto several breps along a direction vector."""
    url = "rhino/geometry/curve/projecttobrep-curve_breparray_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [curve, breps, direction, tolerance]
    if multiple: args = zip(curve, breps, direction, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToBrep2(curve, breps, direction, tolerance, brepIndices, multiple=False):
    """Project a curve onto breps, also reporting which brep each result came from."""
    url = "rhino/geometry/curve/projecttobrep-curve_breparray_vector3d_double_intarray"
    if multiple: url += "?multiple=true"
    args = [curve, breps, direction, tolerance, brepIndices]
    if multiple: args = zip(curve, breps, direction, tolerance, brepIndices)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToBrep3(curves, breps, direction, tolerance, multiple=False):
    """Project several curves onto several breps along a direction vector."""
    url = "rhino/geometry/curve/projecttobrep-curvearray_breparray_vector3d_double"
    if multiple: url += "?multiple=true"
    args = [curves, breps, direction, tolerance]
    if multiple: args = zip(curves, breps, direction, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ProjectToPlane(curve, plane, multiple=False):
    """Project a curve onto a plane."""
    url = "rhino/geometry/curve/projecttoplane-curve_plane"
    if multiple: url += "?multiple=true"
    args = [curve, plane]
    if multiple: args = zip(curve, plane)
    response = Util.ComputeFetch(url, args)
    return response
def PullToBrepFace(curve, face, tolerance, multiple=False):
    """Pull a curve to a brep face within tolerance.

    NOTE(review): this generated stub redefines (and therefore shadows) the
    PullToBrepFace wrapper declared earlier in this file; both target the same
    endpoint, so behavior is unchanged, but the duplication comes from the
    code generator.
    """
    url = "rhino/geometry/curve/pulltobrepface-curve_brepface_double"
    if multiple: url += "?multiple=true"
    args = [curve, face, tolerance]
    if multiple: args = zip(curve, face, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def PlanarClosedCurveRelationship(curveA, curveB, testPlane, tolerance, multiple=False):
    """Classify the containment relationship of two closed curves in a plane."""
    url = "rhino/geometry/curve/planarclosedcurverelationship-curve_curve_plane_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, testPlane, tolerance]
    if multiple: args = zip(curveA, curveB, testPlane, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def PlanarCurveCollision(curveA, curveB, testPlane, tolerance, multiple=False):
    """Test whether two curves intersect within a plane."""
    url = "rhino/geometry/curve/planarcurvecollision-curve_curve_plane_double"
    if multiple: url += "?multiple=true"
    args = [curveA, curveB, testPlane, tolerance]
    if multiple: args = zip(curveA, curveB, testPlane, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def DuplicateSegments(thisCurve, multiple=False):
    """Duplicate the individual segments of the curve."""
    url = "rhino/geometry/curve/duplicatesegments-curve"
    if multiple: url += "?multiple=true"
    args = [thisCurve]
    if multiple: args = zip(thisCurve)
    response = Util.ComputeFetch(url, args)
    return response
def Smooth(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, multiple=False):
    """Smooth the curve along selected axes in the given coordinate system."""
    url = "rhino/geometry/curve/smooth-curve_double_bool_bool_bool_bool_smoothingcoordinatesystem"
    if multiple: url += "?multiple=true"
    args = [thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem]
    if multiple: args = zip(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem)
    response = Util.ComputeFetch(url, args)
    return response
def Smooth1(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, plane, multiple=False):
    """Smooth the curve relative to an explicit plane."""
    url = "rhino/geometry/curve/smooth-curve_double_bool_bool_bool_bool_smoothingcoordinatesystem_plane"
    if multiple: url += "?multiple=true"
    args = [thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, plane]
    if multiple: args = zip(thisCurve, smoothFactor, bXSmooth, bYSmooth, bZSmooth, bFixBoundaries, coordinateSystem, plane)
    response = Util.ComputeFetch(url, args)
    return response
def MakeClosed(thisCurve, tolerance, multiple=False):
    """Close the curve if its endpoints are within the given tolerance."""
    url = "rhino/geometry/curve/makeclosed-curve_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, tolerance]
    if multiple: args = zip(thisCurve, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def LcoalClosestPoint(thisCurve, testPoint, seed, t, multiple=False):
    """Find the point on the curve locally closest to testPoint, searching
    from the seed parameter.

    NOTE: the misspelling ("Lcoal" for "Local") mirrors the upstream Rhino
    Compute endpoint name and is preserved for backward compatibility; new
    code should call the correctly spelled ``LocalClosestPoint`` alias below.
    """
    url = "rhino/geometry/curve/lcoalclosestpoint-curve_point3d_double_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, testPoint, seed, t]
    if multiple: args = zip(thisCurve, testPoint, seed, t)
    response = Util.ComputeFetch(url, args)
    return response


# Backward-compatible, correctly spelled alias for LcoalClosestPoint.
LocalClosestPoint = LcoalClosestPoint
def ClosestPoint(thisCurve, testPoint, t, multiple=False):
    """Find the parameter of the point on the curve closest to testPoint."""
    url = "rhino/geometry/curve/closestpoint-curve_point3d_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, testPoint, t]
    if multiple: args = zip(thisCurve, testPoint, t)
    response = Util.ComputeFetch(url, args)
    return response
def ClosestPoint1(thisCurve, testPoint, t, maximumDistance, multiple=False):
    """Closest-point search limited to results within maximumDistance."""
    url = "rhino/geometry/curve/closestpoint-curve_point3d_double_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, testPoint, t, maximumDistance]
    if multiple: args = zip(thisCurve, testPoint, t, maximumDistance)
    response = Util.ComputeFetch(url, args)
    return response
def Contains(thisCurve, testPoint, multiple=False):
    """Point-containment test relative to this (closed, planar) curve."""
    url = "rhino/geometry/curve/contains-curve_point3d"
    if multiple: url += "?multiple=true"
    args = [thisCurve, testPoint]
    if multiple: args = zip(thisCurve, testPoint)
    response = Util.ComputeFetch(url, args)
    return response
def Contains1(thisCurve, testPoint, plane, multiple=False):
    """Point-containment test in an explicit plane."""
    url = "rhino/geometry/curve/contains-curve_point3d_plane"
    if multiple: url += "?multiple=true"
    args = [thisCurve, testPoint, plane]
    if multiple: args = zip(thisCurve, testPoint, plane)
    response = Util.ComputeFetch(url, args)
    return response
def Contains2(thisCurve, testPoint, plane, tolerance, multiple=False):
    """Point-containment test in a plane, within a tolerance."""
    url = "rhino/geometry/curve/contains-curve_point3d_plane_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, testPoint, plane, tolerance]
    if multiple: args = zip(thisCurve, testPoint, plane, tolerance)
    response = Util.ComputeFetch(url, args)
    return response
def ExtremeParameters(thisCurve, direction, multiple=False):
    """Curve parameters at the extreme points in the given direction."""
    url = "rhino/geometry/curve/extremeparameters-curve_vector3d"
    if multiple: url += "?multiple=true"
    args = [thisCurve, direction]
    if multiple: args = zip(thisCurve, direction)
    response = Util.ComputeFetch(url, args)
    return response
def CreatePeriodicCurve(curve, multiple=False):
    """Create a periodic version of the curve."""
    url = "rhino/geometry/curve/createperiodiccurve-curve"
    if multiple: url += "?multiple=true"
    args = [curve]
    if multiple: args = zip(curve)
    response = Util.ComputeFetch(url, args)
    return response
def CreatePeriodicCurve1(curve, smooth, multiple=False):
    """Create a periodic version of the curve, optionally smoothing the seam."""
    url = "rhino/geometry/curve/createperiodiccurve-curve_bool"
    if multiple: url += "?multiple=true"
    args = [curve, smooth]
    if multiple: args = zip(curve, smooth)
    response = Util.ComputeFetch(url, args)
    return response
def PointAtLength(thisCurve, length, multiple=False):
    """Point at the given arc length from the start of the curve."""
    url = "rhino/geometry/curve/pointatlength-curve_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, length]
    if multiple: args = zip(thisCurve, length)
    response = Util.ComputeFetch(url, args)
    return response
def PointAtNormalizedLength(thisCurve, length, multiple=False):
    """Point at the given normalized (0..1) arc length along the curve."""
    url = "rhino/geometry/curve/pointatnormalizedlength-curve_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, length]
    if multiple: args = zip(thisCurve, length)
    response = Util.ComputeFetch(url, args)
    return response
def PerpendicularFrameAt(thisCurve, t, plane, multiple=False):
    """Frame (plane) perpendicular to the curve at parameter t."""
    url = "rhino/geometry/curve/perpendicularframeat-curve_double_plane"
    if multiple: url += "?multiple=true"
    args = [thisCurve, t, plane]
    if multiple: args = zip(thisCurve, t, plane)
    response = Util.ComputeFetch(url, args)
    return response
def GetPerpendicularFrames(thisCurve, parameters, multiple=False):
    """Perpendicular frames at a list of curve parameters."""
    url = "rhino/geometry/curve/getperpendicularframes-curve_doublearray"
    if multiple: url += "?multiple=true"
    args = [thisCurve, parameters]
    if multiple: args = zip(thisCurve, parameters)
    response = Util.ComputeFetch(url, args)
    return response
def GetLength(thisCurve, multiple=False):
    """Arc length of the entire curve."""
    url = "rhino/geometry/curve/getlength-curve"
    if multiple: url += "?multiple=true"
    args = [thisCurve]
    if multiple: args = zip(thisCurve)
    response = Util.ComputeFetch(url, args)
    return response
def GetLength1(thisCurve, fractionalTolerance, multiple=False):
    """Arc length of the curve computed to a fractional tolerance."""
    url = "rhino/geometry/curve/getlength-curve_double"
    if multiple: url += "?multiple=true"
    args = [thisCurve, fractionalTolerance]
    if multiple: args = zip(thisCurve, fractionalTolerance)
    response = Util.ComputeFetch(url, args)
    return response
def GetLength2(thisCurve, subdomain, multiple=False):
    """Arc length of the curve over a sub-domain (Interval)."""
    url = "rhino/geometry/curve/getlength-curve_interval"
    if multiple: url += "?multiple=true"
    args = [thisCurve, subdomain]
    if multiple: args = zip(thisCurve, subdomain)
    response = Util.ComputeFetch(url, args)
    return response
def GetLength3(thisCurve, fractionalTolerance, subdomain, multiple=False):
    """Arc length over a sub-domain, computed to a fractional tolerance."""
    url = "rhino/geometry/curve/getlength-curve_double_interval"
    if multiple: url += "?multiple=true"
    args = [thisCurve, fractionalTolerance, subdomain]
    if multiple: args = zip(thisCurve, fractionalTolerance, subdomain)
    response = Util.ComputeFetch(url, args)
    return response
def IsShort(thisCurve, tolerance, multiple=False):
url = "rhino/geometry/curve/isshort-curve_double"
if multiple: url += "?multiple=true"
args = [thisCurve, tolerance]
if multiple: args = zip(thisCurve, tolerance)
response = Util.ComputeFetch(url, args)
return response
def IsShort1(thisCurve, tolerance, subdomain, multiple=False):
url = "rhino/geometry/curve/isshort-curve_double_interval"
if multiple: url += "?multiple=true"
args = [thisCurve, tolerance, subdomain]
if multiple: args = zip(thisCurve, tolerance, subdomain)
response = Util.ComputeFetch(url, args)
return response
def RemoveShortSegments(thisCurve, tolerance, multiple=False):
url = "rhino/geometry/curve/removeshortsegments-curve_double"
if multiple: url += "?multiple=true"
args = [thisCurve, tolerance]
if multiple: args = zip(thisCurve, tolerance)
response = Util.ComputeFetch(url, args)
return response
def LengthParameter(thisCurve, segmentLength, t, multiple=False):
url = "rhino/geometry/curve/lengthparameter-curve_double_double"
if multiple: url += "?multiple=true"
args = [thisCurve, segmentLength, t]
if multiple: args = zip(thisCurve, segmentLength, t)
response = Util.ComputeFetch(url, args)
return response
def LengthParameter1(thisCurve, segmentLength, t, fractionalTolerance, multiple=False):
url = "rhino/geometry/curve/lengthparameter-curve_double_double_double"
if multiple: url += "?multiple=true"
args = [thisCurve, segmentLength, t, fractionalTolerance]
if multiple: args = zip(thisCurve, segmentLength, t, fractionalTolerance)
response = Util.ComputeFetch(url, args)
return response
def LengthParameter2(thisCurve, segmentLength, t, subdomain, multiple=False):
url = "rhino/geometry/curve/lengthparameter-curve_double_double_interval"
if multiple: url += "?multiple=true"
args = [thisCurve, segmentLength, t, subdomain]
if multiple: args = zip(thisCurve, segmentLength, t, subdomain)
response = Util.ComputeFetch(url, args)
return response
def LengthParameter3(thisCurve, segmentLength, t, fractionalTolerance, subdomain, multiple=False):
url = "rhino/geometry/curve/lengthparameter-curve_double_double_double_interval"
if multiple: url += "?multiple=true"
args = [thisCurve, segmentLength, t, fractionalTolerance, subdomain]
if multiple: args = zip(thisCurve, segmentLength, t, fractionalTolerance, subdomain)
response = Util.ComputeFetch(url, args)
return response
def NormalizedLengthParameter(thisCurve, s, t, multiple=False):
    """Curve.NormalizedLengthParameter(curve, double, double) via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, t) if multiple else [thisCurve, s, t])

def NormalizedLengthParameter1(thisCurve, s, t, fractionalTolerance, multiple=False):
    """Overload with a fractional tolerance."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, t, fractionalTolerance) if multiple else [thisCurve, s, t, fractionalTolerance])

def NormalizedLengthParameter2(thisCurve, s, t, subdomain, multiple=False):
    """Overload restricted to a sub-domain interval."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double_interval" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, t, subdomain) if multiple else [thisCurve, s, t, subdomain])

def NormalizedLengthParameter3(thisCurve, s, t, fractionalTolerance, subdomain, multiple=False):
    """Overload with fractional tolerance and sub-domain."""
    url = "rhino/geometry/curve/normalizedlengthparameter-curve_double_double_double_interval" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, t, fractionalTolerance, subdomain) if multiple else [thisCurve, s, t, fractionalTolerance, subdomain])

def NormalizedLengthParameters(thisCurve, s, absoluteTolerance, multiple=False):
    """Curve.NormalizedLengthParameters(curve, double[], double) via the compute server."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, absoluteTolerance) if multiple else [thisCurve, s, absoluteTolerance])

def NormalizedLengthParameters1(thisCurve, s, absoluteTolerance, fractionalTolerance, multiple=False):
    """Array overload with a fractional tolerance."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, absoluteTolerance, fractionalTolerance) if multiple else [thisCurve, s, absoluteTolerance, fractionalTolerance])

def NormalizedLengthParameters2(thisCurve, s, absoluteTolerance, subdomain, multiple=False):
    """Array overload restricted to a sub-domain interval."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double_interval" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, absoluteTolerance, subdomain) if multiple else [thisCurve, s, absoluteTolerance, subdomain])

def NormalizedLengthParameters3(thisCurve, s, absoluteTolerance, fractionalTolerance, subdomain, multiple=False):
    """Array overload with fractional tolerance and sub-domain."""
    url = "rhino/geometry/curve/normalizedlengthparameters-curve_doublearray_double_double_interval" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, s, absoluteTolerance, fractionalTolerance, subdomain) if multiple else [thisCurve, s, absoluteTolerance, fractionalTolerance, subdomain])
def DivideByCount(thisCurve, segmentCount, includeEnds, multiple=False):
    """Curve.DivideByCount(curve, int, bool) via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/dividebycount-curve_int_bool" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, segmentCount, includeEnds) if multiple else [thisCurve, segmentCount, includeEnds])

def DivideByCount1(thisCurve, segmentCount, includeEnds, points, multiple=False):
    """Overload that also returns the division points."""
    url = "rhino/geometry/curve/dividebycount-curve_int_bool_point3darray" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, segmentCount, includeEnds, points) if multiple else [thisCurve, segmentCount, includeEnds, points])

def DivideByLength(thisCurve, segmentLength, includeEnds, multiple=False):
    """Curve.DivideByLength(curve, double, bool) via the compute server."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, segmentLength, includeEnds) if multiple else [thisCurve, segmentLength, includeEnds])

def DivideByLength1(thisCurve, segmentLength, includeEnds, reverse, multiple=False):
    """Overload with a reverse flag."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool_bool" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, segmentLength, includeEnds, reverse) if multiple else [thisCurve, segmentLength, includeEnds, reverse])

def DivideByLength2(thisCurve, segmentLength, includeEnds, points, multiple=False):
    """Overload that also returns the division points."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool_point3darray" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, segmentLength, includeEnds, points) if multiple else [thisCurve, segmentLength, includeEnds, points])

def DivideByLength3(thisCurve, segmentLength, includeEnds, reverse, points, multiple=False):
    """Overload with reverse flag and division points."""
    url = "rhino/geometry/curve/dividebylength-curve_double_bool_bool_point3darray" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, segmentLength, includeEnds, reverse, points) if multiple else [thisCurve, segmentLength, includeEnds, reverse, points])

def DivideEquidistant(thisCurve, distance, multiple=False):
    """Curve.DivideEquidistant(curve, double) via the compute server."""
    url = "rhino/geometry/curve/divideequidistant-curve_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, distance) if multiple else [thisCurve, distance])

def DivideAsContour(thisCurve, contourStart, contourEnd, interval, multiple=False):
    """Curve.DivideAsContour(curve, point, point, double) via the compute server."""
    url = "rhino/geometry/curve/divideascontour-curve_point3d_point3d_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, contourStart, contourEnd, interval) if multiple else [thisCurve, contourStart, contourEnd, interval])
def Trim(thisCurve, side, length, multiple=False):
    """Curve.Trim(curve, CurveEnd, double) via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/trim-curve_curveend_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, length) if multiple else [thisCurve, side, length])

def Split(thisCurve, cutter, tolerance, multiple=False):
    """Curve.Split with a Brep cutter."""
    url = "rhino/geometry/curve/split-curve_brep_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, cutter, tolerance) if multiple else [thisCurve, cutter, tolerance])

def Split1(thisCurve, cutter, tolerance, angleToleranceRadians, multiple=False):
    """Curve.Split with a Brep cutter and angle tolerance."""
    url = "rhino/geometry/curve/split-curve_brep_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, cutter, tolerance, angleToleranceRadians) if multiple else [thisCurve, cutter, tolerance, angleToleranceRadians])

def Split2(thisCurve, cutter, tolerance, multiple=False):
    """Curve.Split with a Surface cutter."""
    url = "rhino/geometry/curve/split-curve_surface_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, cutter, tolerance) if multiple else [thisCurve, cutter, tolerance])

def Split3(thisCurve, cutter, tolerance, angleToleranceRadians, multiple=False):
    """Curve.Split with a Surface cutter and angle tolerance."""
    url = "rhino/geometry/curve/split-curve_surface_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, cutter, tolerance, angleToleranceRadians) if multiple else [thisCurve, cutter, tolerance, angleToleranceRadians])
def Extend(thisCurve, t0, t1, multiple=False):
    """Curve.Extend(curve, double, double) via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/extend-curve_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, t0, t1) if multiple else [thisCurve, t0, t1])

def Extend1(thisCurve, domain, multiple=False):
    """Curve.Extend over an interval domain."""
    url = "rhino/geometry/curve/extend-curve_interval" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, domain) if multiple else [thisCurve, domain])

def Extend2(thisCurve, side, length, style, multiple=False):
    """Curve.Extend by length with a CurveExtensionStyle."""
    url = "rhino/geometry/curve/extend-curve_curveend_double_curveextensionstyle" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, length, style) if multiple else [thisCurve, side, length, style])

def Extend3(thisCurve, side, style, geometry, multiple=False):
    """Curve.Extend until it hits a collection of boundary geometry."""
    url = "rhino/geometry/curve/extend-curve_curveend_curveextensionstyle_geometrybasearray" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, style, geometry) if multiple else [thisCurve, side, style, geometry])

def Extend4(thisCurve, side, style, endPoint, multiple=False):
    """Curve.Extend to a specific end point."""
    url = "rhino/geometry/curve/extend-curve_curveend_curveextensionstyle_point3d" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, style, endPoint) if multiple else [thisCurve, side, style, endPoint])

def ExtendOnSurface(thisCurve, side, surface, multiple=False):
    """Curve.ExtendOnSurface with a Surface target."""
    url = "rhino/geometry/curve/extendonsurface-curve_curveend_surface" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, surface) if multiple else [thisCurve, side, surface])

def ExtendOnSurface1(thisCurve, side, face, multiple=False):
    """Curve.ExtendOnSurface with a BrepFace target."""
    url = "rhino/geometry/curve/extendonsurface-curve_curveend_brepface" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, face) if multiple else [thisCurve, side, face])

def ExtendByLine(thisCurve, side, geometry, multiple=False):
    """Curve.ExtendByLine until it hits the given boundary geometry."""
    url = "rhino/geometry/curve/extendbyline-curve_curveend_geometrybasearray" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, geometry) if multiple else [thisCurve, side, geometry])

def ExtendByArc(thisCurve, side, geometry, multiple=False):
    """Curve.ExtendByArc until it hits the given boundary geometry."""
    url = "rhino/geometry/curve/extendbyarc-curve_curveend_geometrybasearray" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, side, geometry) if multiple else [thisCurve, side, geometry])
def Simplify(thisCurve, options, distanceTolerance, angleToleranceRadians, multiple=False):
    """Curve.Simplify via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/simplify-curve_curvesimplifyoptions_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, options, distanceTolerance, angleToleranceRadians) if multiple else [thisCurve, options, distanceTolerance, angleToleranceRadians])

def SimplifyEnd(thisCurve, end, options, distanceTolerance, angleToleranceRadians, multiple=False):
    """Curve.SimplifyEnd — simplify only one end of the curve."""
    url = "rhino/geometry/curve/simplifyend-curve_curveend_curvesimplifyoptions_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, end, options, distanceTolerance, angleToleranceRadians) if multiple else [thisCurve, end, options, distanceTolerance, angleToleranceRadians])

def Fair(thisCurve, distanceTolerance, angleTolerance, clampStart, clampEnd, iterations, multiple=False):
    """Curve.Fair via the compute server."""
    url = "rhino/geometry/curve/fair-curve_double_double_int_int_int" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, distanceTolerance, angleTolerance, clampStart, clampEnd, iterations) if multiple else [thisCurve, distanceTolerance, angleTolerance, clampStart, clampEnd, iterations])

def Fit(thisCurve, degree, fitTolerance, angleTolerance, multiple=False):
    """Curve.Fit via the compute server."""
    url = "rhino/geometry/curve/fit-curve_int_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, degree, fitTolerance, angleTolerance) if multiple else [thisCurve, degree, fitTolerance, angleTolerance])

def Rebuild(thisCurve, pointCount, degree, preserveTangents, multiple=False):
    """Curve.Rebuild via the compute server."""
    url = "rhino/geometry/curve/rebuild-curve_int_int_bool" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, pointCount, degree, preserveTangents) if multiple else [thisCurve, pointCount, degree, preserveTangents])
def ToPolyline(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, multiple=False):
    """Curve.ToPolyline via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/topolyline-curve_int_int_double_double_double_double_double_double_bool" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint) if multiple else [thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint])

def ToPolyline1(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, curveDomain, multiple=False):
    """Curve.ToPolyline overload restricted to a curve sub-domain."""
    url = "rhino/geometry/curve/topolyline-curve_int_int_double_double_double_double_double_double_bool_interval" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, curveDomain) if multiple else [thisCurve, mainSegmentCount, subSegmentCount, maxAngleRadians, maxChordLengthRatio, maxAspectRatio, tolerance, minEdgeLength, maxEdgeLength, keepStartPoint, curveDomain])

def ToPolyline2(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength, multiple=False):
    """Curve.ToPolyline tolerance-based overload."""
    url = "rhino/geometry/curve/topolyline-curve_double_double_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength) if multiple else [thisCurve, tolerance, angleTolerance, minimumLength, maximumLength])

def ToArcsAndLines(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength, multiple=False):
    """Curve.ToArcsAndLines via the compute server."""
    url = "rhino/geometry/curve/toarcsandlines-curve_double_double_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, tolerance, angleTolerance, minimumLength, maximumLength) if multiple else [thisCurve, tolerance, angleTolerance, minimumLength, maximumLength])
def PullToMesh(thisCurve, mesh, tolerance, multiple=False):
    """Curve.PullToMesh via the compute server.

    With multiple=True every argument is a sequence; the sequences are
    zipped so each tuple becomes one batched request.
    """
    url = "rhino/geometry/curve/pulltomesh-curve_mesh_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, mesh, tolerance) if multiple else [thisCurve, mesh, tolerance])

def Offset(thisCurve, plane, distance, tolerance, cornerStyle, multiple=False):
    """Curve.Offset in a plane."""
    url = "rhino/geometry/curve/offset-curve_plane_double_double_curveoffsetcornerstyle" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, plane, distance, tolerance, cornerStyle) if multiple else [thisCurve, plane, distance, tolerance, cornerStyle])

def Offset1(thisCurve, directionPoint, normal, distance, tolerance, cornerStyle, multiple=False):
    """Curve.Offset toward a direction point with a given normal."""
    url = "rhino/geometry/curve/offset-curve_point3d_vector3d_double_double_curveoffsetcornerstyle" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, directionPoint, normal, distance, tolerance, cornerStyle) if multiple else [thisCurve, directionPoint, normal, distance, tolerance, cornerStyle])

def RibbonOffset(thisCurve, distance, blendRadius, directionPoint, normal, tolerance, multiple=False):
    """Curve.RibbonOffset via the compute server."""
    url = "rhino/geometry/curve/ribbonoffset-curve_double_double_point3d_vector3d_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, distance, blendRadius, directionPoint, normal, tolerance) if multiple else [thisCurve, distance, blendRadius, directionPoint, normal, tolerance])

def OffsetOnSurface(thisCurve, face, distance, fittingTolerance, multiple=False):
    """Curve.OffsetOnSurface on a BrepFace by a constant distance."""
    url = "rhino/geometry/curve/offsetonsurface-curve_brepface_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, face, distance, fittingTolerance) if multiple else [thisCurve, face, distance, fittingTolerance])

def OffsetOnSurface1(thisCurve, face, throughPoint, fittingTolerance, multiple=False):
    """Curve.OffsetOnSurface on a BrepFace through a 2d point."""
    url = "rhino/geometry/curve/offsetonsurface-curve_brepface_point2d_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, face, throughPoint, fittingTolerance) if multiple else [thisCurve, face, throughPoint, fittingTolerance])

def OffsetOnSurface2(thisCurve, face, curveParameters, offsetDistances, fittingTolerance, multiple=False):
    """Curve.OffsetOnSurface on a BrepFace with per-parameter distances."""
    url = "rhino/geometry/curve/offsetonsurface-curve_brepface_doublearray_doublearray_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, face, curveParameters, offsetDistances, fittingTolerance) if multiple else [thisCurve, face, curveParameters, offsetDistances, fittingTolerance])

def OffsetOnSurface3(thisCurve, surface, distance, fittingTolerance, multiple=False):
    """Curve.OffsetOnSurface on a Surface by a constant distance."""
    url = "rhino/geometry/curve/offsetonsurface-curve_surface_double_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, surface, distance, fittingTolerance) if multiple else [thisCurve, surface, distance, fittingTolerance])

def OffsetOnSurface4(thisCurve, surface, throughPoint, fittingTolerance, multiple=False):
    """Curve.OffsetOnSurface on a Surface through a 2d point."""
    url = "rhino/geometry/curve/offsetonsurface-curve_surface_point2d_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, surface, throughPoint, fittingTolerance) if multiple else [thisCurve, surface, throughPoint, fittingTolerance])

def OffsetOnSurface5(thisCurve, surface, curveParameters, offsetDistances, fittingTolerance, multiple=False):
    """Curve.OffsetOnSurface on a Surface with per-parameter distances."""
    url = "rhino/geometry/curve/offsetonsurface-curve_surface_doublearray_doublearray_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, surface, curveParameters, offsetDistances, fittingTolerance) if multiple else [thisCurve, surface, curveParameters, offsetDistances, fittingTolerance])

def PullToBrepFace(thisCurve, face, tolerance, multiple=False):
    """Curve.PullToBrepFace via the compute server."""
    url = "rhino/geometry/curve/pulltobrepface-curve_brepface_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, face, tolerance) if multiple else [thisCurve, face, tolerance])

def OffsetNormalToSurface(thisCurve, surface, height, multiple=False):
    """Curve.OffsetNormalToSurface via the compute server."""
    url = "rhino/geometry/curve/offsetnormaltosurface-curve_surface_double" + ("?multiple=true" if multiple else "")
    return Util.ComputeFetch(url, zip(thisCurve, surface, height) if multiple else [thisCurve, surface, height])
|
none
| 1
| 2.479616
| 2
|
|
src/presets.py
|
slobos/datanga
| 0
|
6625957
|
<filename>src/presets.py<gh_stars>0
from PySide.QtCore import *
from PySide.QtWebKit import *
from PySide.QtGui import *
import os
import sys
import re
import json
from textviewer import *
from urlparse import urlparse
import requests
class PresetWindow(QDialog):
    """Modal dialog for browsing, applying, creating, overwriting and
    deleting Facepager query presets.

    Presets are JSON files holding a module name, its options and a
    column list. They come from two sources: the official GitHub
    repository (marked as read-only "default" presets with a trailing
    "*") and the per-user folder ~/Facepager/Presets.

    The parent is the Facepager main window; this class reads/writes its
    RequestTabs, fieldList and actions attributes when presets are
    applied or saved.
    """

    def __init__(self, parent=None):
        # Build the static dialog UI; no presets are loaded here
        # (loading happens lazily in showPresets/initPresets).
        super(PresetWindow,self).__init__(parent)
        self.mainWindow = parent
        self.setWindowTitle("Presets")
        self.setMinimumWidth(700);
        self.setMinimumHeight(600);
        #layout
        layout = QVBoxLayout(self)
        central = QHBoxLayout()
        layout.addLayout(central,1)
        self.setLayout(layout)
        #list view: one row per preset; the full preset dict is attached as Qt.UserRole data
        self.presetList = QListWidget(self)
        self.presetList.itemSelectionChanged.connect(self.currentChanged)
        central.addWidget(self.presetList,2)
        #detail view: scrollable read-only pane describing the selected preset
        self.detailView=QScrollArea()
        self.detailView.setWidgetResizable(True)
        self.detailWidget = QWidget()
        self.detailWidget.setAutoFillBackground(True)
        self.detailWidget.setStyleSheet("background-color: rgb(255,255,255);")
        #self.detailView.setFrameStyle(QFrame.Box)
        self.detailLayout=QVBoxLayout()
        self.detailWidget.setLayout(self.detailLayout)
        self.detailView.setWidget(self.detailWidget)
        central.addWidget(self.detailView,3)
        self.detailName = QLabel('')
        self.detailName.setWordWrap(True)
        self.detailName.setStyleSheet("QLabel {font-size:15pt;}")
        self.detailLayout.addWidget(self.detailName)
        self.detailDescription = TextViewer()
        self.detailLayout.addWidget(self.detailDescription)
        self.detailForm=QFormLayout()
        self.detailForm.setRowWrapPolicy(QFormLayout.DontWrapRows);
        self.detailForm.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow);
        self.detailForm.setFormAlignment(Qt.AlignLeft | Qt.AlignTop);
        self.detailForm.setLabelAlignment(Qt.AlignLeft);
        self.detailLayout.addLayout(self.detailForm,1)
        self.detailModule = QLabel('')
        self.detailForm.addRow('<b>Module</b>',self.detailModule)
        self.detailOptions = QLabel()
        self.detailOptions.setWordWrap(True)
        #self.detailOptions.setStyleSheet("background: rgba(0,0,0,0);border:0px;")
        self.detailForm.addRow('<b>Options</b>',self.detailOptions)
        self.detailColumns = QLabel()
        self.detailColumns.setWordWrap(True)
        #self.detailColumns.setStyleSheet("background: rgba(0,0,0,0);border:0px;")
        self.detailForm.addRow('<b>Columns</b>',self.detailColumns)
        #buttons: action buttons on the left, Cancel/Apply on the right
        buttons= QHBoxLayout() #QDialogButtonBox()
        self.saveButton = QPushButton('New preset')
        self.saveButton.clicked.connect(self.newPreset)
        self.saveButton.setToolTip("Create a new preset using the current tab and parameters")
        #buttons.addButton(self.saveButton,QDialogButtonBox.ActionRole)
        buttons.addWidget(self.saveButton)
        self.overwriteButton = QPushButton('Overwrite preset')
        self.overwriteButton.clicked.connect(self.overwritePreset)
        self.overwriteButton.setToolTip("Overwrite the selected presets with the current tab and parameters")
        #buttons.addButton(self.overwriteButton,QDialogButtonBox.ActionRole)
        buttons.addWidget(self.overwriteButton)
        self.deleteButton = QPushButton('Delete preset')
        self.deleteButton.clicked.connect(self.deletePreset)
        self.deleteButton.setToolTip("Delete the selected preset. Default presets can not be deleted.")
        #buttons.addButton(self.deleteButton,QDialogButtonBox.ActionRole)
        buttons.addWidget(self.deleteButton)
        #layout.addWidget(buttons,1)
        buttons.addStretch()
        #buttons=QDialogButtonBox()
        self.rejectButton=QPushButton('Cancel')
        self.rejectButton.clicked.connect(self.close)
        self.rejectButton.setToolTip("Close the preset dialog.")
        buttons.addWidget(self.rejectButton)
        self.applyButton=QPushButton('Apply')
        self.applyButton.setDefault(True)
        self.applyButton.clicked.connect(self.loadPreset)
        self.applyButton.setToolTip("Load the selected preset.")
        #buttons.addButton(self.applyButton,QDialogButtonBox.AcceptRole)
        buttons.addWidget(self.applyButton)
        #buttons.addButton(QDialogButtonBox.Cancel)
        #buttons.rejected.connect(self.close)
        #layout.addWidget(buttons,0)
        layout.addLayout(buttons)
        # User presets live under the home directory; the version suffix keeps
        # presets from incompatible Facepager versions from being picked up.
        #self.presetFolder = os.path.join(os.path.dirname(self.mainWindow.settings.fileName()),'presets')
        self.presetFolder = os.path.join(os.path.expanduser("~"),'Facepager','Presets')
        self.presetVersion = '3_9'
        self.presetSuffix = '-'+self.presetVersion+'.json'
        # if getattr(sys, 'frozen', False):
        #     self.defaultPresetFolder = os.path.join(os.path.dirname(sys.executable),'presets')
        # elif __file__:
        #     self.defaultPresetFolder = os.path.join(os.path.dirname(__file__),'presets')

    def currentChanged(self):
        """Refresh the detail pane for the current selection; the pane is
        hidden when nothing is selected."""
        #hide and clear first so a deselection leaves an empty pane
        self.detailName.setText("")
        self.detailModule.setText("")
        self.detailDescription.setText("")
        self.detailOptions.setText("")
        self.detailColumns.setText("")
        self.detailWidget.hide()
        current = self.presetList.currentItem()
        if current and current.isSelected():
            data = current.data(Qt.UserRole)
            self.detailName.setText(data.get('name'))
            self.detailModule.setText(data.get('module'))
            self.detailDescription.setText(data.get('description')+"\n")
            # Pretty-print the options dict, stripping the outer braces and quotes
            self.detailOptions.setText(json.dumps(data.get('options'),indent=2, separators=(',', ': '))[2:-2].replace('\"',''))
            self.detailColumns.setText("\n".join(data.get('columns',[])))
            self.detailWidget.show()

    def showPresets(self):
        """Reload all presets and open the dialog modally."""
        self.initPresets()
        self.exec_()

    def addPresetItem(self,folder,filename,default=False,online=False):
        """Load one preset (from disk or, when online=True, via HTTP from
        folder+filename) and append it to the list widget.

        The loaded dict is augmented with filename/default/online flags
        and a display caption; default presets get a trailing "*".
        Errors are reported in a message box instead of raised.
        """
        try:
            if online:
                data= requests.get(folder+filename).json()
            else:
                # NOTE(review): 'input' shadows the builtin of the same name
                with open(os.path.join(folder, filename), 'r') as input:
                    data = json.load(input)
            data['filename'] = filename
            data['default'] = default
            data['online'] = online
            if (data.get('module') == 'Generic'):
                # For generic presets, include the target host in the caption;
                # bare except falls back when the URL options are missing/invalid
                try: data['caption'] = data.get('module') + ' ('+urlparse(data['options']['urlpath']).netloc + "): "+data.get('name')
                except: data['caption'] = data.get('module') + ": "+data.get('name')
            else: data['caption'] = data.get('module') + ": "+data.get('name')
            if default: data['caption'] = data['caption'] +"*"
            newItem = QListWidgetItem()
            newItem.setText(data['caption'])
            newItem.setData(Qt.UserRole,data)
            # if default:
            #     ft = newItem.font()
            #     ft.setWeight(QFont.Bold)
            #     newItem.setFont(ft)
            self.presetList.addItem(newItem)
        except Exception as e:
            QMessageBox.information(self,"Facepager","Error loading preset:"+str(e))

    def initPresets(self):
        """Clear and repopulate the preset list: first the default presets
        fetched from the GitHub repository, then the user's local ones."""
        #self.defaultPresetFolder
        self.presetList.clear()
        self.detailWidget.hide()
        try:
            # List matching preset files via the GitHub contents API,
            # then fetch each raw file individually
            files = requests.get("https://api.github.com/repos/strohne/Facepager/contents/src/presets").json()
            files = [f['path'] for f in files if f['path'].endswith(self.presetSuffix)]
            for filename in files:
                self.addPresetItem("https://raw.githubusercontent.com/strohne/Facepager/master/",filename,True,True)
        except Exception as e:
            QMessageBox.information(self,"Facepager","Error loading online presets:"+str(e))
        # if os.path.exists(self.defaultPresetFolder):
        #     files = [f for f in os.listdir(self.defaultPresetFolder) if f.endswith(self.presetSuffix)]
        #     for filename in files:
        #         self.addPresetItem(self.defaultPresetFolder,filename,True)
        if os.path.exists(self.presetFolder):
            files = [f for f in os.listdir(self.presetFolder) if f.endswith(self.presetSuffix)]
            for filename in files:
                self.addPresetItem(self.presetFolder,filename)
        self.presetList.setFocus()
        self.presetList.setCurrentRow(0)
        self.presetList.sortItems()
        self.applyButton.setDefault(True)
        #self.currentChanged()

    def loadPreset(self):
        """Apply the selected preset to the main window: switch to its
        module tab, set the tab options and the column list, then close."""
        if not self.presetList.currentItem(): return False
        data = self.presetList.currentItem().data(Qt.UserRole)
        #Find API module tab whose name matches the preset's module
        for i in range(0, self.mainWindow.RequestTabs.count()):
            if self.mainWindow.RequestTabs.widget(i).name == data.get('module',''):
                tab = self.mainWindow.RequestTabs.widget(i)
                tab.setOptions(data.get('options',{}))
                self.mainWindow.RequestTabs.setCurrentWidget(tab)
                break
        #Set columns
        self.mainWindow.fieldList.setPlainText("\n".join(data.get('columns',[])))
        self.mainWindow.actions.showColumns()
        self.close()

    def uniqueFilename(self,name):
        """Return a filename in the preset folder derived from *name*
        (non-alphanumerics collapsed to '_') that does not exist yet;
        a numeric suffix is appended if needed. Raises after 10000 tries."""
        filename = os.path.join(self.presetFolder,re.sub('[^a-zA-Z0-9_-]+', '_', name )+self.presetSuffix)
        i = 1
        while os.path.exists(filename) and i < 10000:
            filename = os.path.join(self.presetFolder,re.sub('[^a-zA-Z0-9_-]+', '_', name )+"-"+str(i)+self.presetSuffix)
            i+=1
        if os.path.exists(filename):
            raise Exception('Could not find unique filename')
        return filename

    def deletePreset(self):
        """Delete the selected user preset from disk after confirmation.
        Default (online) presets are refused."""
        if not self.presetList.currentItem(): return False
        data = self.presetList.currentItem().data(Qt.UserRole)
        if data.get('default',False):
            QMessageBox.information(self,"Facepager","Cannot delete default presets.")
            return False
        reply = QMessageBox.question(self, 'Delete Preset',u"Are you sure to delete the preset \"{0}\"?".format(data.get('name','')), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply != QMessageBox.Yes: return
        os.remove(os.path.join(self.presetFolder, data.get('filename')))
        self.initPresets()

    def newPreset(self):
        """Ask for a name and description, then save the current tab's
        options and the main window's columns as a new preset file."""
        dialog=QDialog(self.mainWindow)
        dialog.setWindowTitle("New Preset")
        layout=QVBoxLayout()
        label=QLabel("<b>Name</b>")
        layout.addWidget(label)
        name=QLineEdit()
        layout.addWidget(name,0)
        label=QLabel("<b>Description</b>")
        layout.addWidget(label)
        description=QTextEdit()
        description.setMinimumWidth(500)
        # NOTE(review): attribute assignment; setAcceptRichText(False) was
        # probably intended — confirm whether this actually affects the widget
        description.acceptRichText=False
        description.setFocus()
        layout.addWidget(description,1)
        buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
        layout.addWidget(buttons,0)
        dialog.setLayout(layout)

        def save():
            # Serialize name/description plus current tab options and columns
            filename= self.uniqueFilename(self.mainWindow.RequestTabs.currentWidget().name+"-"+name.text())
            data = {
                    'name':name.text(),
                    'description':description.toPlainText(),
                    'module':self.mainWindow.RequestTabs.currentWidget().name,
                    'options':self.mainWindow.RequestTabs.currentWidget().getOptions('preset'),
                    'columns':self.mainWindow.fieldList.toPlainText().splitlines()
                   }
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            with open(filename, 'w') as outfile:
                json.dump(data, outfile,indent=2, separators=(',', ': '))
            self.initPresets()
            dialog.close()

        def close():
            dialog.close()

        #connect the nested functions above to the dialog-buttons
        buttons.accepted.connect(save)
        buttons.rejected.connect(close)
        dialog.exec_()

    def overwritePreset(self):
        """Replace the selected user preset (same file) with the current
        tab's options and columns; name/description are editable first.
        Default presets are refused."""
        if not self.presetList.currentItem():
            return False
        data = self.presetList.currentItem().data(Qt.UserRole)
        if data.get('default',False):
            QMessageBox.information(self,"Facepager","Cannot overwrite default presets.")
            return False
        dialog=QDialog(self.mainWindow)
        dialog.setWindowTitle("Overwrite selected preset")
        layout=QVBoxLayout()
        label=QLabel("<b>Name</b>")
        layout.addWidget(label)
        name=QLineEdit()
        name.setText(data.get('name'))
        layout.addWidget(name,0)
        label=QLabel("<b>Description</b>")
        layout.addWidget(label)
        description=QTextEdit()
        description.setMinimumWidth(500)
        # NOTE(review): attribute assignment; setAcceptRichText(False) was
        # probably intended — confirm whether this actually affects the widget
        description.acceptRichText=False
        description.setPlainText(data.get('description'))
        description.setFocus()
        layout.addWidget(description,1)
        buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
        layout.addWidget(buttons,0)
        dialog.setLayout(layout)

        def save():
            # Keep the original filename so the preset is replaced in place
            filename = os.path.join(self.presetFolder,data.get('filename'))
            #filename= self.uniqueFilename(name.text())
            data.update ({
                    'name':name.text(),
                    'description':description.toPlainText(),
                    'module':self.mainWindow.RequestTabs.currentWidget().name,
                    'options':self.mainWindow.RequestTabs.currentWidget().getOptions('preset'),
                    'columns':self.mainWindow.fieldList.toPlainText().splitlines()
                   })
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            reply = QMessageBox.question(self, 'Overwrite Preset',u"Are you sure to overwrite the selected preset \"{0}\" with the current settings?".format(data.get('name','')), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
            if reply == QMessageBox.Yes:
                with open(filename, 'w') as outfile:
                    json.dump(data, outfile,indent=2, separators=(',', ': '))
                self.initPresets()
            dialog.close()

        def close():
            dialog.close()

        #connect the nested functions above to the dialog-buttons
        buttons.accepted.connect(save)
        buttons.rejected.connect(close)
        dialog.exec_()
|
<filename>src/presets.py<gh_stars>0
from PySide.QtCore import *
from PySide.QtWebKit import *
from PySide.QtGui import *
import os
import sys
import re
import json
from textviewer import *
from urlparse import urlparse
import requests
class PresetWindow(QDialog):
    """Modal dialog for browsing, applying, creating, overwriting and
    deleting query presets (JSON files stored locally and fetched from
    the Facepager GitHub repository)."""

    def __init__(self, parent=None):
        """Build the dialog UI; *parent* is the Facepager main window."""
        super(PresetWindow,self).__init__(parent)
        self.mainWindow = parent
        self.setWindowTitle("Presets")
        self.setMinimumWidth(700);
        self.setMinimumHeight(600);
        #layout
        layout = QVBoxLayout(self)
        central = QHBoxLayout()
        layout.addLayout(central,1)
        self.setLayout(layout)
        #list view
        self.presetList = QListWidget(self)
        self.presetList.itemSelectionChanged.connect(self.currentChanged)
        central.addWidget(self.presetList,2)
        #detail view
        self.detailView=QScrollArea()
        self.detailView.setWidgetResizable(True)
        self.detailWidget = QWidget()
        self.detailWidget.setAutoFillBackground(True)
        self.detailWidget.setStyleSheet("background-color: rgb(255,255,255);")
        #self.detailView.setFrameStyle(QFrame.Box)
        self.detailLayout=QVBoxLayout()
        self.detailWidget.setLayout(self.detailLayout)
        self.detailView.setWidget(self.detailWidget)
        central.addWidget(self.detailView,3)
        self.detailName = QLabel('')
        self.detailName.setWordWrap(True)
        self.detailName.setStyleSheet("QLabel {font-size:15pt;}")
        self.detailLayout.addWidget(self.detailName)
        self.detailDescription = TextViewer()
        self.detailLayout.addWidget(self.detailDescription)
        self.detailForm=QFormLayout()
        self.detailForm.setRowWrapPolicy(QFormLayout.DontWrapRows);
        self.detailForm.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow);
        self.detailForm.setFormAlignment(Qt.AlignLeft | Qt.AlignTop);
        self.detailForm.setLabelAlignment(Qt.AlignLeft);
        self.detailLayout.addLayout(self.detailForm,1)
        self.detailModule = QLabel('')
        self.detailForm.addRow('<b>Module</b>',self.detailModule)
        self.detailOptions = QLabel()
        self.detailOptions.setWordWrap(True)
        #self.detailOptions.setStyleSheet("background: rgba(0,0,0,0);border:0px;")
        self.detailForm.addRow('<b>Options</b>',self.detailOptions)
        self.detailColumns = QLabel()
        self.detailColumns.setWordWrap(True)
        #self.detailColumns.setStyleSheet("background: rgba(0,0,0,0);border:0px;")
        self.detailForm.addRow('<b>Columns</b>',self.detailColumns)
        #buttons
        buttons= QHBoxLayout() #QDialogButtonBox()
        self.saveButton = QPushButton('New preset')
        self.saveButton.clicked.connect(self.newPreset)
        self.saveButton.setToolTip("Create a new preset using the current tab and parameters")
        #buttons.addButton(self.saveButton,QDialogButtonBox.ActionRole)
        buttons.addWidget(self.saveButton)
        self.overwriteButton = QPushButton('Overwrite preset')
        self.overwriteButton.clicked.connect(self.overwritePreset)
        self.overwriteButton.setToolTip("Overwrite the selected presets with the current tab and parameters")
        #buttons.addButton(self.overwriteButton,QDialogButtonBox.ActionRole)
        buttons.addWidget(self.overwriteButton)
        self.deleteButton = QPushButton('Delete preset')
        self.deleteButton.clicked.connect(self.deletePreset)
        self.deleteButton.setToolTip("Delete the selected preset. Default presets can not be deleted.")
        #buttons.addButton(self.deleteButton,QDialogButtonBox.ActionRole)
        buttons.addWidget(self.deleteButton)
        #layout.addWidget(buttons,1)
        buttons.addStretch()
        #buttons=QDialogButtonBox()
        self.rejectButton=QPushButton('Cancel')
        self.rejectButton.clicked.connect(self.close)
        self.rejectButton.setToolTip("Close the preset dialog.")
        buttons.addWidget(self.rejectButton)
        self.applyButton=QPushButton('Apply')
        self.applyButton.setDefault(True)
        self.applyButton.clicked.connect(self.loadPreset)
        self.applyButton.setToolTip("Load the selected preset.")
        #buttons.addButton(self.applyButton,QDialogButtonBox.AcceptRole)
        buttons.addWidget(self.applyButton)
        #buttons.addButton(QDialogButtonBox.Cancel)
        #buttons.rejected.connect(self.close)
        #layout.addWidget(buttons,0)
        layout.addLayout(buttons)
        # User presets live in the home directory; the version suffix keeps
        # presets of incompatible Facepager versions apart.
        #self.presetFolder = os.path.join(os.path.dirname(self.mainWindow.settings.fileName()),'presets')
        self.presetFolder = os.path.join(os.path.expanduser("~"),'Facepager','Presets')
        self.presetVersion = '3_9'
        self.presetSuffix = '-'+self.presetVersion+'.json'
        # if getattr(sys, 'frozen', False):
        #     self.defaultPresetFolder = os.path.join(os.path.dirname(sys.executable),'presets')
        # elif __file__:
        #     self.defaultPresetFolder = os.path.join(os.path.dirname(__file__),'presets')

    def currentChanged(self):
        """Refresh the detail pane for the currently selected preset."""
        #hide
        self.detailName.setText("")
        self.detailModule.setText("")
        self.detailDescription.setText("")
        self.detailOptions.setText("")
        self.detailColumns.setText("")
        self.detailWidget.hide()
        current = self.presetList.currentItem()
        if current and current.isSelected():
            data = current.data(Qt.UserRole)
            self.detailName.setText(data.get('name'))
            self.detailModule.setText(data.get('module'))
            self.detailDescription.setText(data.get('description')+"\n")
            # Strip the outer braces and quotes for a compact option display.
            self.detailOptions.setText(json.dumps(data.get('options'),indent=2, separators=(',', ': '))[2:-2].replace('\"',''))
            self.detailColumns.setText("\n".join(data.get('columns',[])))
            self.detailWidget.show()

    def showPresets(self):
        """Reload all presets and open the dialog modally."""
        self.initPresets()
        self.exec_()

    def addPresetItem(self,folder,filename,default=False,online=False):
        """Load one preset JSON (from disk or via HTTP) and append a list item.

        folder   -- local directory or URL prefix of the preset file
        filename -- file name (or URL path) of the preset
        default  -- mark as a non-deletable default preset (caption gets '*')
        online   -- fetch via requests instead of the local file system
        """
        try:
            if online:
                data= requests.get(folder+filename).json()
            else:
                # NOTE(review): 'input' shadows the builtin of the same name.
                with open(os.path.join(folder, filename), 'r') as input:
                    data = json.load(input)
            data['filename'] = filename
            data['default'] = default
            data['online'] = online
            # Generic-module presets show the target host in their caption.
            if (data.get('module') == 'Generic'):
                try: data['caption'] = data.get('module') + ' ('+urlparse(data['options']['urlpath']).netloc + "): "+data.get('name')
                except: data['caption'] = data.get('module') + ": "+data.get('name')
            else: data['caption'] = data.get('module') + ": "+data.get('name')
            if default: data['caption'] = data['caption'] +"*"
            newItem = QListWidgetItem()
            newItem.setText(data['caption'])
            newItem.setData(Qt.UserRole,data)
            # if default:
            #     ft = newItem.font()
            #     ft.setWeight(QFont.Bold)
            #     newItem.setFont(ft)
            self.presetList.addItem(newItem)
        except Exception as e:
            QMessageBox.information(self,"Facepager","Error loading preset:"+str(e))

    def initPresets(self):
        """Clear the list and reload presets from GitHub and the local folder."""
        #self.defaultPresetFolder
        self.presetList.clear()
        self.detailWidget.hide()
        try:
            # Default presets are fetched live from the GitHub repository.
            files = requests.get("https://api.github.com/repos/strohne/Facepager/contents/src/presets").json()
            files = [f['path'] for f in files if f['path'].endswith(self.presetSuffix)]
            for filename in files:
                self.addPresetItem("https://raw.githubusercontent.com/strohne/Facepager/master/",filename,True,True)
        except Exception as e:
            QMessageBox.information(self,"Facepager","Error loading online presets:"+str(e))
        # if os.path.exists(self.defaultPresetFolder):
        #     files = [f for f in os.listdir(self.defaultPresetFolder) if f.endswith(self.presetSuffix)]
        #     for filename in files:
        #         self.addPresetItem(self.defaultPresetFolder,filename,True)
        if os.path.exists(self.presetFolder):
            files = [f for f in os.listdir(self.presetFolder) if f.endswith(self.presetSuffix)]
            for filename in files:
                self.addPresetItem(self.presetFolder,filename)
        self.presetList.setFocus()
        self.presetList.setCurrentRow(0)
        self.presetList.sortItems()
        self.applyButton.setDefault(True)
        #self.currentChanged()

    def loadPreset(self):
        """Apply the selected preset to the main window and close the dialog."""
        if not self.presetList.currentItem(): return False
        data = self.presetList.currentItem().data(Qt.UserRole)
        #Find API module
        for i in range(0, self.mainWindow.RequestTabs.count()):
            if self.mainWindow.RequestTabs.widget(i).name == data.get('module',''):
                tab = self.mainWindow.RequestTabs.widget(i)
                tab.setOptions(data.get('options',{}))
                self.mainWindow.RequestTabs.setCurrentWidget(tab)
                break
        #Set columns
        self.mainWindow.fieldList.setPlainText("\n".join(data.get('columns',[])))
        self.mainWindow.actions.showColumns()
        self.close()

    def uniqueFilename(self,name):
        """Return a not-yet-existing preset path derived from *name*.

        Non-alphanumeric runs are collapsed to '_'; a numeric suffix is
        appended on collision. Raises after 10000 failed attempts.
        """
        filename = os.path.join(self.presetFolder,re.sub('[^a-zA-Z0-9_-]+', '_', name )+self.presetSuffix)
        i = 1
        while os.path.exists(filename) and i < 10000:
            filename = os.path.join(self.presetFolder,re.sub('[^a-zA-Z0-9_-]+', '_', name )+"-"+str(i)+self.presetSuffix)
            i+=1
        if os.path.exists(filename):
            raise Exception('Could not find unique filename')
        return filename

    def deletePreset(self):
        """Delete the selected user preset after confirmation."""
        if not self.presetList.currentItem(): return False
        data = self.presetList.currentItem().data(Qt.UserRole)
        if data.get('default',False):
            QMessageBox.information(self,"Facepager","Cannot delete default presets.")
            return False
        reply = QMessageBox.question(self, 'Delete Preset',u"Are you sure to delete the preset \"{0}\"?".format(data.get('name','')), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply != QMessageBox.Yes: return
        os.remove(os.path.join(self.presetFolder, data.get('filename')))
        self.initPresets()

    def newPreset(self):
        """Ask for name/description and save the current tab as a new preset."""
        dialog=QDialog(self.mainWindow)
        dialog.setWindowTitle("New Preset")
        layout=QVBoxLayout()
        label=QLabel("<b>Name</b>")
        layout.addWidget(label)
        name=QLineEdit()
        layout.addWidget(name,0)
        label=QLabel("<b>Description</b>")
        layout.addWidget(label)
        description=QTextEdit()
        description.setMinimumWidth(500)
        # NOTE(review): plain attribute assignment; Qt's setter would be
        # setAcceptRichText(False) -- confirm this has the intended effect.
        description.acceptRichText=False
        description.setFocus()
        layout.addWidget(description,1)
        buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
        layout.addWidget(buttons,0)
        dialog.setLayout(layout)
        def save():
            # Prefix the module name so presets group by module in the folder.
            filename= self.uniqueFilename(self.mainWindow.RequestTabs.currentWidget().name+"-"+name.text())
            data = {
                'name':name.text(),
                'description':description.toPlainText(),
                'module':self.mainWindow.RequestTabs.currentWidget().name,
                'options':self.mainWindow.RequestTabs.currentWidget().getOptions('preset'),
                'columns':self.mainWindow.fieldList.toPlainText().splitlines()
            }
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            with open(filename, 'w') as outfile:
                json.dump(data, outfile,indent=2, separators=(',', ': '))
            self.initPresets()
            dialog.close()
        def close():
            dialog.close()
        #connect the nested functions above to the dialog-buttons
        buttons.accepted.connect(save)
        buttons.rejected.connect(close)
        dialog.exec_()

    def overwritePreset(self):
        """Overwrite the selected user preset with the current tab settings."""
        if not self.presetList.currentItem():
            return False
        data = self.presetList.currentItem().data(Qt.UserRole)
        if data.get('default',False):
            QMessageBox.information(self,"Facepager","Cannot overwrite default presets.")
            return False
        dialog=QDialog(self.mainWindow)
        dialog.setWindowTitle("Overwrite selected preset")
        layout=QVBoxLayout()
        label=QLabel("<b>Name</b>")
        layout.addWidget(label)
        name=QLineEdit()
        name.setText(data.get('name'))
        layout.addWidget(name,0)
        label=QLabel("<b>Description</b>")
        layout.addWidget(label)
        description=QTextEdit()
        description.setMinimumWidth(500)
        # NOTE(review): plain attribute assignment; Qt's setter would be
        # setAcceptRichText(False) -- confirm this has the intended effect.
        description.acceptRichText=False
        description.setPlainText(data.get('description'))
        description.setFocus()
        layout.addWidget(description,1)
        buttons=QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
        layout.addWidget(buttons,0)
        dialog.setLayout(layout)
        def save():
            # Keep the original file name so the preset is replaced in place.
            filename = os.path.join(self.presetFolder,data.get('filename'))
            #filename= self.uniqueFilename(name.text())
            data.update ({
                'name':name.text(),
                'description':description.toPlainText(),
                'module':self.mainWindow.RequestTabs.currentWidget().name,
                'options':self.mainWindow.RequestTabs.currentWidget().getOptions('preset'),
                'columns':self.mainWindow.fieldList.toPlainText().splitlines()
            })
            if not os.path.exists(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            reply = QMessageBox.question(self, 'Overwrite Preset',u"Are you sure to overwrite the selected preset \"{0}\" with the current settings?".format(data.get('name','')), QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
            # NOTE(review): nesting below reconstructed from a whitespace-
            # mangled source -- the dialog stays open when the user answers No.
            if reply == QMessageBox.Yes:
                with open(filename, 'w') as outfile:
                    json.dump(data, outfile,indent=2, separators=(',', ': '))
                self.initPresets()
                dialog.close()
        def close():
            dialog.close()
        #connect the nested functions above to the dialog-buttons
        buttons.accepted.connect(save)
        buttons.rejected.connect(close)
        dialog.exec_()
|
en
| 0.146027
|
#layout #list view #detail view #self.detailView.setFrameStyle(QFrame.Box) #self.detailOptions.setStyleSheet("background: rgba(0,0,0,0);border:0px;") #self.detailColumns.setStyleSheet("background: rgba(0,0,0,0);border:0px;") #buttons #QDialogButtonBox() #buttons.addButton(self.saveButton,QDialogButtonBox.ActionRole) #buttons.addButton(self.overwriteButton,QDialogButtonBox.ActionRole) #buttons.addButton(self.deleteButton,QDialogButtonBox.ActionRole) #layout.addWidget(buttons,1) #buttons=QDialogButtonBox() #buttons.addButton(self.applyButton,QDialogButtonBox.AcceptRole) #buttons.addButton(QDialogButtonBox.Cancel) #buttons.rejected.connect(self.close) #layout.addWidget(buttons,0) #self.presetFolder = os.path.join(os.path.dirname(self.mainWindow.settings.fileName()),'presets') # if getattr(sys, 'frozen', False): # self.defaultPresetFolder = os.path.join(os.path.dirname(sys.executable),'presets') # elif __file__: # self.defaultPresetFolder = os.path.join(os.path.dirname(__file__),'presets') #hide # if default: # ft = newItem.font() # ft.setWeight(QFont.Bold) # newItem.setFont(ft) #self.defaultPresetFolder # if os.path.exists(self.defaultPresetFolder): # files = [f for f in os.listdir(self.defaultPresetFolder) if f.endswith(self.presetSuffix)] # for filename in files: # self.addPresetItem(self.defaultPresetFolder,filename,True) #self.currentChanged() #Find API module #Set columns #connect the nested functions above to the dialog-buttons #filename= self.uniqueFilename(name.text()) #connect the nested functions above to the dialog-buttons
| 2.135059
| 2
|
pytube/models.py
|
thedataincubator/pytube
| 0
|
6625958
|
<filename>pytube/models.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import normpath, isfile
from os import remove
from time import clock
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from sys import exit
from pytube.utils import sizeof
class Video(object):
    """
    Class representation of a single instance of a YouTube video.
    """
    def __init__(self, url, filename, **attributes):
        """
        Define the variables required to declare a new video.

        Keyword arguments:
        extension -- The file extension the video should be saved as.
        resolution -- The broadcasting standard of the video.
        url -- The url of the video. (e.g.: youtube.com/watch?v=..)
        filename -- The filename (minus the extension) to save the video.
        """
        self.url = url
        self.filename = filename
        # Extra metadata (extension, resolution, video_codec, profile, ...)
        # becomes plain instance attributes.
        self.__dict__.update(**attributes)

    def download(self, path=None, chunk_size=8 * 1024,
                 on_progress=None, on_finish=None):
        """
        Downloads the file of the URL defined within the class
        instance.

        Keyword arguments:
        path -- Destination directory
        chunk_size -- File size (in bytes) to write to buffer at a time
                      (default: 8 KB).
        on_progress -- A function to be called every time the buffer was
                       written out. Arguments passed are the current and
                       the full size.
        on_finish -- To be called when the download is finished. The full
                     path to the file is passed as an argument.
        """
        path = (normpath(path) + '/' if path else '')
        fullpath = '{0}{1}.{2}'.format(path, self.filename, self.extension)
        # Check for conflicting filenames: never overwrite an existing file.
        if isfile(fullpath):
            print("\n\nError: Conflicting filename:'{}'.\n\n".format(
                self.filename))
            exit(1)
        response = urlopen(self.url)
        # Header casing differs between servers, so try both spellings.
        meta_data = dict(response.info().items())
        file_size = int(meta_data.get("Content-Length") or
                        meta_data.get("content-length"))
        self._bytes_received = 0
        # NOTE(review): time.clock was removed in Python 3.8 -- verify the
        # supported interpreter versions.
        start = clock()
        try:
            with open(fullpath, 'wb') as dst_file:
                # Print downloading message
                print("\nDownloading: '{0}.{1}' (Bytes: {2}) \nto path: {3}\n\n".format(
                    self.filename, self.extension, sizeof(file_size), path))
                while True:
                    self._buffer = response.read(chunk_size)
                    if not self._buffer:
                        # An empty read marks the end of the stream.
                        if on_finish:
                            on_finish(fullpath)
                        break
                    self._bytes_received += len(self._buffer)
                    dst_file.write(self._buffer)
                    if on_progress:
                        on_progress(self._bytes_received, file_size, start)
        # Catch possible exceptions occurring during download
        except IOError:
            print("\n\nError: Failed to open file.\n"
                  "Check that: ('{0}'), is a valid pathname.\n\n"
                  "Or that ('{1}.{2}') is a valid filename.\n\n".format(
                  path, self.filename, self.extension))
            exit(2)
        except BufferError:
            print("\n\nError: Failed on writing buffer.\n"
                  "Failed to write video to file.\n\n")
            exit(1)
        except KeyboardInterrupt:
            # Remove the partial file so no corrupt video is left behind.
            print("\n\nInterrupt signal given.\nDeleting incomplete video"
                  "('{0}.{1}').\n\n".format(self.filename, self.extension))
            remove(fullpath)
            exit(1)

    def __repr__(self):
        """A cleaner representation of the class instance."""
        return "<Video: {0} (.{1}) - {2} - {3}>".format(
            self.video_codec,
            self.extension,
            self.resolution,
            self.profile)

    def __lt__(self, other):
        """Order videos by extension, then resolution (string comparison)."""
        if type(other) == Video:
            v1 = "{0} {1}".format(self.extension, self.resolution)
            v2 = "{0} {1}".format(other.extension, other.resolution)
            return (v1 > v2) - (v1 < v2) < 0
|
<filename>pytube/models.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from os.path import normpath, isfile
from os import remove
from time import clock
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from sys import exit
from pytube.utils import sizeof
class Video(object):
    """
    Class representation of a single instance of a YouTube video.
    """
    def __init__(self, url, filename, **attributes):
        """
        Define the variables required to declare a new video.

        Keyword arguments:
        extension -- The file extension the video should be saved as.
        resolution -- The broadcasting standard of the video.
        url -- The url of the video. (e.g.: youtube.com/watch?v=..)
        filename -- The filename (minus the extension) to save the video.
        """
        self.url = url
        self.filename = filename
        # Extra metadata (extension, resolution, video_codec, profile, ...)
        # becomes plain instance attributes.
        self.__dict__.update(**attributes)

    def download(self, path=None, chunk_size=8 * 1024,
                 on_progress=None, on_finish=None):
        """
        Downloads the file of the URL defined within the class
        instance.

        Keyword arguments:
        path -- Destination directory
        chunk_size -- File size (in bytes) to write to buffer at a time
                      (default: 8 KB).
        on_progress -- A function to be called every time the buffer was
                       written out. Arguments passed are the current and
                       the full size.
        on_finish -- To be called when the download is finished. The full
                     path to the file is passed as an argument.
        """
        path = (normpath(path) + '/' if path else '')
        fullpath = '{0}{1}.{2}'.format(path, self.filename, self.extension)
        # Check for conflicting filenames: never overwrite an existing file.
        if isfile(fullpath):
            print("\n\nError: Conflicting filename:'{}'.\n\n".format(
                self.filename))
            exit(1)
        response = urlopen(self.url)
        # Header casing differs between servers, so try both spellings.
        meta_data = dict(response.info().items())
        file_size = int(meta_data.get("Content-Length") or
                        meta_data.get("content-length"))
        self._bytes_received = 0
        # NOTE(review): time.clock was removed in Python 3.8 -- verify the
        # supported interpreter versions.
        start = clock()
        try:
            with open(fullpath, 'wb') as dst_file:
                # Print downloading message
                print("\nDownloading: '{0}.{1}' (Bytes: {2}) \nto path: {3}\n\n".format(
                    self.filename, self.extension, sizeof(file_size), path))
                while True:
                    self._buffer = response.read(chunk_size)
                    if not self._buffer:
                        # An empty read marks the end of the stream.
                        if on_finish:
                            on_finish(fullpath)
                        break
                    self._bytes_received += len(self._buffer)
                    dst_file.write(self._buffer)
                    if on_progress:
                        on_progress(self._bytes_received, file_size, start)
        # Catch possible exceptions occurring during download
        except IOError:
            print("\n\nError: Failed to open file.\n"
                  "Check that: ('{0}'), is a valid pathname.\n\n"
                  "Or that ('{1}.{2}') is a valid filename.\n\n".format(
                  path, self.filename, self.extension))
            exit(2)
        except BufferError:
            print("\n\nError: Failed on writing buffer.\n"
                  "Failed to write video to file.\n\n")
            exit(1)
        except KeyboardInterrupt:
            # Remove the partial file so no corrupt video is left behind.
            print("\n\nInterrupt signal given.\nDeleting incomplete video"
                  "('{0}.{1}').\n\n".format(self.filename, self.extension))
            remove(fullpath)
            exit(1)

    def __repr__(self):
        """A cleaner representation of the class instance."""
        return "<Video: {0} (.{1}) - {2} - {3}>".format(
            self.video_codec,
            self.extension,
            self.resolution,
            self.profile)

    def __lt__(self, other):
        """Order videos by extension, then resolution (string comparison)."""
        if type(other) == Video:
            v1 = "{0} {1}".format(self.extension, self.resolution)
            v2 = "{0} {1}".format(other.extension, other.resolution)
            return (v1 > v2) - (v1 < v2) < 0
|
en
| 0.788088
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Class representation of a single instance of a YouTube video. Define the variables required to declare a new video. Keyword arguments: extention -- The file extention the video should be saved as. resolution -- The broadcasting standard of the video. url -- The url of the video. (e.g.: youtube.com/watch?v=..) filename -- The filename (minus the extention) to save the video. Downloads the file of the URL defined within the class instance. Keyword arguments: path -- Destination directory chunk_size -- File size (in bytes) to write to buffer at a time (default: 8 bytes). on_progress -- A function to be called every time the buffer was written out. Arguments passed are the current and the full size. on_finish -- To be called when the download is finished. The full path to the file is passed as an argument. # Check for conflicting filenames # Print downloading message # Catch possible exceptions occurring during download A cleaner representation of the class instance.
| 3.09991
| 3
|
backend/app.py
|
IndiaCFG2/team-19
| 0
|
6625959
|
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir,'db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class Feedback(db.Model):
    """Database model for one piece of citizen feedback on a policy."""
    id = db.Column(db.Integer,primary_key = True)
    email = db.Column(db.String(200))
    title = db.Column(db.String(200))
    text = db.Column(db.String(200))
    upvotes = db.Column(db.Integer)
    downvotes = db.Column(db.Integer)
    name = db.Column(db.String(100))
    ministry_assigned = db.Column(db.String(200))
    userSentiment = db.Column(db.String(200))
    rating = db.Column(db.String(200))
    policy = db.Column(db.String(200))
    language = db.Column(db.String(200))
    userAge = db.Column(db.Integer)
    userPincode = db.Column(db.Integer)

    def __init__(self,email,title,text,upvotes,downvotes,name,
                 ministry_assigned,userSentiment,rating,policy,language,userAge,userPincode):
        """Assign all column values; argument order follows the columns above."""
        self.email = email
        self.title = title
        self.text = text
        self.upvotes = upvotes
        self.downvotes = downvotes
        self.name = name
        self.ministry_assigned = ministry_assigned
        self.userSentiment = userSentiment
        self.rating = rating
        self.policy = policy
        self.language = language
        self.userAge = userAge
        self.userPincode = userPincode
class User(db.Model):
    """Database model for an application user account."""
    id = db.Column(db.Integer,primary_key = True)
    first_name = db.Column(db.String(200))
    last_name = db.Column(db.String(200))
    email = db.Column(db.String(200))
    # NOTE(review): the password is stored in plain text -- it should be
    # hashed and excluded from API responses.
    password = db.Column(db.String(200))

    def __init__(self,first_name,last_name,email,password):
        """Assign all column values for a new user."""
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.password = password
class UserSchema(ma.SQLAlchemySchema):
    """Serialization schema exposing every User column (incl. password)."""
    class Meta:
        model = User
        fields = ("id","first_name","last_name","email","password")

# Single-object and list serializers for users.
user_schema = UserSchema()
users_schema = UserSchema(many = True)

class FeedbackSchema(ma.SQLAlchemySchema):
    """Serialization schema exposing every Feedback column."""
    class Meta:
        model = Feedback
        fields = ("id","email","title","text","upvotes","downvotes","name","ministry_assigned","userSentiment","rating","policy","language","userAge","userPincode")

# Single-object and list serializers for feedback entries.
feedback_schema = FeedbackSchema()
feedbacks_schema = FeedbackSchema(many = True)

# Create all tables at import time if they do not exist yet.
db.create_all()
# All JSON fields accepted for a feedback record, in column order.
FEEDBACK_FIELDS = ("email", "title", "text", "upvotes", "downvotes", "name",
                   "ministry_assigned", "userSentiment", "rating", "policy",
                   "language", "userAge", "userPincode")

@app.route('/feedback',methods=['POST'])
def add_feedback():
    """Create a feedback row from the JSON request body and return it."""
    values = {field: request.json[field] for field in FEEDBACK_FIELDS}
    new_feedback = Feedback(**values)
    db.session.add(new_feedback)
    db.session.commit()
    return feedback_schema.jsonify(new_feedback)

@app.route('/feedback',methods=['GET'])
def get_feedbacks():
    """Return every feedback row as a JSON list."""
    return jsonify(feedbacks_schema.dump(Feedback.query.all()))

@app.route('/feedback/<id>',methods=['GET'])
def get_feedback(id):
    """Return one feedback row by primary key."""
    return feedback_schema.jsonify(Feedback.query.get(id))

@app.route('/feedback/<id>',methods=['PUT'])
def update_feedback(id):
    """Overwrite every field of an existing feedback row from the JSON body."""
    feedback = Feedback.query.get(id)
    # Read all values first (so a missing key fails before any mutation),
    # then apply them to the mapped object.
    values = {field: request.json[field] for field in FEEDBACK_FIELDS}
    for field, value in values.items():
        setattr(feedback, field, value)
    db.session.commit()
    return feedback_schema.jsonify(feedback)

@app.route('/feedback/<id>',methods=['DELETE'])
def delete_feedback(id):
    """Delete a feedback row by primary key and return the deleted record."""
    feedback = Feedback.query.get(id)
    db.session.delete(feedback)
    db.session.commit()
    return feedback_schema.jsonify(feedback)
# All JSON fields accepted for a user record, in column order.
USER_FIELDS = ("first_name", "last_name", "email", "password")

@app.route('/user',methods=['POST'])
def add_user():
    """Create a user row from the JSON request body and return it."""
    values = {field: request.json[field] for field in USER_FIELDS}
    new_user = User(**values)
    db.session.add(new_user)
    db.session.commit()
    return user_schema.jsonify(new_user)

@app.route('/user',methods=['GET'])
def get_users():
    """Return every user row as a JSON list."""
    return jsonify(users_schema.dump(User.query.all()))

@app.route('/user/<id>',methods=['GET'])
def get_user(id):
    """Return one user row by primary key."""
    return user_schema.jsonify(User.query.get(id))

@app.route('/user/<id>',methods=['PUT'])
def update_user(id):
    """Overwrite every field of an existing user row from the JSON body."""
    user = User.query.get(id)
    # Read all values first (so a missing key fails before any mutation),
    # then apply them to the mapped object.
    values = {field: request.json[field] for field in USER_FIELDS}
    for field, value in values.items():
        setattr(user, field, value)
    db.session.commit()
    return user_schema.jsonify(user)

@app.route('/user/<id>',methods=['DELETE'])
def delete_user(id):
    """Delete a user row by primary key and return the deleted record."""
    user = User.query.get(id)
    db.session.delete(user)
    db.session.commit()
    return user_schema.jsonify(user)
# Start the Flask development server when run directly (not for production).
if __name__ == "__main__":
    app.run(debug=True)
|
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir,'db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class Feedback(db.Model):
    """Database model for one piece of citizen feedback on a policy."""
    id = db.Column(db.Integer,primary_key = True)
    email = db.Column(db.String(200))
    title = db.Column(db.String(200))
    text = db.Column(db.String(200))
    upvotes = db.Column(db.Integer)
    downvotes = db.Column(db.Integer)
    name = db.Column(db.String(100))
    ministry_assigned = db.Column(db.String(200))
    userSentiment = db.Column(db.String(200))
    rating = db.Column(db.String(200))
    policy = db.Column(db.String(200))
    language = db.Column(db.String(200))
    userAge = db.Column(db.Integer)
    userPincode = db.Column(db.Integer)

    def __init__(self,email,title,text,upvotes,downvotes,name,
                 ministry_assigned,userSentiment,rating,policy,language,userAge,userPincode):
        """Assign all column values; argument order follows the columns above."""
        self.email = email
        self.title = title
        self.text = text
        self.upvotes = upvotes
        self.downvotes = downvotes
        self.name = name
        self.ministry_assigned = ministry_assigned
        self.userSentiment = userSentiment
        self.rating = rating
        self.policy = policy
        self.language = language
        self.userAge = userAge
        self.userPincode = userPincode
class User(db.Model):
    """Database model for an application user account."""
    id = db.Column(db.Integer,primary_key = True)
    first_name = db.Column(db.String(200))
    last_name = db.Column(db.String(200))
    email = db.Column(db.String(200))
    # NOTE(review): the password is stored in plain text -- it should be
    # hashed and excluded from API responses.
    password = db.Column(db.String(200))

    def __init__(self,first_name,last_name,email,password):
        """Assign all column values for a new user."""
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.password = password
class UserSchema(ma.SQLAlchemySchema):
    """Serialization schema exposing every User column (incl. password)."""
    class Meta:
        model = User
        fields = ("id","first_name","last_name","email","password")

# Single-object and list serializers for users.
user_schema = UserSchema()
users_schema = UserSchema(many = True)

class FeedbackSchema(ma.SQLAlchemySchema):
    """Serialization schema exposing every Feedback column."""
    class Meta:
        model = Feedback
        fields = ("id","email","title","text","upvotes","downvotes","name","ministry_assigned","userSentiment","rating","policy","language","userAge","userPincode")

# Single-object and list serializers for feedback entries.
feedback_schema = FeedbackSchema()
feedbacks_schema = FeedbackSchema(many = True)

# Create all tables at import time if they do not exist yet.
db.create_all()
# All JSON fields accepted for a feedback record, in column order.
FEEDBACK_FIELDS = ("email", "title", "text", "upvotes", "downvotes", "name",
                   "ministry_assigned", "userSentiment", "rating", "policy",
                   "language", "userAge", "userPincode")

@app.route('/feedback',methods=['POST'])
def add_feedback():
    """Create a feedback row from the JSON request body and return it."""
    values = {field: request.json[field] for field in FEEDBACK_FIELDS}
    new_feedback = Feedback(**values)
    db.session.add(new_feedback)
    db.session.commit()
    return feedback_schema.jsonify(new_feedback)

@app.route('/feedback',methods=['GET'])
def get_feedbacks():
    """Return every feedback row as a JSON list."""
    return jsonify(feedbacks_schema.dump(Feedback.query.all()))

@app.route('/feedback/<id>',methods=['GET'])
def get_feedback(id):
    """Return one feedback row by primary key."""
    return feedback_schema.jsonify(Feedback.query.get(id))

@app.route('/feedback/<id>',methods=['PUT'])
def update_feedback(id):
    """Overwrite every field of an existing feedback row from the JSON body."""
    feedback = Feedback.query.get(id)
    # Read all values first (so a missing key fails before any mutation),
    # then apply them to the mapped object.
    values = {field: request.json[field] for field in FEEDBACK_FIELDS}
    for field, value in values.items():
        setattr(feedback, field, value)
    db.session.commit()
    return feedback_schema.jsonify(feedback)

@app.route('/feedback/<id>',methods=['DELETE'])
def delete_feedback(id):
    """Delete a feedback row by primary key and return the deleted record."""
    feedback = Feedback.query.get(id)
    db.session.delete(feedback)
    db.session.commit()
    return feedback_schema.jsonify(feedback)
# All JSON fields accepted for a user record, in column order.
USER_FIELDS = ("first_name", "last_name", "email", "password")

@app.route('/user',methods=['POST'])
def add_user():
    """Create a user row from the JSON request body and return it."""
    values = {field: request.json[field] for field in USER_FIELDS}
    new_user = User(**values)
    db.session.add(new_user)
    db.session.commit()
    return user_schema.jsonify(new_user)

@app.route('/user',methods=['GET'])
def get_users():
    """Return every user row as a JSON list."""
    return jsonify(users_schema.dump(User.query.all()))

@app.route('/user/<id>',methods=['GET'])
def get_user(id):
    """Return one user row by primary key."""
    return user_schema.jsonify(User.query.get(id))

@app.route('/user/<id>',methods=['PUT'])
def update_user(id):
    """Overwrite every field of an existing user row from the JSON body."""
    user = User.query.get(id)
    # Read all values first (so a missing key fails before any mutation),
    # then apply them to the mapped object.
    values = {field: request.json[field] for field in USER_FIELDS}
    for field, value in values.items():
        setattr(user, field, value)
    db.session.commit()
    return user_schema.jsonify(user)

@app.route('/user/<id>',methods=['DELETE'])
def delete_user(id):
    """Delete a user row by primary key and return the deleted record."""
    user = User.query.get(id)
    db.session.delete(user)
    db.session.commit()
    return user_schema.jsonify(user)
# Start the Flask development server when run directly (not for production).
if __name__ == "__main__":
    app.run(debug=True)
|
none
| 1
| 2.389867
| 2
|
|
calvin_models/calvin_agent/utils/data_visualization.py
|
nikepupu/calvin
| 0
|
6625960
|
<gh_stars>0
import logging
import hydra
from omegaconf import DictConfig
from pytorch_lightning import seed_everything
logger = logging.getLogger(__name__)
from matplotlib.animation import ArtistAnimation
import matplotlib.pyplot as plt
import numpy as np
def visualize(data):
    """Animate one sample's image sequence in a blocking matplotlib window.

    Expects ``data[1][0][0]`` to be an (S, C, H, W) image tensor and
    ``data[4][0]`` to be a title string — assumption from the indexing
    here; TODO confirm against the dataset's __getitem__.
    """
    seq_img = data[1][0][0].numpy()
    title = data[4][0]
    num_frames = seq_img.shape[0]
    # (S, C, H, W) -> (S, H, W, C), the layout imshow expects.
    seq_img = np.transpose(seq_img, (0, 2, 3, 1))
    frames = []
    fig = plt.figure()
    for j in range(num_frames):
        frame = seq_img[j]
        # Min-max normalize into [0, 1]; guard against a constant frame
        # (max == min), which previously divided by zero.
        spread = frame.max() - frame.min()
        if spread > 0:
            frame = (frame - frame.min()) / spread
        artist = plt.imshow(frame, animated=True)
        frames.append([artist])
    # Keep a reference: ArtistAnimation is driven by its own timer and the
    # animation freezes if this object is garbage-collected.
    anim = ArtistAnimation(fig, frames, interval=50)
    plt.title(title)
    plt.show()
@hydra.main(config_path="../../conf", config_name="default.yaml")
def train(cfg: DictConfig) -> None:
    """Iterate the language-annotated training split and animate each sample."""
    # sets seeds for numpy, torch, python.random and PYTHONHASHSEED.
    seed_everything(cfg.seed)
    # num_workers=0 keeps loading in-process so matplotlib can display.
    data_module = hydra.utils.instantiate(cfg.dataset, num_workers=0)
    data_module.setup()
    train = data_module.train_dataloader()
    # presumably the dataloader dict maps modality -> loader and "lang"
    # selects the language-conditioned split — verify against the datamodule.
    dataset = train["lang"]
    logger.info(f"Dataset Size: {len(dataset)}")
    for i, lang in enumerate(dataset):
        logger.info(f"Element : {i}")
        visualize(lang)
if __name__ == "__main__":
train()
|
import logging
import hydra
from omegaconf import DictConfig
from pytorch_lightning import seed_everything
logger = logging.getLogger(__name__)
from matplotlib.animation import ArtistAnimation
import matplotlib.pyplot as plt
import numpy as np
def visualize(data):
    """Render a sample's image sequence as a matplotlib animation.

    ``data[1][0][0]`` is assumed to be an (S, C, H, W) tensor and
    ``data[4][0]`` its title string — TODO confirm against the dataset.
    """
    seq_img = data[1][0][0].numpy()
    title = data[4][0]
    s, c, h, w = seq_img.shape  # only s (sequence length) is used below
    seq_img = np.transpose(seq_img, (0, 2, 3, 1))  # to (S, H, W, C) for imshow
    imgs = []
    fig = plt.figure()
    for j in range(s):
        # imgRGB = seq_img[j].astype(int)
        imgRGB = seq_img[j]
        # Min-max normalize into [0, 1].
        # NOTE(review): divides by zero if a frame is constant (max == min).
        imgRGB = (imgRGB - imgRGB.min()) / (imgRGB.max() - imgRGB.min())
        img = plt.imshow(imgRGB, animated=True)
        imgs.append([img])
    # The animation object must stay referenced or it is garbage-collected
    # and the window shows only a static frame.
    anim = ArtistAnimation(fig, imgs, interval=50)
    plt.title(title)
    plt.show()
@hydra.main(config_path="../../conf", config_name="default.yaml")
def train(cfg: DictConfig) -> None:
    """Visualize every sample of the language-annotated training split."""
    # sets seeds for numpy, torch, python.random and PYTHONHASHSEED.
    seed_everything(cfg.seed)
    # num_workers=0: load samples in-process (needed for interactive plotting).
    data_module = hydra.utils.instantiate(cfg.dataset, num_workers=0)
    data_module.setup()
    train = data_module.train_dataloader()
    # "lang" presumably selects the language-conditioned loader — verify.
    dataset = train["lang"]
    logger.info(f"Dataset Size: {len(dataset)}")
    for i, lang in enumerate(dataset):
        logger.info(f"Element : {i}")
        visualize(lang)
if __name__ == "__main__":
train()
|
en
| 0.640333
|
# imgRGB = seq_img[j].astype(int) # sets seeds for numpy, torch, python.random and PYTHONHASHSEED.
| 2.219921
| 2
|
skytap/models/Interface.py
|
mapledyne/skytap
| 3
|
6625961
|
<filename>skytap/models/Interface.py
"""Support for an interface resource in Skytap."""
import json
from skytap.framework.ApiClient import ApiClient # noqa
from skytap.models.PublishedServices import PublishedServices # noqa
from skytap.models.SkytapResource import SkytapResource # noqa
class Interface(SkytapResource):
    """One Skytap (network) Interface."""

    def __getattr__(self, key):
        """Lazily fetch attributes omitted from the initial API response.

        The interface JSON returned by the listing call does not include
        Published Services.  To save an API round-trip per object, the
        services are requested only the first time the ``services``
        attribute is read; the result is then cached on the instance, so
        later reads resolve through normal attribute lookup and never
        reach __getattr__ again.  Every other attribute is resolved by
        the parent class as usual.
        """
        if key != 'services':
            return super(Interface, self).__getattr__(key)
        # First access: pull the full interface record and build the
        # PublishedServices collection from its "services" field.
        api = ApiClient()
        services_json = json.loads(api.rest(self.url))
        self.services = PublishedServices(services_json["services"],
                                          self.url)
        return self.services
|
<filename>skytap/models/Interface.py
"""Support for an interface resource in Skytap."""
import json
from skytap.framework.ApiClient import ApiClient # noqa
from skytap.models.PublishedServices import PublishedServices # noqa
from skytap.models.SkytapResource import SkytapResource # noqa
class Interface(SkytapResource):
    """One Skytap (network) Interface."""
    def __getattr__(self, key):
        """Get attributes.
        Interfaces aren't fully returned when the API call is made -
        Published Services aren't returned. Often this doesn't matter,
        so we don't automatically pull this information. However, if you ask
        for the services, this function will go and get the requested
        information on demand. This allows saving of API calls (we don't
        request this unless you're accessing Published Services), but also
        you can treat the object as if the services are there all along. We'll
        get the info when you ask for it, and you can move along like it was
        there from the start.
        If you're doing anything other than asking for services, then this
        passes the call upstream to do the default stuff.
        """
        if key == 'services':
            # Fetch the full interface record on first access only.
            api = ApiClient()
            services_json = json.loads(api.rest(self.url))
            # Assigning to self.services caches the result: later lookups
            # find the attribute directly and skip __getattr__ entirely.
            self.services = PublishedServices(services_json["services"],
                                              self.url)
            return self.services
        return super(Interface, self).__getattr__(key)
|
en
| 0.945481
|
Support for an interface resource in Skytap. # noqa # noqa # noqa One Skytap (network) Interface. Get attributes. Interfaces aren't fully returned when the API call is made - Published Services aren't returned. Often this doesn't matter, so we don't automatically pull this information. However, if you ask for the services, this function will go and get the requested information on demand. This allows saving of API calls (we don't request this unless you're accessing Published Services), but also you can treat the object as if the services are there all along. We'll get the info when you ask for it, and you can move along like it was there from the start. If you're doing anything other than asking for services, then this passes the call upstream to do the default stuff.
| 2.944456
| 3
|
egs/word_embedding/steps/tfrnnlm/rnnlm_skipgram.py
|
charlesliucn/LanMIT
| 17
|
6625962
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import math
import random
import reader
import inspect
import collections
import numpy as np
import tensorflow as tf
reload(sys)
sys.setdefaultencoding("utf-8")
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
session = tf.Session(config = config)
flags = tf.flags
logging = tf.logging
flags.DEFINE_string("data-path", None, "Where the training/test data is stored.")
flags.DEFINE_string("vocab-path", None, "Where the wordlist file is stored.")
flags.DEFINE_string("save-path", None, "Model output directory.")
flags.DEFINE_integer("hidden-size", 200, "hidden dim of RNN")
flags.DEFINE_integer("num-layers", 2, "number of layers of RNN")
flags.DEFINE_integer("batch-size", 64, "batch size of RNN training")
flags.DEFINE_float("keep-prob", 1.0, "Keep Probability of Dropout")
flags.DEFINE_integer("max-epoch", 30, "The number of max epoch")
FLAGS = flags.FLAGS
class Config(object):
    """Small config.

    Hyper-parameter bundle for the RNNLM.  Several fields (vocab_size,
    hidden_size, num_layers, batch_size, keep_prob, max_max_epoch) are
    overwritten from command-line flags in main() before use.
    """
    init_scale = 0.1      # uniform range for weight initialization
    learning_rate = 1.0   # initial learning rate (decayed after max_epoch)
    max_grad_norm = 1     # global-norm gradient clipping threshold
    num_layers = 1        # RNN depth
    num_steps = 20        # BPTT unroll length
    hidden_size = 200     # RNN state size; presumably must match the
                          # skip-gram embedding_size in main() — verify
    max_epoch = 4         # epochs before learning-rate decay kicks in
    max_max_epoch = 30    # total number of training epochs
    keep_prob = 1         # dropout keep probability (1 = no dropout)
    lr_decay = 0.8        # per-epoch decay factor after max_epoch
    batch_size = 64
class RnnlmInput(object):
    """The input data."""

    def __init__(self, config, data, name = None):
        # Number of parallel sequences and BPTT unroll length.
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        # How many (batch_size x num_steps) windows fit in the corpus; the
        # trailing -1 leaves room for the one-step-shifted targets.
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        # reader.rnnlm_producer is assumed to yield (inputs, targets)
        # batches with targets shifted by one token — TODO confirm in reader.
        self.input_data, self.targets = reader.rnnlm_producer(
            data, batch_size, num_steps, name = name)
class RnnlmModel(object):
    """The RNNLM model.

    Builds a TF1 graph for an RNN language model whose embedding matrix is
    initialized from pretrained skip-gram vectors, plus a separate
    single-step sub-graph (named test_* placeholders/tensors) intended for
    external, one-word-at-a-time evaluation.
    """

    def __init__(self, is_training, config, input_, skipgram_embeddings):
        self._input = input_
        batch_size = input_.batch_size
        num_steps = input_.num_steps
        hidden_size = config.hidden_size
        vocab_size = config.vocab_size

        def rnn_cell():
            # Older TF releases lack the `reuse` kwarg on BasicRNNCell, so
            # only pass it when the installed version accepts it.
            if 'reuse' in inspect.getargspec(
                    tf.contrib.rnn.BasicRNNCell.__init__).args:
                return tf.contrib.rnn.BasicRNNCell(hidden_size, reuse=tf.get_variable_scope().reuse)
            else:
                return tf.contrib.rnn.BasicRNNCell(hidden_size)
        attn_cell = rnn_cell
        if is_training and config.keep_prob < 1:
            # Wrap each cell with output dropout during training.
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(
                    rnn_cell(), output_keep_prob=config.keep_prob)
        self.cell = tf.contrib.rnn.MultiRNNCell(
            [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
        self._initial_state = self.cell.zero_state(batch_size, tf.float32)
        self._initial_state_single = self.cell.zero_state(1, tf.float32)
        # Zero state for the single-step test graph, exported by name.
        self.initial = tf.reshape(tf.stack(axis=0, values=self._initial_state_single),
                                  [config.num_layers, 1, hidden_size], name="test_initial_state")
        # first implement the less efficient version
        test_word_in = tf.placeholder(tf.int32, [1, 1], name="test_word_in")
        state_placeholder = tf.placeholder(tf.float32, [config.num_layers, 1, hidden_size], name="test_state_in")
        # unpacking the input state context
        l = tf.unstack(state_placeholder, axis=0)
        test_input_state = tuple([l[idx] for idx in range(config.num_layers)])
        # Pretrained skip-gram vectors replace the randomly initialized
        # embedding that was used here previously:
        # self.embedding = tf.get_variable("embedding", [vocab_size, hidden_size], dtype = tf.float32)
        # inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data)
        # test_inputs = tf.nn.embedding_lookup(self.embedding, test_word_in)
        with tf.device("/cpu:0"):
            # Initialize the embedding matrix from skipgram_embeddings;
            # its shape must be [vocab_size, hidden_size].
            embed_init = tf.constant_initializer(skipgram_embeddings, dtype = tf.float32)
            self.embedding = tf.get_variable("embedding", shape = [vocab_size, hidden_size],
                dtype = tf.float32, initializer = embed_init)
            inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data)
            test_inputs = tf.nn.embedding_lookup(self.embedding, test_word_in)
        # test time
        with tf.variable_scope("RNN"):
            (test_cell_output, test_output_state) = self.cell(test_inputs[:, 0, :], test_input_state)
        test_state_out = tf.reshape(tf.stack(axis=0, values=test_output_state),
                                    [config.num_layers, 1, hidden_size], name="test_state_out")
        test_cell_out = tf.reshape(test_cell_output, [1, hidden_size], name="test_cell_out")
        # above is the first part of the graph for test
        # test-word-in
        #               > ---- > test-state-out
        # test-state-in        > test-cell-out
        # below is the 2nd part of the graph for test
        # test-word-out
        #               > prob(word | test-word-out)
        # test-cell-in
        test_word_out = tf.placeholder(tf.int32, [1, 1], name="test_word_out")
        cellout_placeholder = tf.placeholder(tf.float32, [1, hidden_size], name="test_cell_in")
        softmax_w = tf.get_variable("softmax_w", [hidden_size, vocab_size], dtype=tf.float32)
        softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
        test_logits = tf.matmul(cellout_placeholder, softmax_w) + softmax_b
        test_softmaxed = tf.nn.log_softmax(test_logits)
        # Log-probability of the queried word given the fed-in cell output.
        p_word = test_softmaxed[0, test_word_out[0,0]]
        test_out = tf.identity(p_word, name="test_out")
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # Simplified version of models/tutorials/rnn/rnn.py's rnn().
        # This builds an unrolled LSTM for tutorial purposes only.
        # In general, use the rnn() or state_saving_rnn() from rnn.py.
        #
        # The alternative version of the code below is:
        #
        # inputs = tf.unstack(inputs, num=num_steps, axis=1)
        # outputs, state = tf.contrib.rnn.static_rnn(
        #     cell, inputs, initial_state=self._initial_state)
        outputs = []
        state = self._initial_state
        with tf.variable_scope("RNN"):
            for time_step in range(num_steps):
                # `> -1` reuses variables from step 0 onward, sharing
                # weights with the single-step test graph above.
                if time_step > -1: tf.get_variable_scope().reuse_variables()
                (cell_output, state) = self.cell(inputs[:, time_step, :], state)
                outputs.append(cell_output)
        output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, hidden_size])
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Per-token cross-entropy, uniformly weighted.
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(input_.targets, [-1])],
            [tf.ones([batch_size * num_steps], dtype=tf.float32)])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state
        if not is_training:
            return
        # Training-only ops: clipped momentum SGD with an externally
        # assignable learning rate (see assign_lr).
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
        optimizer = tf.train.MomentumOptimizer(self._lr, 0.9)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())
        self._new_lr = tf.placeholder(
            tf.float32, shape=[], name="new_learning_rate")
        self._lr_update = tf.assign(self._lr, self._new_lr)

    def assign_lr(self, session, lr_value):
        # Push a new learning-rate value into the graph variable.
        session.run(self._lr_update, feed_dict = {self._new_lr: lr_value})

    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
def run_epoch(session, model, eval_op=None, verbose=False):
    """Runs the model on the given data.

    Carries the RNN state across minibatches, accumulates the per-step
    cost, and returns the epoch perplexity exp(total_cost / total_steps).
    When eval_op is given (e.g. the train op) it is run alongside the
    cost fetches.
    """
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
    }
    if eval_op is not None:
        fetches["eval_op"] = eval_op
    for step in range(model.input.epoch_size):
        feed_dict = {}
        # Feed the previous batch's final state in as this batch's
        # initial state, so the RNN is effectively stateful.
        for i, h in enumerate(model.initial_state):
            feed_dict[h] = state[i]
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        costs += cost
        iters += model.input.num_steps
        # Log progress roughly ten times per epoch.
        if verbose and step % (model.input.epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
                   iters * model.input.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
# Module-level cursor into train_data; successive generate_batch calls
# resume where the previous one stopped.
data_index = 0


def generate_batch(train_data, embed_batch_size, num_skips, skip_window):
    """Produce one skip-gram training batch from train_data.

    Each center word appears num_skips times in `batch`, paired in
    `labels` with num_skips distinct context words drawn uniformly from
    a window of skip_window tokens on either side.  Advances the global
    data_index (wrapping around the corpus) so calls walk the data.
    """
    global data_index
    assert embed_batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(embed_batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(embed_batch_size, 1), dtype=np.int32)
    # Window layout: skip_window tokens, the center token, skip_window tokens.
    span = 2 * skip_window + 1
    window = collections.deque(maxlen=span)
    # Prime the sliding window with the first `span` tokens.
    for _ in range(span):
        window.append(train_data[data_index])
        data_index = (data_index + 1) % len(train_data)
    for i in range(embed_batch_size // num_skips):
        pos = skip_window          # candidate context position in the window
        used = [skip_window]       # positions already emitted for this center
        for j in range(num_skips):
            # Rejection-sample a context position not used yet.
            while pos in used:
                pos = random.randint(0, span - 1)
            used.append(pos)
            batch[i * num_skips + j] = window[skip_window]
            labels[i * num_skips + j, 0] = window[pos]
        # Slide the window one token to the right.
        window.append(train_data[data_index])
        data_index = (data_index + 1) % len(train_data)
    return batch, labels
def get_config():
    """Return a fresh Config instance (each caller gets its own copy)."""
    return Config()
def main(_):
    """Train skip-gram word embeddings, then an RNN language model on them.

    Phase 1 builds and trains a skip-gram model to produce embeddings;
    phase 2 builds the RNNLM initialized with those embeddings and trains
    it with a decaying learning rate, checkpointing to FLAGS.save_path.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to RNNLM data directory")
    # Load corpus and vocabulary.
    # NOTE(review): flags above are declared with dashes ("data-path");
    # confirm the installed tf.flags exposes them as underscore attributes.
    raw_data = reader.rnnlm_raw_data(FLAGS.data_path, FLAGS.vocab_path)
    train_data, valid_data, _, word_map = raw_data
    # word_map: dictionary (word -> id)
    # train_data: data (id sequence)
    reverse_wordmap = dict(zip(word_map.values(), word_map.keys()))
    # Skip-gram (word embedding) hyper-parameters.
    embed_batch_size = 128
    embedding_size = 200
    skip_window = 1
    num_skips = 2
    valid_size = 16
    valid_window = 100
    embed_num_steps = 100001
    config = get_config()
    config.vocab_size = len(word_map)
    config.hidden_size = FLAGS.hidden_size
    config.num_layers = FLAGS.num_layers
    config.batch_size = FLAGS.batch_size
    config.keep_prob = FLAGS.keep_prob
    config.max_max_epoch = FLAGS.max_epoch
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    vocabulary_size = len(word_map)
    # Validation words: half frequent ids, half ids from [1000, 1000+window).
    valid_examples = np.array(random.sample(range(valid_window), valid_size // 2))
    valid_examples = np.append(
        valid_examples,
        random.sample(range(1000, 1000 + valid_window), valid_size // 2))
    num_sampled = 64
    # ---- Phase 1: skip-gram embedding graph ----
    graph_skipgram = tf.Graph()
    with graph_skipgram.as_default():
        train_dataset = tf.placeholder(tf.int32, shape = [embed_batch_size])
        train_labels = tf.placeholder(tf.int32, shape = [embed_batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        softmax_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                stddev = 1.0/math.sqrt(embedding_size)))
        softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
        embed = tf.nn.embedding_lookup(embeddings, train_dataset)
        print("Embed size: %s" % embed.get_shape().as_list())
        # Sampled softmax avoids a full softmax over the vocabulary.
        loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
            weights = softmax_weights,
            biases = softmax_biases,
            inputs = embed,
            labels = train_labels,
            num_sampled = num_sampled,
            num_classes = vocabulary_size))
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
        # L2-normalized embeddings for cosine-similarity inspection.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
    with tf.Session(graph = graph_skipgram) as session:
        tf.global_variables_initializer().run()
        print("Initialized!")
        average_loss = 0
        for step in range(embed_num_steps):
            batch_data, batch_labels = generate_batch(
                train_data = train_data,
                embed_batch_size = embed_batch_size,
                num_skips = num_skips,
                skip_window = skip_window)
            feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
            _, lo = session.run([optimizer, loss], feed_dict = feed_dict)
            average_loss += lo
            # Report the running average loss every 2000 steps.
            if step % 2000 == 0:
                if step > 0:
                    average_loss = average_loss / 2000
                print("Averge loss at step %d: %f" % (step, average_loss))
                average_loss = 0
            # Every 10000 steps, print nearest neighbours of validation words.
            if step % 10000 == 0:
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = reverse_wordmap[valid_examples[i]]
                    top_k = 8
                    nearest = (-sim[i,:]).argsort()[1:top_k+1]
                    log = "Nearest to %s:" % valid_word
                    for k in range(top_k):
                        close_word = reverse_wordmap[nearest[k]]
                        log = log + " " + close_word + ","
                    print(log)
        final_embeddings = normalized_embeddings.eval()
    # ---- Phase 2: RNN language model seeded with the embeddings ----
    graph_rnnlm = tf.Graph()
    with graph_rnnlm.as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.name_scope("Train"):
            train_input = RnnlmInput(config = config, data = train_data, name = "TrainInput")
            with tf.variable_scope("Model", reuse = None, initializer = initializer):
                m = RnnlmModel(is_training = True, config = config, input_ = train_input,
                    skipgram_embeddings = final_embeddings)
            tf.summary.scalar("Training Loss", m.cost)
            tf.summary.scalar("Learning Rate", m.lr)
        with tf.name_scope("Valid"):
            valid_input = RnnlmInput(config=config, data=valid_data, name="ValidInput")
            # reuse=True shares the training model's weights for validation.
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                mvalid = RnnlmModel(is_training=False, config=config, input_=valid_input,
                    skipgram_embeddings = final_embeddings)
            tf.summary.scalar("Validation Loss", mvalid.cost)
        sv = tf.train.Supervisor(logdir=FLAGS.save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Decay the learning rate only after max_epoch epochs.
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                m.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
                train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                valid_perplexity = run_epoch(session, mvalid)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            if FLAGS.save_path:
                print("Saving model to %s." % FLAGS.save_path)
                sv.saver.save(session, FLAGS.save_path)
if __name__ == "__main__":
tf.app.run()
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import math
import random
import reader
import inspect
import collections
import numpy as np
import tensorflow as tf
reload(sys)
sys.setdefaultencoding("utf-8")
os.environ["CUDA_VISIBLE_DEVICES"] = "5"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.95
session = tf.Session(config = config)
flags = tf.flags
logging = tf.logging
flags.DEFINE_string("data-path", None, "Where the training/test data is stored.")
flags.DEFINE_string("vocab-path", None, "Where the wordlist file is stored.")
flags.DEFINE_string("save-path", None, "Model output directory.")
flags.DEFINE_integer("hidden-size", 200, "hidden dim of RNN")
flags.DEFINE_integer("num-layers", 2, "number of layers of RNN")
flags.DEFINE_integer("batch-size", 64, "batch size of RNN training")
flags.DEFINE_float("keep-prob", 1.0, "Keep Probability of Dropout")
flags.DEFINE_integer("max-epoch", 30, "The number of max epoch")
FLAGS = flags.FLAGS
class Config(object):
"""Small config."""
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 1
num_layers = 1
num_steps = 20
hidden_size = 200
max_epoch = 4
max_max_epoch = 30
keep_prob = 1
lr_decay = 0.8
batch_size = 64
class RnnlmInput(object):
"""The input data."""
def __init__(self, config, data, name = None):
self.batch_size = batch_size = config.batch_size
self.num_steps = num_steps = config.num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = reader.rnnlm_producer(
data, batch_size, num_steps, name = name)
class RnnlmModel(object):
"""The RNNLM model."""
def __init__(self, is_training, config, input_, skipgram_embeddings):
self._input = input_
batch_size = input_.batch_size
num_steps = input_.num_steps
hidden_size = config.hidden_size
vocab_size = config.vocab_size
def rnn_cell():
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.BasicRNNCell.__init__).args:
return tf.contrib.rnn.BasicRNNCell(hidden_size, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicRNNCell(hidden_size)
attn_cell = rnn_cell
if is_training and config.keep_prob < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
rnn_cell(), output_keep_prob=config.keep_prob)
self.cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
self._initial_state = self.cell.zero_state(batch_size, tf.float32)
self._initial_state_single = self.cell.zero_state(1, tf.float32)
self.initial = tf.reshape(tf.stack(axis=0, values=self._initial_state_single),
[config.num_layers, 1, hidden_size], name="test_initial_state")
# first implement the less efficient version
test_word_in = tf.placeholder(tf.int32, [1, 1], name="test_word_in")
state_placeholder = tf.placeholder(tf.float32, [config.num_layers, 1, hidden_size], name="test_state_in")
# unpacking the input state context
l = tf.unstack(state_placeholder, axis=0)
test_input_state = tuple([l[idx] for idx in range(config.num_layers)])
# self.embedding = tf.get_variable("embedding", [vocab_size, hidden_size], dtype = tf.float32)
# inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data)
# test_inputs = tf.nn.embedding_lookup(self.embedding, test_word_in)
with tf.device("/cpu:0"):
embed_init = tf.constant_initializer(skipgram_embeddings, dtype = tf.float32)
self.embedding = tf.get_variable("embedding", shape = [vocab_size, hidden_size],
dtype = tf.float32, initializer = embed_init)
inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data)
test_inputs = tf.nn.embedding_lookup(self.embedding, test_word_in)
# test time
with tf.variable_scope("RNN"):
(test_cell_output, test_output_state) = self.cell(test_inputs[:, 0, :], test_input_state)
test_state_out = tf.reshape(tf.stack(axis=0, values=test_output_state),
[config.num_layers, 1, hidden_size], name="test_state_out")
test_cell_out = tf.reshape(test_cell_output, [1, hidden_size], name="test_cell_out")
# above is the first part of the graph for test
# test-word-in
# > ---- > test-state-out
# test-state-in > test-cell-out
# below is the 2nd part of the graph for test
# test-word-out
# > prob(word | test-word-out)
# test-cell-in
test_word_out = tf.placeholder(tf.int32, [1, 1], name="test_word_out")
cellout_placeholder = tf.placeholder(tf.float32, [1, hidden_size], name="test_cell_in")
softmax_w = tf.get_variable("softmax_w", [hidden_size, vocab_size], dtype=tf.float32)
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
test_logits = tf.matmul(cellout_placeholder, softmax_w) + softmax_b
test_softmaxed = tf.nn.log_softmax(test_logits)
p_word = test_softmaxed[0, test_word_out[0,0]]
test_out = tf.identity(p_word, name="test_out")
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
# The alternative version of the code below is:
#
# inputs = tf.unstack(inputs, num=num_steps, axis=1)
# outputs, state = tf.contrib.rnn.static_rnn(
# cell, inputs, initial_state=self._initial_state)
outputs = []
state = self._initial_state
with tf.variable_scope("RNN"):
for time_step in range(num_steps):
if time_step > -1: tf.get_variable_scope().reuse_variables()
(cell_output, state) = self.cell(inputs[:, time_step, :], state)
outputs.append(cell_output)
output = tf.reshape(tf.stack(axis=1, values=outputs), [-1, hidden_size])
logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits],
[tf.reshape(input_.targets, [-1])],
[tf.ones([batch_size * num_steps], dtype=tf.float32)])
self._cost = cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
optimizer = tf.train.MomentumOptimizer(self._lr, 0.9)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict = {self._new_lr: lr_value})
@property
def input(self):
return self._input
@property
def initial_state(self):
return self._initial_state
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
def run_epoch(session, model, eval_op=None, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
costs = 0.0
iters = 0
state = session.run(model.initial_state)
fetches = {
"cost": model.cost,
"final_state": model.final_state,
}
if eval_op is not None:
fetches["eval_op"] = eval_op
for step in range(model.input.epoch_size):
feed_dict = {}
for i, h in enumerate(model.initial_state):
feed_dict[h] = state[i]
vals = session.run(fetches, feed_dict)
cost = vals["cost"]
state = vals["final_state"]
costs += cost
iters += model.input.num_steps
if verbose and step % (model.input.epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / model.input.epoch_size, np.exp(costs / iters),
iters * model.input.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
data_index = 0
def generate_batch(train_data, embed_batch_size, num_skips, skip_window):
global data_index
assert embed_batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape = (embed_batch_size), dtype = np.int32)
labels = np.ndarray(shape = (embed_batch_size, 1), dtype = np.int32)
span = 2 * skip_window + 1
buffer = collections.deque(maxlen = span)
for _ in range(span):
buffer.append(train_data[data_index])
data_index = (data_index + 1) % len(train_data)
for i in range(embed_batch_size // num_skips):
target = skip_window
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(train_data[data_index])
data_index = (data_index + 1) % len(train_data)
return batch, labels
def get_config():
return Config()
def main(_):
if not FLAGS.data_path:
raise ValueError("Must set --data_path to RNNLM data directory")
# 读取数据
raw_data = reader.rnnlm_raw_data(FLAGS.data_path, FLAGS.vocab_path)
train_data, valid_data, _, word_map = raw_data
# word_map: dictionary
# train_data: data
reverse_wordmap = dict(zip(word_map.values(), word_map.keys()))
# word embedding参数设置
embed_batch_size = 128
embedding_size = 200
skip_window = 1
num_skips = 2
valid_size = 16
valid_window = 100
embed_num_steps = 100001
config = get_config()
config.vocab_size = len(word_map)
config.hidden_size = FLAGS.hidden_size
config.num_layers = FLAGS.num_layers
config.batch_size = FLAGS.batch_size
config.keep_prob= FLAGS.keep_prob
config.max_max_epoch = FLAGS.max_epoch
eval_config = get_config()
eval_config.batch_size = 1
eval_config.num_steps = 1
vocabulary_size = len(word_map)
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(
valid_examples,
random.sample(range(1000, 1000 + valid_window),valid_size // 2))
num_sampled = 64
graph_skipgram = tf.Graph()
with graph_skipgram.as_default():
train_dataset = tf.placeholder(tf.int32, shape = [embed_batch_size])
train_labels = tf.placeholder(tf.int32, shape = [embed_batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype = tf.int32)
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev = 1.0/math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
print("Embed size: %s" % embed.get_shape().as_list())
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(
weights = softmax_weights,
biases = softmax_biases,
inputs = embed,
labels = train_labels,
num_sampled = num_sampled,
num_classes = vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims = True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
with tf.Session(graph = graph_skipgram) as session:
tf.global_variables_initializer().run()
print("Initialized!")
average_loss = 0
for step in range(embed_num_steps):
batch_data, batch_labels = generate_batch(
train_data = train_data,
embed_batch_size = embed_batch_size,
num_skips = num_skips,
skip_window = skip_window)
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, lo = session.run([optimizer, loss], feed_dict = feed_dict)
average_loss += lo
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
print("Averge loss at step %d: %f" % (step, average_loss))
average_loss = 0
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_wordmap[valid_examples[i]]
top_k = 8
nearest = (-sim[i,:]).argsort()[1:top_k+1]
log = "Nearest to %s:" % valid_word
for k in range(top_k):
close_word = reverse_wordmap[nearest[k]]
log = log + " " + close_word + ","
print(log)
final_embeddings = normalized_embeddings.eval()
graph_rnnlm = tf.Graph()
with graph_rnnlm.as_default():
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
with tf.name_scope("Train"):
train_input = RnnlmInput(config = config, data = train_data, name = "TrainInput")
with tf.variable_scope("Model", reuse = None, initializer = initializer):
m = RnnlmModel(is_training = True, config = config, input_ = train_input,
skipgram_embeddings = final_embeddings)
tf.summary.scalar("Training Loss", m.cost)
tf.summary.scalar("Learning Rate", m.lr)
with tf.name_scope("Valid"):
valid_input = RnnlmInput(config=config, data=valid_data, name="ValidInput")
with tf.variable_scope("Model", reuse=True, initializer=initializer):
mvalid = RnnlmModel(is_training=False, config=config, input_=valid_input,
skipgram_embeddings = final_embeddings)
tf.summary.scalar("Validation Loss", mvalid.cost)
sv = tf.train.Supervisor(logdir=FLAGS.save_path)
with sv.managed_session() as session:
for i in range(config.max_max_epoch):
lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
m.assign_lr(session, config.learning_rate * lr_decay)
print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
train_perplexity = run_epoch(session, m, eval_op=m.train_op, verbose=True)
print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
valid_perplexity = run_epoch(session, mvalid)
print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
if FLAGS.save_path:
print("Saving model to %s." % FLAGS.save_path)
sv.saver.save(session, FLAGS.save_path)
if __name__ == "__main__":
tf.app.run()
|
en
| 0.52892
|
# -*- coding:utf-8 -*- Small config. The input data. The RNNLM model. # first implement the less efficient version # unpacking the input state context # self.embedding = tf.get_variable("embedding", [vocab_size, hidden_size], dtype = tf.float32) # inputs = tf.nn.embedding_lookup(self.embedding, input_.input_data) # test_inputs = tf.nn.embedding_lookup(self.embedding, test_word_in) # test time # above is the first part of the graph for test # test-word-in # > ---- > test-state-out # test-state-in > test-cell-out # below is the 2nd part of the graph for test # test-word-out # > prob(word | test-word-out) # test-cell-in # Simplified version of models/tutorials/rnn/rnn.py's rnn(). # This builds an unrolled LSTM for tutorial purposes only. # In general, use the rnn() or state_saving_rnn() from rnn.py. # # The alternative version of the code below is: # # inputs = tf.unstack(inputs, num=num_steps, axis=1) # outputs, state = tf.contrib.rnn.static_rnn( # cell, inputs, initial_state=self._initial_state) Runs the model on the given data. # 读取数据 # word_map: dictionary # train_data: data # word embedding参数设置
| 2.296048
| 2
|
mean_median_mode/mean_median_mode.py
|
arunachalamb/hackerrank
| 1
|
6625963
|
# Input:
#  N - number of elements in list
#  Input list to calculate mean, median, mode
# Output:
#  Print mean, median and mode of elements in list
n = int(input())
l = [int(i) for i in input().split()]
# Mean (uses the declared count n).
print(sum(l)/n)
# Median: middle element of the sorted list, or the average of the two
# middle elements for an even count.
l.sort()
if n % 2 == 1:
    print(l[n//2])
else:
    print((l[n//2-1]+l[n//2])/2)
# Mode: single-pass frequency count.  The original called list.count for
# every element (O(n^2)); a dict makes it O(n).  Because l is sorted,
# dict insertion order is ascending, so max() breaks count ties toward
# the smallest value — exactly the original tie-break behaviour.
counts = {}
for v in l:
    counts[v] = counts.get(v, 0) + 1
print(max(counts, key=counts.get))
|
# Input:
#  N - number of elements in list
#  Input list to calculate mean, median, mode
# Output:
#  Print mean, median and mode of elements in list
n = int(input())
l = [int(i) for i in input().split()]
# Mean: uses the declared count n, not len(l) — assumes the input line
# really contains n numbers (TODO confirm the judge guarantees this).
print(sum(l)/n)
l.sort()
# Median of the sorted list: middle element, or mean of the two middles.
if n%2 == 1:
    print(l[n//2])
else:
    print((l[n//2-1]+l[n//2])/2)
# Mode: per-element count over the sorted list (O(n^2)); ties resolve to
# the smallest value because index() returns the first maximal position.
m = [0 for i in range(n)]
for i in range(n):
    m[i] = l.count(l[i])
print(l[m.index(max(m))])
|
en
| 0.766471
|
# Input: # N - number of elements in list # Input list to calculate mean, median, mode # Output: # Print mean, median and mode of elements in list
| 3.560808
| 4
|
tests/test_mot_metrics.py
|
itsraina/norfair
| 0
|
6625964
|
<reponame>itsraina/norfair
import os.path
import numpy as np
import pandas as pd
from norfair import Tracker, metrics
DATASET_PATH = "train"
MOTA_ERROR_THRESHOLD = 0.0
FRAME_SKIP_PERIOD = 1
DETECTION_THRESHOLD = 0.01
DISTANCE_THRESHOLD = 0.9
DIAGONAL_PROPORTION_THRESHOLD = 1 / 18
POINTWISE_HIT_COUNTER_MAX = 3
HIT_COUNTER_MAX = 2
def keypoints_distance(detected_pose, tracked_pose):
    """Distance between a detected pose and a tracked pose estimate.

    Averages the L1, L2 and L-inf norms of the per-keypoint offsets, derives a
    match threshold from the detection's bounding-box diagonal (scaled by
    DIAGONAL_PROPORTION_THRESHOLD), and returns 1 / (1 + matching keypoints),
    so more matching keypoints yield a smaller distance.
    """
    orders = (1, 2, np.inf)

    # Bounding box of the detected keypoints; its diagonal makes the
    # per-keypoint match threshold scale with the object's apparent size.
    xs = detected_pose.points[:, 0]
    ys = detected_pose.points[:, 1]
    box_extent = [max(xs) - min(xs), max(ys) - min(ys)]

    # Accumulate per-keypoint distances and box diagonals over each norm order.
    offsets = detected_pose.points - tracked_pose.estimate
    total_dist = sum(np.linalg.norm(offsets, ord=p, axis=1) for p in orders)
    total_diag = sum(np.linalg.norm(box_extent, ord=p) for p in orders)

    mean_dist = total_dist / len(orders)
    keypoint_dist_threshold = total_diag * DIAGONAL_PROPORTION_THRESHOLD

    # A keypoint counts as a match only if it is close enough AND confidently
    # detected in both the detection and the tracker's last detection.
    match_num = np.count_nonzero(
        (mean_dist < keypoint_dist_threshold)
        * (detected_pose.scores > DETECTION_THRESHOLD)
        * (tracked_pose.last_detection.scores > DETECTION_THRESHOLD)
    )
    return 1 / (1 + match_num)
def test_mot_metrics():
    """Tests that Norfair's MOT metrics didn't get worse.

    Allows a configurable margin (MOTA_ERROR_THRESHOLD) on how much worse the
    overall MOTA score may get before the test fails.

    Raises:
        FileNotFoundError: If the previous metrics file is not found.
    """
    # Load previous metrics. A missing tests/metrics.txt propagates naturally
    # as FileNotFoundError (the previous try/except only re-raised it).
    previous_metrics = pd.read_fwf('tests/metrics.txt')
    previous_metrics.columns = [column_name.lower() for column_name in previous_metrics.columns]
    previous_metrics = previous_metrics.set_index(previous_metrics.columns[0])
    accumulator = metrics.Accumulators()
    sequences_paths = [element.path for element in os.scandir(DATASET_PATH) if element.is_dir()]
    for input_path in sequences_paths:
        # Sequence metadata (e.g. vertical resolution) lives in seqinfo.ini
        seqinfo_path = os.path.join(input_path, "seqinfo.ini")
        info_file = metrics.InformationFile(file_path=seqinfo_path)
        all_detections = metrics.DetectionFileParser(
            input_path=input_path, information_file=info_file
        )
        # Fresh tracker per sequence so state never leaks between videos.
        tracker = Tracker(
            distance_function=keypoints_distance,
            distance_threshold=DISTANCE_THRESHOLD,
            detection_threshold=DETECTION_THRESHOLD,
            pointwise_hit_counter_max=POINTWISE_HIT_COUNTER_MAX,
            hit_counter_max=HIT_COUNTER_MAX,
        )
        # Initialize accumulator for this video
        accumulator.create_accumulator(input_path=input_path, information_file=info_file)
        for frame_number, detections in enumerate(all_detections):
            if frame_number % FRAME_SKIP_PERIOD == 0:
                tracked_objects = tracker.update(
                    detections=detections, period=FRAME_SKIP_PERIOD
                )
            else:
                # Skipped frame: advance the tracker without feeding detections
                tracked_objects = tracker.update()
            accumulator.update(predictions=tracked_objects)
    accumulator.compute_metrics()
    new_metrics = accumulator.summary_dataframe
    new_metrics.columns = [column_name.lower() for column_name in new_metrics.columns]
    # Unify the scores to be able to compare them: new metrics are fractions
    # in [0, 1]; previous metrics are percentage strings ending with '%'.
    new_overall_mota = new_metrics.loc["OVERALL", "mota"] * 100
    previous_overall_mota = float(previous_metrics.loc["OVERALL", "mota"][:-1])
    accumulator.print_metrics()
    assert new_overall_mota >= previous_overall_mota * (1 - MOTA_ERROR_THRESHOLD), f"New overall MOTA score: {new_overall_mota} is too low, previous overall MOTA score: {previous_overall_mota}"
|
import os.path
import numpy as np
import pandas as pd
from norfair import Tracker, metrics
DATASET_PATH = "train"
MOTA_ERROR_THRESHOLD = 0.0
FRAME_SKIP_PERIOD = 1
DETECTION_THRESHOLD = 0.01
DISTANCE_THRESHOLD = 0.9
DIAGONAL_PROPORTION_THRESHOLD = 1 / 18
POINTWISE_HIT_COUNTER_MAX = 3
HIT_COUNTER_MAX = 2
def keypoints_distance(detected_pose, tracked_pose):
    """Distance between a detected pose and a tracked pose estimate.

    Averages the L1, L2 and L-inf norms of the per-keypoint offsets, derives a
    match threshold from the detection's bounding-box diagonal (scaled by
    DIAGONAL_PROPORTION_THRESHOLD), and returns 1 / (1 + matching keypoints),
    so more matching keypoints yield a smaller distance.
    """
    orders = (1, 2, np.inf)

    # Bounding box of the detected keypoints; its diagonal makes the
    # per-keypoint match threshold scale with the object's apparent size.
    xs = detected_pose.points[:, 0]
    ys = detected_pose.points[:, 1]
    box_extent = [max(xs) - min(xs), max(ys) - min(ys)]

    # Accumulate per-keypoint distances and box diagonals over each norm order.
    offsets = detected_pose.points - tracked_pose.estimate
    total_dist = sum(np.linalg.norm(offsets, ord=p, axis=1) for p in orders)
    total_diag = sum(np.linalg.norm(box_extent, ord=p) for p in orders)

    mean_dist = total_dist / len(orders)
    keypoint_dist_threshold = total_diag * DIAGONAL_PROPORTION_THRESHOLD

    # A keypoint counts as a match only if it is close enough AND confidently
    # detected in both the detection and the tracker's last detection.
    match_num = np.count_nonzero(
        (mean_dist < keypoint_dist_threshold)
        * (detected_pose.scores > DETECTION_THRESHOLD)
        * (tracked_pose.last_detection.scores > DETECTION_THRESHOLD)
    )
    return 1 / (1 + match_num)
def test_mot_metrics():
    """Tests that Norfair's MOT metrics didn't get worse.

    Allows a configurable margin (MOTA_ERROR_THRESHOLD) on how much worse the
    overall MOTA score may get before the test fails.

    Raises:
        FileNotFoundError: If the previous metrics file is not found.
    """
    # Load previous metrics. A missing tests/metrics.txt propagates naturally
    # as FileNotFoundError (the previous try/except only re-raised it).
    previous_metrics = pd.read_fwf('tests/metrics.txt')
    previous_metrics.columns = [column_name.lower() for column_name in previous_metrics.columns]
    previous_metrics = previous_metrics.set_index(previous_metrics.columns[0])
    accumulator = metrics.Accumulators()
    sequences_paths = [element.path for element in os.scandir(DATASET_PATH) if element.is_dir()]
    for input_path in sequences_paths:
        # Sequence metadata (e.g. vertical resolution) lives in seqinfo.ini
        seqinfo_path = os.path.join(input_path, "seqinfo.ini")
        info_file = metrics.InformationFile(file_path=seqinfo_path)
        all_detections = metrics.DetectionFileParser(
            input_path=input_path, information_file=info_file
        )
        # Fresh tracker per sequence so state never leaks between videos.
        tracker = Tracker(
            distance_function=keypoints_distance,
            distance_threshold=DISTANCE_THRESHOLD,
            detection_threshold=DETECTION_THRESHOLD,
            pointwise_hit_counter_max=POINTWISE_HIT_COUNTER_MAX,
            hit_counter_max=HIT_COUNTER_MAX,
        )
        # Initialize accumulator for this video
        accumulator.create_accumulator(input_path=input_path, information_file=info_file)
        for frame_number, detections in enumerate(all_detections):
            if frame_number % FRAME_SKIP_PERIOD == 0:
                tracked_objects = tracker.update(
                    detections=detections, period=FRAME_SKIP_PERIOD
                )
            else:
                # Skipped frame: advance the tracker without feeding detections
                tracked_objects = tracker.update()
            accumulator.update(predictions=tracked_objects)
    accumulator.compute_metrics()
    new_metrics = accumulator.summary_dataframe
    new_metrics.columns = [column_name.lower() for column_name in new_metrics.columns]
    # Unify the scores to be able to compare them: new metrics are fractions
    # in [0, 1]; previous metrics are percentage strings ending with '%'.
    new_overall_mota = new_metrics.loc["OVERALL", "mota"] * 100
    previous_overall_mota = float(previous_metrics.loc["OVERALL", "mota"][:-1])
    accumulator.print_metrics()
    assert new_overall_mota >= previous_overall_mota * (1 - MOTA_ERROR_THRESHOLD), f"New overall MOTA score: {new_overall_mota} is too low, previous overall MOTA score: {previous_overall_mota}"
|
en
| 0.922253
|
# Set keypoint_dist_threshold based on object size, and calculate # distance between detections and tracker estimations Tests that Norfair's MOT metrics didn't get worse Configurable so that it allows some margin on how much worse metrics could get before the test fails. Margin configured through MOTA_ERROR_THRESHOLD. Raises: If the previous metrics file its not found. # Load previous metrics # Search vertical resolution in seqinfo.ini # Initialize accumulator for this video # Unify the scores to be able to compare them. new metrics is the percentage # expressed between 0 and 1, the previous metrics have the percentage as a string # with the % character at the end
| 2.549433
| 3
|
bundle_cache/app_store/tk-flame/v1.14.4/engine.py
|
ColinKennedy/tk-config-default2-respawn
| 4
|
6625965
|
# Copyright (c) 2014 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
A Toolkit engine for Flame
"""
import os
import pwd
import re
import shlex
import sys
import uuid
import sgtk
import pickle
import logging
import pprint
import logging.handlers
import traceback
import datetime
import subprocess
from sgtk import TankError
LOG_CHANNEL = "sgtk.tk-flame"
class FlameEngine(sgtk.platform.Engine):
"""
The engine class. This wraps around a series of callbacks in Flame (so called hooks).
The Flame engine is a bit different than other engines.
Because Flame doesn't have an API, we cannot call Flame, but Flame will call out
to the toolkit code. This means that the normal register_command approach won't
work inside of Flame - instead, the engine introduces a different scheme of callbacks
that apps can register to ensure that they cen do stuff.
For apps, the main entry points are register_export_hook and register_batch_hook.
For more information, see below.
"""
# the name of the folder in the engine which we should register
# with Flame to trigger various hooks to run.
FLAME_HOOKS_FOLDER = "flame_hooks"
# our default log file to write to
SGTK_LOG_FILE = "tk-flame.log"
# a 'plan B' safe log file that we call fall back on in case
# the default log file cannot be accessed
SGTK_LOG_FILE_SAFE = "/tmp/tk-flame.log"
# define constants for the various modes the engine can execute in
(ENGINE_MODE_DCC, ENGINE_MODE_PRELAUNCH, ENGINE_MODE_BACKBURNER) = range(3)
@property
def host_info(self):
"""
:returns: A dictionary with information about the application hosting this engine.
The returned dictionary is of the following form on success:
{
"name": "Flame",
"version": "2018.3.pr84",
}
The returned dictionary is of following form on an error preventing
the version identification.
{
"name": "Flame",
"version": "unknown"
}
"""
host_info = {"name": "Flame", "version": "unknown"}
try:
# The 'SHOTGUN_FLAME_VERSION' environment variable comes from Flame plugin
# The 'TOOLKIT_FLAME_VERSION' environment variable comes from Flame classic config
if "SHOTGUN_FLAME_VERSION" in os.environ:
host_info["version"] = os.environ.get("SHOTGUN_FLAME_VERSION", "unknown")
elif "TOOLKIT_FLAME_VERSION" in os.environ:
host_info["version"] = os.environ.get("TOOLKIT_FLAME_VERSION", "unknown")
except:
# Fallback to initialization value above
pass
return host_info
    def __init__(self, *args, **kwargs):
        """
        Overridden constructor where we init some things which
        need to be defined very early on in the engine startup.

        Registers the engine's own Flame hooks folder, nulls out lazily
        populated state, and resolves the engine execution mode from the
        TOOLKIT_FLAME_ENGINE_MODE environment variable.

        :raises TankError: if TOOLKIT_FLAME_ENGINE_MODE holds an unknown value
            (including being unset).
        """
        # to support use cases where the flame engine isn't started via
        # the multi-launchapp chain, make sure that hooks that the engine
        # implements are registered.
        flame_hooks_folder = os.path.join(self.disk_location, self.FLAME_HOOKS_FOLDER)
        sgtk.util.append_path_to_env_var("DL_PYTHON_HOOK_PATH", flame_hooks_folder)
        self.log_debug("Added to hook path: %s" % flame_hooks_folder)
        # the path to the associated python executable
        self._python_executable_path = None
        # version of Flame we are running
        self._flame_version = None
        # root folder where flame is installed
        self._install_root = None
        # set the current engine mode. The mode contains information about
        # how the engine was started - it can be executed either before the
        # actual DCC starts up (pre-launch), in the DCC itself or on the
        # backburner farm. This means that there are three distinct bootstrap
        # scripts which can launch the engine (all contained within the engine itself).
        # these bootstrap scripts all set an environment variable called
        # TOOLKIT_FLAME_ENGINE_MODE which defines the desired engine mode.
        engine_mode_str = os.environ.get("TOOLKIT_FLAME_ENGINE_MODE")
        if engine_mode_str == "PRE_LAUNCH":
            self._engine_mode = self.ENGINE_MODE_PRELAUNCH
        elif engine_mode_str == "BACKBURNER":
            self._engine_mode = self.ENGINE_MODE_BACKBURNER
        elif engine_mode_str == "DCC":
            self._engine_mode = self.ENGINE_MODE_DCC
        else:
            raise TankError("Unknown launch mode '%s' defined in "
                            "environment variable TOOLKIT_FLAME_ENGINE_MODE!" % engine_mode_str)
        # Transcoder, thumbnail generator and local movie generator will be
        # initialized on first request for them since, in order to know which
        # type we will need, we need to wait for the Flame API to be loaded
        # completely.
        #
        self._transcoder = None
        self._thumbnail_generator = None
        self._local_movie_generator = None
        super(FlameEngine, self).__init__(*args, **kwargs)
    def pre_app_init(self):
        """
        Engine construction/setup done before any apps are initialized.

        Installs the engine's exception trap, resets per-session export state,
        configures Qt string handling when a UI is present, and (for classic
        config launches) links the current Flame project to its Shotgun project.
        """
        # set up a custom exception trap for the engine.
        # it will log the exception and if possible also
        # display it in a UI
        sys.excepthook = sgtk_exception_trap
        # now start the proper init
        self.log_debug("%s: Initializing..." % self)
        # maintain a list of export options
        self._registered_export_instances = {}
        self._export_sessions = {}
        self._registered_batch_instances = []
        # maintain the export cache
        self._export_info = None
        if self.has_ui:
            # tell QT to interpret C strings as utf-8
            from sgtk.platform.qt import QtCore, QtGui
            utf8 = QtCore.QTextCodec.codecForName("utf-8")
            QtCore.QTextCodec.setCodecForCStrings(utf8)
        # Assuming we're in a new enough version of Flame (2018.3+) we'll
        # be able to link the Flame project to our SG project. This will
        # ensure that if a user launches Flame's plugin-based Shotgun
        # integration they will be bootstrapped into the correct
        # project and won't be prompted to choose an SG project to link to.
        #
        # NOTE: We only take the initiative here and create the project
        # link if this is a classic config launch of Flame. One quick way
        # to know that is to just refer to the environment, where we know
        # that the classic startup script sets some variables.
        if "TOOLKIT_ENGINE_NAME" in os.environ:
            try:
                import flame
            except Exception:
                self.logger.debug(
                    "Was unable to import the flame Python module. As a result, "
                    "the Flame project will not be linked to associated Shotgun "
                    "project using the Flame Python API. This shouldn't cause "
                    "any problems in the current session, but it does mean "
                    "that the user might be prompted to link this project to a "
                    "Shotgun project if they launch Flame using the Toolkit "
                    "plugin and open this same Flame project."
                )
            else:
                try:
                    current_flame_project = flame.project.current_project
                    current_flame_project.shotgun_project_name = self.context.project.get("name")
                except Exception:
                    self.logger.debug(
                        "Was unable to set the current Flame project's "
                        "shotgun_project_name property. This shouldn't cause "
                        "any problems in the current session, but it does mean "
                        "that the user might be prompted to link this project to a "
                        "Shotgun project if they launch Flame using the Toolkit "
                        "plugin and open this same Flame project."
                    )
                else:
                    self.logger.debug(
                        "Successfully linked the Flame project to its associated "
                        "Shotgun project."
                    )
    def _initialize_logging(self, install_root):
        """
        Set up logging for the engine.

        Picks the standard Flame log location when writable, otherwise falls
        back to a safe /tmp log, and installs a rotating handler on the
        engine's log channel.

        :param install_root: path to flame install root
        """
        # standard flame log file
        std_log_file = os.path.join(install_root, "log", self.SGTK_LOG_FILE)
        # test if we can write to the default log file
        if os.access(os.path.dirname(std_log_file), os.W_OK):
            log_file = std_log_file
            using_safe_log_file = False
        else:
            # cannot rotate file in this directory, write to tmp instead.
            log_file = self.SGTK_LOG_FILE_SAFE
            using_safe_log_file = True
        # NOTE(review): the two branches use different rotation strategies -
        # the safe /tmp log rotates by size (4MiB, 10 backups) while the
        # standard log never rotates by size (maxBytes=0) and instead relies
        # on the explicit per-session doRollover() below (50 backups,
        # delay=True so the file is opened only after roll over). Presumably
        # intentional - confirm before "fixing" the apparent mismatch with
        # the comment below.
        # Set up a rotating logger with 4MiB max file size
        if using_safe_log_file:
            rotating = logging.handlers.RotatingFileHandler(log_file, maxBytes=4 * 1024 * 1024, backupCount=10)
        else:
            rotating = logging.handlers.RotatingFileHandler(log_file, maxBytes=0, backupCount=50, delay=True)
        # Always rotate. Current user might not have the correct permission to open this file
        if os.path.exists(log_file):
            rotating.doRollover()  # Will open file after roll over
        rotating.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] PID %(process)d: %(message)s"))
        # create a global logging object
        logger = logging.getLogger(LOG_CHANNEL)
        logger.propagate = False
        # clear any existing handlers
        logger.handlers = []
        logger.addHandler(rotating)
        if self.get_setting("debug_logging"):
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)
        # now that we have a logger, we can warn about a non-std log file :)
        if using_safe_log_file:
            logger.error("Cannot write to standard log file location %s! Please check "
                         "the filesystem permissions. As a fallback, logs will be "
                         "written to %s instead." % (std_log_file, log_file))
def set_python_executable(self, python_path):
"""
Specifies the path to the associated python process.
This is typically populated as part of the engine startup.
:param python_path: path to python, as string
"""
self._python_executable_path = python_path
self.log_debug("This engine is running python interpreter '%s'" % self._python_executable_path)
def set_version_info(self, major_version_str, minor_version_str, full_version_str, patch_version_str="0"):
"""
Specifies which version of Flame this engine is running.
This is typically populated as part of the engine startup.
:param major_version_str: Major version number as string
:param minor_version_str: Minor version number as string
:param patch_version_str: Patch version number as string
:param full_version_str: Full version number as string
"""
self._flame_version = {"full": full_version_str, "major": major_version_str, "minor": minor_version_str,
"patch": patch_version_str}
self.log_debug("This engine is running with Flame version '%s'" % self._flame_version)
def set_install_root(self, install_root):
"""
Specifies where the flame installation is located.
this may be '/usr/discreet', '/opt/Autodesk' etc.
:param install_root: root path to flame installation
"""
if self._install_root:
# cannot call this multiple times
raise TankError("Cannot call set_install_root multiple times!")
self.log_debug("Flame install root is '%s'" % self._install_root)
self._install_root = install_root
self._initialize_logging(install_root)
    def _get_commands_matching_setting(self, setting):
        """
        Resolve an engine setting into registered engine commands.

        This expects a list of dictionaries in the form:

            {name: "command-name", app_instance: "instance-name", display_name: "Display Name" }

        The app_instance value will match a particular app instance associated with
        the engine. The name is the menu name of the command to run when the engine starts up. The
        display_name is the menu display name of the command to run.

        If name is '' then all commands from the given app instance are returned.
        If display_name is not present, name will be used instead.

        :param setting: Name of the engine setting to read the command list from.
        :returns: A list of tuples for all commands that match the given setting.
            Each tuple will be in the form (instance_name, display_name, command_name, callback)
        """
        # build a dictionary grouping all the registered commands by instance name
        commands_by_instance = {}
        # NOTE(review): iteritems() is Python 2-only; use items() if this file
        # ever moves to Python 3.
        for (name, value) in self.commands.iteritems():
            app_instance = value["properties"].get("app")
            if app_instance:
                instance_name = app_instance.instance_name
            else:
                # A command without an app instance in the context menu is actually coming from the engine, so we'll
                # use the engine name instead.
                instance_name = "tk-flame"
            commands_by_instance.setdefault(instance_name, []).append((name, value["callback"]))
        # go through the values from the setting and return any matching commands
        ret_value = []
        setting_value = self.get_setting(setting, [])
        for command in setting_value:
            command_name = command["name"]
            instance_name = command["app_instance"]
            display_name = command.get("display_name", command_name)
            instance_commands = commands_by_instance.get(instance_name)
            # the setting references an app instance that registered nothing
            if instance_commands is None:
                continue
            for (name, callback) in instance_commands:
                # add the command if the name from the settings is '' or the name matches
                if not command_name or (command_name == name):
                    ret_value.append((instance_name, display_name, name, callback))
        return ret_value
def post_app_init(self):
"""
Do any initialization after apps have been loaded
"""
self.log_debug("%s: Running post app init..." % self)
# only run the startup commands when in DCC mode
if self._engine_mode != self.ENGINE_MODE_DCC:
return
# run any commands registered via run_at_startup
commands_to_start = self._get_commands_matching_setting("run_at_startup")
for (instance_name, command_name, callback) in commands_to_start:
self.log_debug("Running at startup: (%s, %s)" % (instance_name, command_name))
callback()
def destroy_engine(self):
"""
Called when the engine is being destroyed
"""
self.log_debug("%s: Destroying..." % self)
# Remove the current engine python hooks from the flame python hooks path
env_var_sep = ":"
env_var_name = "DL_PYTHON_HOOK_PATH"
flame_hooks_folder = os.path.join(self.disk_location, self.FLAME_HOOKS_FOLDER)
paths = os.environ.get(env_var_name, "").split(env_var_sep)
paths = [path for path in paths if path != flame_hooks_folder]
os.environ[env_var_name] = env_var_sep.join(paths)
self.log_debug("Removed to hook paths: %s" % flame_hooks_folder)
# Close every app windows
self.close_windows()
@property
def flame_main_window(self):
"""
Returns the Flame's main window
:return: Widget representing the flame's main window.
"""
from sgtk.platform.qt import QtCore, QtGui
for w in QtGui.QApplication.topLevelWidgets():
if w.objectName() == "CF Main Window":
self.log_debug("Found Flame main window (%s)" % w.windowTitle())
return w
@property
def python_executable(self):
"""
Returns the python executable associated with this engine
:returns: path to python, e.g. '/usr/discreet/python/2016.0.0.322/bin/python'
"""
if self._python_executable_path is None:
raise TankError("Python executable has not been defined for this engine instance!")
return self._python_executable_path
@property
def preset_version(self):
"""
Returns the preset version required for the currently executing
version of Flame. Preset xml files in Flame all have a version number
to denote which generation of the file format they implement. If you are using
an old preset with a new version of Flame, a warning message appears.
:returns: Preset version, as string, e.g. '5'
"""
if self._flame_version is None:
raise TankError("Cannot determine preset version - No Flame DCC version specified!")
if self.is_version_less_than("2016.1"):
# for version 2016 before ext 1, export preset is v5
return "5"
elif self.is_version_less_than("2017"):
# flame 2016 extension 1 and above.
return "6"
else:
# flame 2017 and above
#
# Note: Flame 2017 uses preset 7, however further adjustments to the actual
# preset format used is required in individual apps - for the time being,
# the preset version is held at v6, ensuring that app apps operate correctly,
# but generating a warning message at startup.
#
return "7"
@property
def export_presets_root(self):
"""
The location where flame export presets are located
:returns: Path as string
"""
# If possible use the Flame python API to get the presets location
try:
import flame
if 'PyExporter' in dir(flame):
return flame.PyExporter.get_presets_base_dir(
flame.PyExporter.PresetVisibility.Shotgun)
except:
pass
if self.is_version_less_than("2017"):
# flame 2016 presets structure
return os.path.join(
self.install_root,
"presets",
self.flame_version,
"export",
"presets"
)
else:
# flame 2017+ presets structure (note the extra flame folder)
return os.path.join(
self.install_root,
"presets",
self.flame_version,
"export",
"presets",
"flame"
)
@staticmethod
def _get_full_preset_path(preset_path, preset_type):
"""
Convert a path to a preset that can be incomplete to an absolute path.
:param preset_path: Path to a preset to find.
:param preset_type: Type of preset to look for.
:returns: Absolute path to the preset.
"""
if not os.path.isabs(preset_path):
import flame
presets_dir = flame.PyExporter.get_presets_dir(
flame.PyExporter.PresetVisibility.Shotgun,
preset_type
)
preset_path = os.path.join(
presets_dir,
preset_path
)
return preset_path
@property
def thumbnails_preset_path(self):
"""
The location of the flame export preset to use to generate thumbnails.
:returns: Path as string
"""
import flame
return self._get_full_preset_path(
self.get_setting("thumbnails_preset_path"),
flame.PyExporter.PresetType.Image_Sequence
)
@property
def previews_preset_path(self):
"""
The location of the flame export preset to use to generate previews.
:returns: Path as string
"""
import flame
return self._get_full_preset_path(
self.get_setting("previews_preset_path"),
flame.PyExporter.PresetType.Movie
)
@property
def local_movies_preset_path(self):
"""
The location of the flame export preset to use to generate local movies.
Local movies are linked to assets in Shotgun thru the "Path to Movie"
field but are not uploaded on the server.
:returns: Path as string
"""
import flame
return self._get_full_preset_path(
self.get_setting("local_movies_preset_path"),
flame.PyExporter.PresetType.Movie
)
@property
def wiretap_tools_root(self):
"""
The location of wiretap tool
:returns: Path as string
"""
return os.path.join(
self.install_root,
"wiretap",
"tools",
self.flame_version
)
    def _is_version_less_than(self, major, minor, patch):
        """
        Compares the given version components against the currently running
        Flame version and returns True if the current Flame version is older
        than the given one, False otherwise (equal versions return False).

        (The previous docstring described the comparison backwards and
        documented a non-existent 'version_str' parameter.)

        Example:
        - Flame: '2016.1.0.278', given 2016/1/0 => False
        - Flame: '2016',         given 2016/1/0 => True

        :param major: Major version component to compare against (int-like)
        :param minor: Minor version component to compare against (int-like)
        :param patch: Patch version component to compare against (int-like)
        :returns: bool
        """
        # Compare component by component, most significant first; the first
        # differing component decides the result.
        if int(self.flame_major_version) != int(major):
            return int(self.flame_major_version) < int(major)
        if int(self.flame_minor_version) != int(minor):
            return int(self.flame_minor_version) < int(minor)
        if int(self.flame_patch_version) != int(patch):
            return int(self.flame_patch_version) < int(patch)
        # Same version
        return False
def is_version_less_than(self, version_str):
"""
Compares the given version string with the current
flame version and returns False if the given version is
greater than the current version.
Example:
- Flame: '2016.1.0.278', version str: '2016.1' => False
- Flame: '2016', version str: '2016.1' => True
:param version_str: Version to run comparison against
"""
major_ver = 0
minor_ver = 0
patch_ver = 0
chunks = version_str.split(".")
if len(chunks) > 0:
if chunks[0].isdigit():
major_ver = int(chunks[0])
if len(chunks) > 1:
if chunks[1].isdigit():
minor_ver = int(chunks[1])
if len(chunks) > 2:
if chunks[2].isdigit():
patch_ver = int(chunks[2])
return self._is_version_less_than(major_ver, minor_ver, patch_ver)
@property
def flame_major_version(self):
"""
Returns Flame's major version number as a string.
:returns: String (e.g. '2016')
"""
if self._flame_version is None:
raise TankError("No Flame DCC version specified!")
return self._flame_version["major"]
@property
def flame_minor_version(self):
"""
Returns Flame's minor version number as a string.
:returns: String (e.g. '2')
"""
if self._flame_version is None:
raise TankError("No Flame DCC version specified!")
return self._flame_version["minor"]
@property
def flame_patch_version(self):
"""
Returns Flame's patch version number as a string.
:returns: String (e.g. '2')
"""
if self._flame_version is None:
raise TankError("No Flame DCC version specified!")
return self._flame_version["patch"]
@property
def flame_version(self):
"""
Returns Flame's full version number as a string.
:returns: String (e.g. '2016.1.0.278')
"""
if self._flame_version is None:
raise TankError("No Flame DCC version specified!")
return self._flame_version["full"]
@property
def install_root(self):
"""
The location where flame is installed.
This may be '/usr/discreet', '/opt/Autodesk' etc.
:returns: Path as string
"""
return self._install_root
@property
def has_ui(self):
"""
Property to determine if the current environment has access to a UI or not
"""
# check if there is a UI. With Flame, we may run the engine in bootstrap
# mode or on the farm - in this case, there is no access to UI. If inside the
# DCC UI environment, pyside support is available.
has_ui = False
try:
from sgtk.platform.qt import QtCore, QtGui
if QtCore.QCoreApplication.instance():
# there is an active application
has_ui = True
except:
pass
return has_ui
    def show_panel(self, panel_id, title, bundle, widget_class, *args, **kwargs):
        """
        Override the base show_panel to create a non-modal dialog that will stay on
        top of the Flame interface.

        :param panel_id: Unique identifier for the panel
        :param title: Title of the panel window
        :param bundle: The app, engine or framework associated with this window
        :param widget_class: The class of the UI to be constructed (QWidget subclass)
        :returns: the created widget_class instance, or None when no UI is available
        """
        if not self.has_ui:
            self.log_error("Sorry, this environment does not support UI display! Cannot show "
                           "the requested panel '%s'." % title)
            return None
        from sgtk.platform.qt import QtCore, QtGui
        # create the dialog:
        dialog, widget = self._create_dialog_with_widget(title, bundle, widget_class, *args, **kwargs)
        # NOTE(review): '&' binds tighter than '|' in Python, so this evaluates
        # as flags | (StaysOnTop & ~CloseButtonHint). The close-button hint is
        # therefore never actually cleared from the existing flags - confirm
        # whether parentheses were intended around the '|' expression.
        dialog.setWindowFlags(
            dialog.windowFlags() |
            QtCore.Qt.WindowStaysOnTopHint &
            ~QtCore.Qt.WindowCloseButtonHint
        )
        self.created_qt_dialogs.append(dialog)
        # show the dialog
        dialog.show()
        # lastly, return the instantiated widget
        return widget
def _get_dialog_parent(self):
"""
Get the QWidget parent for all dialogs created through :meth:`show_dialog` :meth:`show_modal`.
Can be overriden in derived classes to return the QWidget to be used as the parent
for all TankQDialog's.
:return: QT Parent window (:class:`PySide.QtGui.QWidget`)
"""
from sgtk.platform.qt import QtCore, QtGui
w = self.flame_main_window
return w if w else super(FlameEngine, self)._get_dialog_parent()
    def show_dialog(self, title, bundle, widget_class, *args, **kwargs):
        """
        Shows a non-modal dialog window in a way suitable for this engine.
        The engine will attempt to parent the dialog nicely to the host application.

        The dialog will be created with a standard Toolkit window title bar where
        the title will be displayed.

        .. note:: In some cases, it is necessary to hide the standard Toolkit title
                  bar. You can do this by adding a property to the widget class you are
                  displaying::

                      @property
                      def hide_tk_title_bar(self):
                          "Tell the system to not show the standard toolkit toolbar"
                          return True

        **Notes for engine developers**

        Qt dialog & widget management can be quite tricky in different engines/applications.
        Because of this, Sgtk provides a few overridable methods with the idea being that when
        developing a new engine, you only need to override the minimum amount necessary.

        Making use of these methods in the correct way allows the base Engine class to manage the
        lifetime of the dialogs and widgets efficiently and safely without you having to worry about it.

        The methods available are listed here in the hierarchy in which they are called::

            show_dialog()/show_modal()
                _create_dialog_with_widget()
                    _get_dialog_parent()
                    _create_widget()
                    _create_dialog()

        For example, if you just need to make sure that all dialogs use a specific parent widget
        then you only need to override _get_dialog_parent() (e.g. the tk-maya engine).
        However, if you need to implement a two-stage creation then you may need to re-implement
        show_dialog() and show_modal() to call _create_widget() and _create_dialog() directly rather
        than using the helper method _create_dialog_with_widget() (e.g. the tk-3dsmax engine).
        Finally, if the application you are writing an engine for is Qt based then you may not need
        to override any of these methods (e.g. the tk-nuke engine).

        :param title: The title of the window. This will appear in the Toolkit title bar.
        :param bundle: The app, engine or framework object that is associated with this window
        :param widget_class: The class of the UI to be constructed. This must derive from QWidget.
        :type widget_class: :class:`PySide.QtGui.QWidget`

        Additional parameters specified will be passed through to the widget_class constructor.

        :returns: the created widget_class instance
        """
        if not self.has_ui:
            self.log_error("Sorry, this environment does not support UI display! Cannot show "
                           "the requested window '%s'." % title)
            return None
        from sgtk.platform.qt import QtGui, QtCore
        # create the dialog:
        dialog, widget = self._create_dialog_with_widget(title, bundle, widget_class, *args, **kwargs)
        # NOTE(review): '&' binds tighter than '|' in Python, so this evaluates
        # as flags | (StaysOnTop & ~CloseButtonHint). The close-button hint is
        # therefore never actually cleared from the existing flags - confirm
        # whether parentheses were intended around the '|' expression.
        dialog.setWindowFlags(
            dialog.windowFlags() |
            QtCore.Qt.WindowStaysOnTopHint &
            ~QtCore.Qt.WindowCloseButtonHint
        )
        self.created_qt_dialogs.append(dialog)
        # show the dialog
        dialog.show()
        # lastly, return the instantiated widget
        return widget
def close_windows(self):
"""
Closes the various windows (dialogs, panels, etc.) opened by the engine.
"""
# Make a copy of the list of Tank dialogs that have been created by the engine and
# are still opened since the original list will be updated when each dialog is closed.
opened_dialog_list = self.created_qt_dialogs[:]
# Loop through the list of opened Tank dialogs.
for dialog in opened_dialog_list:
dialog_window_title = dialog.windowTitle()
try:
# Close the dialog and let its close callback remove it from the original dialog list.
self.log_debug("Closing dialog %s." % dialog_window_title)
dialog.close()
except Exception, exception:
self.log_error("Cannot close dialog %s: %s" % (dialog_window_title, exception))
def log_debug(self, msg):
"""
Log a debug message
:param msg: The debug message to log
"""
logging.getLogger(LOG_CHANNEL).debug(msg)
def log_info(self, msg):
"""
Log some info
:param msg: The info message to log
"""
logging.getLogger(LOG_CHANNEL).info(msg)
def log_warning(self, msg):
"""
Log a warning
:param msg: The warning message to log
"""
logging.getLogger(LOG_CHANNEL).warning(msg)
def log_error(self, msg):
"""
Log an error
:param msg: The error message to log
"""
logging.getLogger(LOG_CHANNEL).error(msg)
################################################################################################################
# Engine Bootstrap
#
    def pre_dcc_launch_phase(self):
        """
        Special bootstrap method used to set up the Flame environment.
        This is designed to execute before Flame has launched, as part of the
        bootstrapping process.

        This method assumes that it is being executed inside a Flame python
        and is called from the app_launcher script which ensures such an environment.

        The bootstrapper will first import the wiretap API and setup other settings.
        It then attempts to execute the pre-DCC project creation process, utilizing
        both wiretap and QT (setup project UI) for this.
        Finally, it will return the command line args to pass to Flame as it is being
        launched.

        :returns: arguments to pass to the app launch process
        """
        if self.get_setting("debug_logging"):
            # enable Flame hooks debug
            os.environ["DL_DEBUG_PYTHON_HOOKS"] = "1"
        # see if we can launch into batch mode. We only do this when in a
        # shot context and if there is a published batch file in Shotgun
        #
        # For now, hard code the logic of how to detect which batch file to load up.
        # TODO: in the future, we may want to expose this in a hook - but it is arguably
        # pretty advanced customization :)
        #
        # Current logic: Find the latest batch publish belonging to the context
        if self.context.entity:
            # we have a current context to lock on to!
            # try to see if we can find the latest batch publish
            publish_type = sgtk.util.get_published_file_entity_type(self.sgtk)
            # the published-file entity type (and therefore the name of the
            # type-link field) differs between newer and legacy Shotgun sites
            if publish_type == "PublishedFile":
                type_link_field = "published_file_type.PublishedFileType.code"
            else:
                type_link_field = "tank_type.TankType.code"
            # most recent publish of the configured batch type for this entity
            sg_data = self.shotgun.find_one(publish_type,
                                            [[type_link_field, "is", self.get_setting("flame_batch_publish_type")],
                                             ["entity", "is", self.context.entity]],
                                            ["path"],
                                            order=[{"field_name": "created_at", "direction": "desc"}])
            if sg_data:
                # we have a batch file published for this context!
                # only auto-start with it if it actually exists on local disk
                batch_file_path = sg_data["path"]["local_path"]
                if os.path.exists(batch_file_path):
                    self.log_debug("Setting auto startup file '%s'" % batch_file_path)
                    os.environ["DL_BATCH_START_WITH_SETUP"] = batch_file_path
        # add Flame hooks for this engine
        flame_hooks_folder = os.path.join(self.disk_location, self.FLAME_HOOKS_FOLDER)
        sgtk.util.append_path_to_env_var("DL_PYTHON_HOOK_PATH", flame_hooks_folder)
        self.log_debug("Added to hook path: %s" % flame_hooks_folder)
        # now that we have a wiretap library, call out and initialize the project
        # automatically
        tk_flame = self.import_module("tk_flame")
        wiretap_handler = tk_flame.WiretapHandler()
        try:
            app_args = wiretap_handler.prepare_and_load_project()
        finally:
            # always release the wiretap connection, even if project setup fails
            wiretap_handler.close()
        return app_args
    def _define_qt_base(self):
        """
        Define QT behaviour. Subclassed from base class.

        :returns: dict with keys ``qt_core``, ``qt_gui`` and ``dialog_base``
            when running outside of Flame; otherwise whatever the base class
            implementation returns.
        """
        if self._engine_mode in (self.ENGINE_MODE_DCC, self.ENGINE_MODE_BACKBURNER):
            # We are running the engine inside of the Flame Application.
            # alternatively, we are running the engine in backburner
            #
            # in both these states, no special QT init is necessary.
            # Defer to default implementation which looks for pyside and
            # gracefully fails in case that isn't found.
            self.log_debug("Initializing default PySide for in-DCC / backburner use")
            return super(FlameEngine, self)._define_qt_base()
        else:
            # we are running the engine outside of Flame.
            # This is special - no QApplication is running at this point -
            # a state akin to running apps inside the shell engine.
            # We assume that in pre-launch mode, PySide is available since
            # we are running within the Flame python.
            # NOTE(review): the `qt` import below appears unused - presumably
            # kept for an import side effect; confirm before removing.
            from sgtk.platform import qt
            from sgtk.util.qt_importer import QtImporter
            importer = QtImporter()
            QtCore = importer.QtCore
            QtGui = importer.QtGui
            # a simple dialog proxy that pushes the window forward
            class ProxyDialogPySide(QtGui.QDialog):
                def show(self):
                    QtGui.QDialog.show(self)
                    self.activateWindow()
                    self.raise_()
                def exec_(self):
                    self.activateWindow()
                    self.raise_()
                    # the trick of activating + raising does not seem to be enough for
                    # modal dialogs. So force put them on top as well.
                    self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | self.windowFlags())
                    return QtGui.QDialog.exec_(self)
            # hand the resolved QT modules and dialog base back to the core
            base = {}
            base["qt_core"] = QtCore
            base["qt_gui"] = QtGui
            base["dialog_base"] = ProxyDialogPySide
            return base
def cache_export_asset(self, asset_info):
"""
Cache the export asset into the engine cache.
:param asset_info: Information dictionary of the asset.
See sg_export_hook.postExportAsset for details on the dictionary content.
"""
# extract asset information
sequence_name = asset_info.get("sequenceName")
shot_name = asset_info.get("shotName")
asset_type = asset_info.get("assetType")
asset_name = asset_info.get("assetName")
# reinitialize the export cache if the format doesn't fit the current asset
if not isinstance(self._export_info, dict):
self._export_info = {}
if sequence_name not in self._export_info:
self._export_info[sequence_name] = {shot_name: {asset_type: {asset_name: [asset_info]}}}
elif shot_name not in self._export_info[sequence_name]:
self._export_info[sequence_name][shot_name] = {asset_type: {asset_name: [asset_info]}}
elif asset_type not in self._export_info[sequence_name][shot_name]:
self._export_info[sequence_name][shot_name][asset_type] = {asset_name: [asset_info]}
elif asset_name not in self._export_info[sequence_name][shot_name][asset_type]:
self._export_info[sequence_name][shot_name][asset_type][asset_name] = [asset_info]
else:
self._export_info[sequence_name][shot_name][asset_type][asset_name].append(asset_info)
def cache_batch_export_asset(self, info):
"""
Cache the batch export asset into the engine cache.
:param info: Information dictionary of the asset
See sg_batch_hook.batchExportEnd for details on the dictionary content.
"""
if not isinstance(self._export_info, list):
self._export_info = []
self._export_info.append(info)
################################################################################################################
# export callbacks handling
#
# Any apps which are interested in registering custom exporters with Flame should use the methods
# below. The register_export_hook() is called by apps in order to create a menu entry
# on the Flame export menu. The remaining methods are used to call out from the actual Flame hook
# to the relevant app code.
#
def register_export_hook(self, menu_caption, callbacks):
"""
Allows an app to register an interest in one of the Flame export hooks.
This is one of the interaction entry points in the system and this is how apps
typically have their business logic executed. At app init, an app typically
calls this method with a syntax like this:
# set up callback map
callbacks = {}
callbacks["preCustomExport"] = self.pre_custom_export
callbacks["preExportAsset"] = self.adjust_path
callbacks["postExportAsset"] = self.register_post_asset_job
# register with the engine
self.engine.register_export_hook("Menu Caption", callbacks)
The engine will keep track of things automatically, and whenever the user
clicks the "Menu Caption" entry on the menu, the corresponding chain of callbacks
will be called.
All methods should have the following method signature:
def export_callback(self, session_id, info)
Where session_id is a unique session identifier (typically only used in advanced scenarios)
and info reflects the info parameter passed from Flame (varies for different callbacks).
For information which export can currently be registered against, see the
flame_hooks/exportHook.py file.
:param menu_caption: Text to appear on the Flame export menu
:param callbacks: Dictionary of callbacks, see above for details.
"""
if menu_caption in self._registered_export_instances:
raise TankError("There is already a menu export preset named '%s'! "
"Please ensure your preset names are unique" % menu_caption)
self.log_debug("Registered export preset '%s' with engine." % menu_caption)
self._registered_export_instances[menu_caption] = callbacks
def get_export_presets(self):
"""
Internal engine method. Do not use outside of the engine.
Returns all export presets registered by apps.
:returns: List of preset titles
"""
return self._registered_export_instances.keys()
def create_export_session(self, preset_name):
"""
Internal engine method. Do not use outside of the engine.
Start a new export session.
Creates a session object which represents a single export session in Flame.
:param preset_name: The name of the preset which should be executed.
:returns: session id string which is later passed into various methods
"""
if preset_name not in self._registered_export_instances:
raise TankError("The export preset '%s' is not registered with the current engine. "
"Current presets are: %s" % (preset_name, self._registered_export_instances.keys()))
session_id = "tk_%s" % uuid.uuid4().hex
# set up an export session
self._export_sessions[session_id] = preset_name
return session_id
    def trigger_export_callback(self, callback_name, session_id, info):
        """
        Internal engine method. Do not use outside of the engine.
        Dispatch method called from the various Flame hooks.
        This method will ensure that the Flame callbacks will be
        dispatched to the appropriate registered app callbacks.

        :param callback_name: Name of the Flame callback method
        :param session_id: Unique session identifier
        :param info: Metadata dictionary from Flame
        """
        self.log_debug("Flame engine export callback dispatch for %s" % callback_name)
        self.log_debug("Info parameters passed from Flame: %s" % pprint.pformat(info))
        if session_id not in self._export_sessions:
            # sessions not created via create_export_session are ignored -
            # presumably exports not initiated through a Toolkit preset
            self.log_debug("Ignoring request for unknown session %s..." % session_id)
            return
        # get the preset
        preset_name = self._export_sessions[session_id]
        tk_callbacks = self._registered_export_instances[preset_name]
        # call the callback in the preset
        if callback_name in tk_callbacks:
            # the app has registered interest in this!
            self.log_debug("Executing callback %s" % tk_callbacks[callback_name])
            tk_callbacks[callback_name](session_id, info)
    @property
    def export_info(self):
        """
        The export data cached by cache_export_asset / cache_batch_export_asset,
        or None if nothing has been cached yet.

        :return: Flame export cache
        """
        return self._export_info
    def clear_export_info(self):
        """
        Clear the Flame export cache
        """
        # reset to None; cache_export_asset / cache_batch_export_asset will
        # re-create the appropriate container type on next use
        self._export_info = None
################################################################################################################
# batch callbacks handling
#
# Any apps which are interested in registering custom batch exporters with Flame should use the methods
# below. The register_batch_hook() is called by apps in order to register an interest in pre and post
# export callbacks when in batch mode. The Flame engine will ensure that the app's callbacks will get
# called at the right time.
#
    def register_batch_hook(self, callbacks):
        """
        Allows an app to register an interest in one of the Flame batch hooks.
        This is one of the interaction entry points in the system and this is how apps
        typically have their business logic executed. At app init, an app typically
        calls this method with a syntax like this:

            # set up callback map
            callbacks = {}
            callbacks["batchExportBegin"] = self.before_export
            callbacks["batchExportEnd"] = self.after_export
            # register with the engine
            self.engine.register_batch_hook(callbacks)

        The engine will keep track of things automatically, and whenever a batch render executes,
        the corresponding chain of callbacks will be called.
        All methods should have the following method signature:

            def export_callback(self, info)

        For information which export can currently be registered against, see the
        flame_hooks/batchHook.py file.

        :param callbacks: Dictionary of callbacks, see above for details.
        """
        self.log_debug("Registered batch callbacks with engine: %s" % callbacks)
        self._registered_batch_instances.append(callbacks)
    def trigger_batch_callback(self, callback_name, info):
        """
        Internal engine method. Do not use outside of the engine.
        Dispatch method called from the various Flame hooks.
        This method will ensure that the Flame callbacks will be
        dispatched to the appropriate registered app callbacks.

        :param callback_name: Name of the Flame callback method
        :param info: Metadata dictionary from Flame
        """
        self.log_debug("Flame engine batch callback dispatch for %s" % callback_name)
        self.log_debug("Info parameters passed from Flame: %s" % pprint.pformat(info))
        # dispatch to all callbacks - unlike export callbacks, every
        # registered batch hook that declares this callback name is invoked
        for registered_batch_instance in self._registered_batch_instances:
            self.log_debug("Checking %s" % registered_batch_instance)
            if callback_name in registered_batch_instance:
                # the app has registered interest in this!
                self.log_debug("Executing callback %s" % registered_batch_instance[callback_name])
                registered_batch_instance[callback_name](info)
################################################################################################################
# backburner integration
#
    def get_server_hostname(self):
        """
        Return the hostname for the server which hosts this Flame setup.
        This is an accessor into the engine hook settings, allowing apps
        to query which host the closest Flame server is running on.

        :returns: hostname string
        """
        # delegated to the project startup hook so studios can customize it
        return self.execute_hook_method("project_startup_hook", "get_server_hostname")
    def get_backburner_tmp(self):
        """
        Return a location on disk, guaranteed to exist
        where temporary data can be put in such a way that
        it will be accessible for all backburner jobs, regardless of
        which host they execute on.

        :returns: path
        """
        # the shared temp location comes straight from engine configuration
        return self.get_setting("backburner_shared_tmp")
    @property
    def _flame_exporter_supported(self):
        """
        :return: True if the Flame exporter API is supported (Flame 2019.2
            and above), False otherwise.
        """
        # Note. Flame exporter can be used in 2019.1 but there are issues
        # with transcoding of Movie files that prevent wide use of it
        # with 2019.1.
        #
        return not self.is_version_less_than("2019.2")
@property
def transcoder(self):
"""
:return transcoder: Transcoder to use to trancode a clip from
one format to another.
"""
if self._transcoder is not None:
return self._transcoder
tk_flame = self.import_module("tk_flame")
if self._flame_exporter_supported:
self._transcoder = tk_flame.Transcoder(
engine=self
)
else:
raise Exception("Transcoder not supported")
return self._transcoder
@property
def thumbnail_generator(self):
"""
:return thumbnail_generator: Thumbnail generator to use to generate
thumbnail from Flame's asset published or rendered.
"""
if self._thumbnail_generator is not None:
return self._thumbnail_generator
tk_flame = self.import_module("tk_flame")
if self._flame_exporter_supported:
self._thumbnail_generator = tk_flame.ThumbnailGeneratorFlame(
engine=self
)
else:
self._thumbnail_generator = tk_flame.ThumbnailGeneratorFFmpeg(
engine=self
)
return self._thumbnail_generator
@property
def local_movie_generator(self):
"""
:return local_movie_generator: Local movie generator to use to generate
local movie file from Flame's asset published or rendered.
"""
if self._local_movie_generator is not None:
return self._local_movie_generator
tk_flame = self.import_module("tk_flame")
if self._flame_exporter_supported:
self._thumbnail_generator = tk_flame.LocalMovieGeneratorFlame(
engine=self
)
else:
self._thumbnail_generator = tk_flame.LocalMovieGeneratorFFmpeg(
engine=self
)
return self._thumbnail_generator
    def create_local_backburner_job(self, job_name, description, dependencies,
                                    instance, method_name, args, backburner_server_host=None):
        """
        Run a method in the local backburner queue.

        The call is serialized (context, target instance, method and args are
        pickled to a session file in the shared backburner tmp area) and then
        submitted via the ``cmdjob`` executable; a bootstrap script replays
        the pickled session when the job wakes up on the farm.

        :param job_name: Name of the backburner job
        :param description: Description of the backburner job
        :param dependencies: None if the backburner job should execute arbitrarily. If you
            want to set the job up so that it executes after another known task, pass
            the backburner id or a list of ids here. This is typically used in conjunction with a postExportAsset
            hook where the export task runs on backburner. In this case, the hook will return
            the backburner id. By passing that id into this method, you can create a job which
            only executes after the main export task has completed.
        :param instance: App or hook to remotely call up
        :param method_name: Name of method to remotely execute
        :param args: dictionary or args (**argv style) to pass to method at remote execution
        :param backburner_server_host: Name of the backburner server host.
        :return backburner_job_id: Id of the backburner job created
        :raises TankError: if backburner does not report a successful submission
        """
        # the backburner executable
        backburner_job_cmd = os.path.join(self._install_root, "backburner", "cmdjob")
        # pass some args - most importantly tell it to run on the local host
        # looks like : chars are not valid so replace those
        backburner_args = []
        # run as current user, not as root
        backburner_args.append("-userRights")
        # attach the executable to the backburner job
        backburner_args.append("-attach")
        # increase the max task length to 600 minutes
        backburner_args.append("-timeout:600")
        # add basic job info
        # backburner does not do any kind of sanitation itself, so ensure that job
        # info doesn't contain any strange characters etc
        # remove any non-trivial characters
        sanitized_job_name = re.sub(r"[^0-9a-zA-Z_\-,\. ]+", "_", job_name)
        sanitized_job_desc = re.sub(r"[^0-9a-zA-Z_\-,\. ]+", "_", description)
        # if the job name contains too many characters, backburner submission fails
        if len(sanitized_job_name) > 70:
            sanitized_job_name = "%s..." % sanitized_job_name[:67]
        if len(sanitized_job_desc) > 70:
            sanitized_job_desc = "%s..." % sanitized_job_desc[:67]
        # there is a convention in flame to append a time stamp to jobs
        # e.g. 'Export - XXX_YYY_ZZZ (10.02.04)
        sanitized_job_name += datetime.datetime.now().strftime(" (%H.%M.%S)")
        backburner_args.append("-jobName:\"%s\"" % sanitized_job_name)
        backburner_args.append("-description:\"%s\"" % sanitized_job_desc)
        # Specifying a remote backburner manager is only supported on 2016.1 and above
        if not self.is_version_less_than("2016.1"):
            bb_manager = self.get_setting("backburner_manager")
            if not bb_manager and not self.is_version_less_than("2018"):
                # No backburner manager specified in settings. Ask local backburnerServer
                # which manager to choose from. (They might be none running locally)
                # Before 2018, you needed root privileges to execute this command.
                backburner_server_cmd = os.path.join(self._install_root, "backburner", "backburnerServer")
                bb_manager = subprocess.check_output([backburner_server_cmd, "-q", "MANAGER"])
                bb_manager = bb_manager.strip("\n")
            if bb_manager:
                backburner_args.append("-manager:\"%s\"" % bb_manager)
        # Set the server group to the backburner job
        bb_server_group = self.get_setting("backburner_server_group")
        if bb_server_group:
            backburner_args.append("-group:\"%s\"" % bb_server_group)
        # Specify the backburner server if provided
        if backburner_server_host:
            backburner_args.append("-servers:\"%s\"" % backburner_server_host)
        # Otherwise, fallback to the global backburner servers setting
        else:
            bb_servers = self.get_setting("backburner_servers")
            if bb_servers:
                backburner_args.append("-servers:\"%s\"" % bb_servers)
        # Set the backburner job dependencies
        if dependencies:
            if isinstance(dependencies, list):
                backburner_args.append("-dependencies:%s" % ",".join(dependencies))
            else:
                backburner_args.append("-dependencies:%s" % dependencies)
        # call the bootstrap script
        backburner_bootstrap = os.path.join(self.disk_location, "python", "startup", "backburner.py")
        # now we need to capture all of the environment and everything in a file
        # (thanks backburner!) so that we can replay it later when the task wakes up
        session_file = os.path.join(self.get_backburner_tmp(), "tk_backburner_%s.pickle" % uuid.uuid4().hex)
        data = {}
        data["engine_instance"] = self.instance_name
        data["serialized_context"] = sgtk.context.serialize(self.context)
        data["instance"] = instance if isinstance(instance, str) else instance.instance_name
        data["method_to_execute"] = method_name
        data["args"] = args
        data["sgtk_core_location"] = os.path.dirname(sgtk.__path__[0])
        data["flame_version"] = self._flame_version
        data["user_home"] = os.path.expanduser("~")
        fh = open(session_file, "wb")
        pickle.dump(data, fh)
        fh.close()
        full_cmd = "%s %s %s %s" % (backburner_job_cmd, " ".join(backburner_args), backburner_bootstrap, session_file)
        # On old Flame version, python hooks are running root. We need to run the command as the effective user to
        # ensure that backburner is running the job as the user who's using the Software to avoid permissions issues.
        if os.getuid() == 0: # root
            # Getting the user name of the user who started Flame (the effective user)
            e_user = pwd.getpwuid(os.geteuid()).pw_name
            # Run the command as the effective user
            full_cmd = "sudo -u %s %s" % (e_user, full_cmd)
            self.log_debug("Running root but will send the job as [%s]" % e_user)
        try:
            # Make sure that the session is not expired
            sgtk.get_authenticated_user().refresh_credentials()
        except sgtk.authentication.AuthenticationCancelled:
            self.log_debug("User cancelled auth. No backburner job will be created.")
        else:
            self.log_debug("Starting backburner job '%s'" % job_name)
            self.log_debug("Command line: %s" % full_cmd)
            self.log_debug("App: %s" % instance)
            self.log_debug("Method: %s with args %s" % (method_name, args))
            # kick it off
            backburner_job_submission = subprocess.Popen([full_cmd], stdout=subprocess.PIPE, shell=True)
            # NOTE(review): stderr is not piped above, so `stderr` below is
            # always None and the "Reason:" branch is unreachable - consider
            # adding stderr=subprocess.PIPE if the reason should be captured.
            stdout, stderr = backburner_job_submission.communicate()
            self.log_debug(stdout)
            # cmdjob reports "Successfully submitted job <id>" on success
            job_id_regex = re.compile(r"(?<=Successfully submitted job )(\d+)")
            match = job_id_regex.search(stdout)
            if match:
                backburner_job_id = match.group(0)
                self.log_debug("Backburner job created (%s)" % backburner_job_id)
                return backburner_job_id
            else:
                error = ["Shotgun backburner job could not be created."]
                if stderr:
                    error += ["Reason: " + stderr]
                error += ["See backburner logs for details."]
                raise TankError("\n".join(error))
################################################################################################################
# accessors to various core settings and functions
    def __get_wiretap_central_binary(self, binary_name):
        """
        Try to return the path to a binary in the Wiretap Central binary collection.
        This function is compatible with both new Wiretap Central and the legacy Wiretap Central.

        :param binary_name: Name of desired binary
        :returns: Absolute path as a string
        :raises TankError: if the platform is unsupported or no matching binary exists
        """
        # Wiretap Central can only be present on MacOS and on Linux
        # NOTE(review): "linux2" is the Python 2 value of sys.platform; on
        # Python 3 this would be "linux" - revisit if this file is ported.
        if sys.platform not in ["darwin", "linux2"]:
            raise TankError("Your operating system does not support Wiretap Central!")
        # Priority have to be given to every ".bin" executable on the Wiretap Central binary folder
        wtc_path = self._get_wiretap_central_bin_path()
        binary = os.path.join(wtc_path, binary_name + ".bin")
        if os.path.exists(binary):
            return binary
        # If not found, we should look for the same path without the ".bin"
        binary = os.path.join(wtc_path, binary_name)
        if os.path.exists(binary):
            return binary
        # If we reach this, we are running a legacy Wiretap Central
        wtc_path = self._get_wiretap_central_legacy_bin_path()
        binary = os.path.join(wtc_path, binary_name)
        if os.path.exists(binary):
            return binary
        # We don't have any Wiretap Central installed on this workstation
        raise TankError("Cannot find binary '%s'!" % binary_name)
def _get_wiretap_central_bin_path(self):
"""
Get the path to the Wiretap Central binaries folder based on the current operating system.
:return: Path to the Wiretap Central binaries folder
"""
if sys.platform == "darwin":
return "/Library/WebServer/Documents/WiretapCentral/cgi-bin"
elif sys.platform == "linux2":
return "/var/www/html/WiretapCentral/cgi-bin"
def _get_wiretap_central_legacy_bin_path(self):
"""
Get the path to the legacy Wiretap Central binaries folder based on the current operating system.
:return: Path to the legacy Wiretap Central binaries folder
"""
if sys.platform == "darwin":
return "/Library/WebServer/CGI-Executables/WiretapCentral"
elif sys.platform == "linux2":
return "/var/www/cgi-bin/WiretapCentral"
    def get_ffmpeg_path(self):
        """
        Returns the path to the ffmpeg executable that ships with Flame.

        :returns: Absolute path as a string
        :raises TankError: if the binary cannot be located (see
            __get_wiretap_central_binary)
        """
        return self.__get_wiretap_central_binary("ffmpeg")
    def get_read_frame_path(self):
        """
        Returns the path to the read_frame utility that ships with Flame.

        :returns: Absolute path as a string
        :raises TankError: if the binary cannot be located (see
            __get_wiretap_central_binary)
        """
        return self.__get_wiretap_central_binary("read_frame")
def sgtk_exception_trap(ex_cls, ex, tb):
    """
    UI Popup and logging exception trap override.

    This method is used to override the default exception reporting behaviour
    inside the embedded Flame python interpreter to make errors more visible
    to the user.
    It attempts to create a QT messagebox with a formatted error message to
    alert the user that something has gone wrong. In addition to this, the
    default exception handling is also carried out and the exception is also
    written to the log.

    Note that this is a global object and not an engine-relative thing, so that
    the exception handler will operate correctly even if the engine instance no
    longer exists.

    :param ex_cls: Exception class
    :param ex: Exception instance
    :param tb: Traceback object (may be None)
    """
    # careful about infinite loops here - we mustn't raise exceptions.
    # like in other environments and scripts, for TankErrors, we assume that the
    # error message is already a nice descriptive, crafted message and try to present
    # this in a user friendly fashion
    #
    # for other exception types, we give a full call stack.
    error_message = "Critical: Could not format error message."
    try:
        traceback_str = "\n".join(traceback.format_tb(tb))
        if ex_cls == TankError:
            # for TankErrors, we don't show the whole stack trace
            error_message = "A Shotgun error was reported:\n\n%s" % ex
        else:
            error_message = "A Shotgun error was reported:\n\n%s (%s)\n\nTraceback:\n%s" % (ex, ex_cls, traceback_str)
    except:
        pass
    # now try to output it
    try:
        from sgtk.platform.qt import QtGui, QtCore
        if QtCore.QCoreApplication.instance():
            # there is an application running - so pop up a message!
            QtGui.QMessageBox.critical(None, "Shotgun General Error", error_message)
    except:
        pass
    # and try to log it
    try:
        error_message = "An exception was raised:\n\n%s (%s)\n\nTraceback:\n%s" % (ex, ex_cls, traceback_str)
        logging.getLogger(LOG_CHANNEL).error(error_message)
    except:
        pass
    # in addition to the ui popup, also defer to the default mechanism.
    # BUG FIX: the exception class (ex_cls) is passed through here; previously
    # the builtin `type` was passed by mistake, corrupting the default report.
    sys.__excepthook__(ex_cls, ex, tb)
|
# Copyright (c) 2014 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
A Toolkit engine for Flame
"""
import os
import pwd
import re
import shlex
import sys
import uuid
import sgtk
import pickle
import logging
import pprint
import logging.handlers
import traceback
import datetime
import subprocess
from sgtk import TankError
LOG_CHANNEL = "sgtk.tk-flame"
class FlameEngine(sgtk.platform.Engine):
"""
The engine class. This wraps around a series of callbacks in Flame (so called hooks).
The Flame engine is a bit different than other engines.
Because Flame doesn't have an API, we cannot call Flame, but Flame will call out
to the toolkit code. This means that the normal register_command approach won't
work inside of Flame - instead, the engine introduces a different scheme of callbacks
that apps can register to ensure that they cen do stuff.
For apps, the main entry points are register_export_hook and register_batch_hook.
For more information, see below.
"""
# the name of the folder in the engine which we should register
# with Flame to trigger various hooks to run.
FLAME_HOOKS_FOLDER = "flame_hooks"
# our default log file to write to
SGTK_LOG_FILE = "tk-flame.log"
# a 'plan B' safe log file that we call fall back on in case
# the default log file cannot be accessed
SGTK_LOG_FILE_SAFE = "/tmp/tk-flame.log"
# define constants for the various modes the engine can execute in
(ENGINE_MODE_DCC, ENGINE_MODE_PRELAUNCH, ENGINE_MODE_BACKBURNER) = range(3)
@property
def host_info(self):
"""
:returns: A dictionary with information about the application hosting this engine.
The returned dictionary is of the following form on success:
{
"name": "Flame",
"version": "2018.3.pr84",
}
The returned dictionary is of following form on an error preventing
the version identification.
{
"name": "Flame",
"version": "unknown"
}
"""
host_info = {"name": "Flame", "version": "unknown"}
try:
# The 'SHOTGUN_FLAME_VERSION' environment variable comes from Flame plugin
# The 'TOOLKIT_FLAME_VERSION' environment variable comes from Flame classic config
if "SHOTGUN_FLAME_VERSION" in os.environ:
host_info["version"] = os.environ.get("SHOTGUN_FLAME_VERSION", "unknown")
elif "TOOLKIT_FLAME_VERSION" in os.environ:
host_info["version"] = os.environ.get("TOOLKIT_FLAME_VERSION", "unknown")
except:
# Fallback to initialization value above
pass
return host_info
    def __init__(self, *args, **kwargs):
        """
        Overridden constructor where we init some things which
        need to be defined very early on in the engine startup.
        """
        # to support use cases where the flame engine isn't started via
        # the multi-launchapp chain, make sure that hooks that the engine
        # implements are registered.
        # NOTE(review): self.disk_location and self.log_debug are used here
        # BEFORE super().__init__ runs - this relies on the sgtk base class
        # exposing them without full initialization; confirm before reordering.
        flame_hooks_folder = os.path.join(self.disk_location, self.FLAME_HOOKS_FOLDER)
        sgtk.util.append_path_to_env_var("DL_PYTHON_HOOK_PATH", flame_hooks_folder)
        self.log_debug("Added to hook path: %s" % flame_hooks_folder)
        # the path to the associated python executable
        self._python_executable_path = None
        # version of Flame we are running
        self._flame_version = None
        # root folder where flame is installed
        self._install_root = None
        # set the current engine mode. The mode contains information about
        # how the engine was started - it can be executed either before the
        # actual DCC starts up (pre-launch), in the DCC itself or on the
        # backburner farm. This means that there are three distinct bootstrap
        # scripts which can launch the engine (all contained within the engine itself).
        # these bootstrap scripts all set an environment variable called
        # TOOLKIT_FLAME_ENGINE_MODE which defines the desired engine mode.
        engine_mode_str = os.environ.get("TOOLKIT_FLAME_ENGINE_MODE")
        if engine_mode_str == "PRE_LAUNCH":
            self._engine_mode = self.ENGINE_MODE_PRELAUNCH
        elif engine_mode_str == "BACKBURNER":
            self._engine_mode = self.ENGINE_MODE_BACKBURNER
        elif engine_mode_str == "DCC":
            self._engine_mode = self.ENGINE_MODE_DCC
        else:
            # unset or unrecognized mode: refuse to start rather than guess
            raise TankError("Unknown launch mode '%s' defined in "
                            "environment variable TOOLKIT_FLAME_ENGINE_MODE!" % engine_mode_str)
        # Transcoder, thumbnail generator and local movie generator will be
        # initialized on first request for them since, in order to know which
        # type we will need, we need to wait for the Flame API to be loaded
        # completely.
        #
        self._transcoder = None
        self._thumbnail_generator = None
        self._local_movie_generator = None
        super(FlameEngine, self).__init__(*args, **kwargs)
def pre_app_init(self):
    """
    Engine construction/setup done before any apps are initialized.

    Installs the engine-wide exception trap, resets export/batch
    bookkeeping, configures Qt text codecs (UI mode only) and, for
    classic-config launches, links the Flame project to its Shotgun
    project via the Flame Python API.
    """
    # set up a custom exception trap for the engine.
    # it will log the exception and if possible also
    # display it in a UI
    sys.excepthook = sgtk_exception_trap
    # now start the proper init
    self.log_debug("%s: Initializing..." % self)
    # callbacks registered via register_export_hook(), keyed by menu caption
    self._registered_export_instances = {}
    # maps export session id -> export preset name for in-flight exports
    self._export_sessions = {}
    # callbacks registered via register_batch_hook()
    self._registered_batch_instances = []
    # export cache - populated by cache_export_asset / cache_batch_export_asset
    self._export_info = None
    if self.has_ui:
        # tell QT to interpret C strings as utf-8
        from sgtk.platform.qt import QtCore, QtGui
        utf8 = QtCore.QTextCodec.codecForName("utf-8")
        QtCore.QTextCodec.setCodecForCStrings(utf8)
    # Assuming we're in a new enough version of Flame (2018.3+) we'll
    # be able to link the Flame project to our SG project. This will
    # ensure that if a user launches Flame's plugin-based Shotgun
    # integration they will be bootstrapped into the correct
    # project and won't be prompted to choose an SG project to link to.
    #
    # NOTE: We only take the initiative here and create the project
    # link if this is a classic config launch of Flame. One quick way
    # to know that is to just refer to the environment, where we know
    # that the classic startup script sets some variables.
    if "TOOLKIT_ENGINE_NAME" in os.environ:
        try:
            import flame
        except Exception:
            self.logger.debug(
                "Was unable to import the flame Python module. As a result, "
                "the Flame project will not be linked to associated Shotgun "
                "project using the Flame Python API. This shouldn't cause "
                "any problems in the current session, but it does mean "
                "that the user might be prompted to link this project to a "
                "Shotgun project if they launch Flame using the Toolkit "
                "plugin and open this same Flame project."
            )
        else:
            try:
                current_flame_project = flame.project.current_project
                current_flame_project.shotgun_project_name = self.context.project.get("name")
            except Exception:
                self.logger.debug(
                    "Was unable to set the current Flame project's "
                    "shotgun_project_name property. This shouldn't cause "
                    "any problems in the current session, but it does mean "
                    "that the user might be prompted to link this project to a "
                    "Shotgun project if they launch Flame using the Toolkit "
                    "plugin and open this same Flame project."
                )
            else:
                self.logger.debug(
                    "Successfully linked the Flame project to its associated "
                    "Shotgun project."
                )
def _initialize_logging(self, install_root):
    """
    Set up logging for the engine.

    Attaches a rotating file handler to the engine's log channel. The log
    goes into the Flame install's log folder when that is writable,
    otherwise into a safe fallback location (SGTK_LOG_FILE_SAFE).

    :param install_root: path to flame install root
    """
    # standard flame log file
    std_log_file = os.path.join(install_root, "log", self.SGTK_LOG_FILE)
    # test if we can write to the default log file
    if os.access(os.path.dirname(std_log_file), os.W_OK):
        log_file = std_log_file
        using_safe_log_file = False
    else:
        # cannot rotate file in this directory, write to tmp instead.
        log_file = self.SGTK_LOG_FILE_SAFE
        using_safe_log_file = True
    # Set up a rotating logger. The safe (tmp) file rotates by size with a
    # 4MiB cap; the standard file is rotated manually once per session below.
    if using_safe_log_file:
        rotating = logging.handlers.RotatingFileHandler(log_file, maxBytes=4 * 1024 * 1024, backupCount=10)
    else:
        # delay=True: don't open the file until the rollover decision is made
        rotating = logging.handlers.RotatingFileHandler(log_file, maxBytes=0, backupCount=50, delay=True)
        # Always rotate. Current user might not have the correct permission to open this file
        if os.path.exists(log_file):
            rotating.doRollover()  # Will open file after roll over
    rotating.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] PID %(process)d: %(message)s"))
    # create a global logging object
    logger = logging.getLogger(LOG_CHANNEL)
    logger.propagate = False
    # clear any existing handlers
    logger.handlers = []
    logger.addHandler(rotating)
    if self.get_setting("debug_logging"):
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    # now that we have a logger, we can warn about a non-std log file :)
    if using_safe_log_file:
        logger.error("Cannot write to standard log file location %s! Please check "
                     "the filesystem permissions. As a fallback, logs will be "
                     "written to %s instead." % (std_log_file, log_file))
def set_python_executable(self, python_path):
    """
    Specifies the path to the associated python process.

    This is typically populated as part of the engine startup.

    :param python_path: path to python, as string
    """
    self._python_executable_path = python_path
    self.log_debug(
        "This engine is running python interpreter '%s'"
        % self._python_executable_path
    )
def set_version_info(self, major_version_str, minor_version_str, full_version_str, patch_version_str="0"):
    """
    Specifies which version of Flame this engine is running.

    This is typically populated as part of the engine startup.

    :param major_version_str: Major version number as string
    :param minor_version_str: Minor version number as string
    :param full_version_str: Full version number as string
    :param patch_version_str: Patch version number as string (defaults to "0")
    """
    version_info = {
        "full": full_version_str,
        "major": major_version_str,
        "minor": minor_version_str,
        "patch": patch_version_str,
    }
    self._flame_version = version_info
    self.log_debug("This engine is running with Flame version '%s'" % self._flame_version)
def set_install_root(self, install_root):
    """
    Specifies where the flame installation is located.
    this may be '/usr/discreet', '/opt/Autodesk' etc.

    :param install_root: root path to flame installation
    :raises TankError: if an install root has already been set
    """
    if self._install_root:
        # cannot call this multiple times
        raise TankError("Cannot call set_install_root multiple times!")
    # Log the incoming value. Previously this logged self._install_root
    # before it was assigned, which always printed 'None'.
    self.log_debug("Flame install root is '%s'" % install_root)
    self._install_root = install_root
    self._initialize_logging(install_root)
def _get_commands_matching_setting(self, setting):
"""
This expects a list of dictionaries in the form:
{name: "command-name", app_instance: "instance-name", display_name: "Display Name" }
The app_instance value will match a particular app instance associated with
the engine. The name is the menu name of the command to run when the engine starts up. The
display_name is the menu display name of the command to run.
If name is '' then all commands from the given app instance are returned.
If display_name is not present, name will be used instead.
:returns A list of tuples for all commands that match the given setting.
Each tuple will be in the form (instance_name, display_name, command_name, callback)
"""
# return a dictionary grouping all the commands by instance name
commands_by_instance = {}
for (name, value) in self.commands.iteritems():
app_instance = value["properties"].get("app")
if app_instance:
instance_name = app_instance.instance_name
else:
# A command without an app instance in the context menu is actually coming from the engine, so we'll
# use the engine name instead.
instance_name = "tk-flame"
commands_by_instance.setdefault(instance_name, []).append((name, value["callback"]))
# go through the values from the setting and return any matching commands
ret_value = []
setting_value = self.get_setting(setting, [])
for command in setting_value:
command_name = command["name"]
instance_name = command["app_instance"]
display_name = command.get("display_name", command_name)
instance_commands = commands_by_instance.get(instance_name)
if instance_commands is None:
continue
for (name, callback) in instance_commands:
# add the command if the name from the settings is '' or the name matches
if not command_name or (command_name == name):
ret_value.append((instance_name, display_name, name, callback))
return ret_value
def post_app_init(self):
    """
    Do any initialization after apps have been loaded.

    In DCC mode, executes all commands registered via the
    'run_at_startup' setting; in other engine modes this is a no-op.
    """
    self.log_debug("%s: Running post app init..." % self)
    # only run the startup commands when in DCC mode
    if self._engine_mode != self.ENGINE_MODE_DCC:
        return
    # run any commands registered via run_at_startup.
    # NOTE: _get_commands_matching_setting returns 4-tuples of
    # (instance_name, display_name, command_name, callback) - the previous
    # 3-way unpacking here raised a ValueError as soon as a startup
    # command was configured.
    commands_to_start = self._get_commands_matching_setting("run_at_startup")
    for (instance_name, display_name, command_name, callback) in commands_to_start:
        self.log_debug("Running at startup: (%s, %s)" % (instance_name, command_name))
        callback()
def destroy_engine(self):
    """
    Called when the engine is being destroyed.

    Removes the engine's Flame hooks folder from the python hook path
    and closes any windows the engine opened.
    """
    self.log_debug("%s: Destroying..." % self)
    # Remove the current engine python hooks from the flame python hooks path
    env_var_sep = ":"
    env_var_name = "DL_PYTHON_HOOK_PATH"
    flame_hooks_folder = os.path.join(self.disk_location, self.FLAME_HOOKS_FOLDER)
    paths = os.environ.get(env_var_name, "").split(env_var_sep)
    paths = [path for path in paths if path != flame_hooks_folder]
    os.environ[env_var_name] = env_var_sep.join(paths)
    # NOTE: fixed the previous garbled message "Removed to hook paths"
    self.log_debug("Removed from hook path: %s" % flame_hooks_folder)
    # Close every app windows
    self.close_windows()
@property
def flame_main_window(self):
    """
    Returns the Flame's main window.

    :return: Widget representing the flame's main window, or None if it
        cannot be located among the application's top level widgets.
    """
    from sgtk.platform.qt import QtCore, QtGui
    for widget in QtGui.QApplication.topLevelWidgets():
        if widget.objectName() != "CF Main Window":
            continue
        self.log_debug("Found Flame main window (%s)" % widget.windowTitle())
        return widget
@property
def python_executable(self):
    """
    Returns the python executable associated with this engine.

    :returns: path to python, e.g. '/usr/discreet/python/2016.0.0.322/bin/python'
    :raises TankError: if no python executable has been set yet
    """
    executable = self._python_executable_path
    if executable is None:
        raise TankError("Python executable has not been defined for this engine instance!")
    return executable
@property
def preset_version(self):
    """
    Returns the preset version required for the currently executing
    version of Flame. Preset xml files in Flame all have a version number
    to denote which generation of the file format they implement. If you are using
    an old preset with a new version of Flame, a warning message appears.

    :returns: Preset version, as string, e.g. '5'
    :raises TankError: if the Flame version has not been set yet
    """
    if self._flame_version is None:
        raise TankError("Cannot determine preset version - No Flame DCC version specified!")
    # map the running Flame generation onto the preset file format version
    if self.is_version_less_than("2016.1"):
        # for version 2016 before ext 1, export preset is v5
        return "5"
    if self.is_version_less_than("2017"):
        # flame 2016 extension 1 and above.
        return "6"
    # flame 2017 and above
    #
    # Note: Flame 2017 uses preset 7, however further adjustments to the actual
    # preset format used is required in individual apps - for the time being,
    # the preset version is held at v6, ensuring that app apps operate correctly,
    # but generating a warning message at startup.
    #
    return "7"
@property
def export_presets_root(self):
    """
    The location where flame export presets are located.

    Prefers the Flame python API when available; otherwise derives the
    path from the install root and Flame version.

    :returns: Path as string
    """
    # If possible use the Flame python API to get the presets location
    try:
        import flame
        if 'PyExporter' in dir(flame):
            return flame.PyExporter.get_presets_base_dir(
                flame.PyExporter.PresetVisibility.Shotgun)
    except Exception:
        # NOTE: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt. Failure here simply means we fall
        # back to the path-based lookup below.
        pass
    if self.is_version_less_than("2017"):
        # flame 2016 presets structure
        return os.path.join(
            self.install_root,
            "presets",
            self.flame_version,
            "export",
            "presets"
        )
    else:
        # flame 2017+ presets structure (note the extra flame folder)
        return os.path.join(
            self.install_root,
            "presets",
            self.flame_version,
            "export",
            "presets",
            "flame"
        )
@staticmethod
def _get_full_preset_path(preset_path, preset_type):
"""
Convert a path to a preset that can be incomplete to an absolute path.
:param preset_path: Path to a preset to find.
:param preset_type: Type of preset to look for.
:returns: Absolute path to the preset.
"""
if not os.path.isabs(preset_path):
import flame
presets_dir = flame.PyExporter.get_presets_dir(
flame.PyExporter.PresetVisibility.Shotgun,
preset_type
)
preset_path = os.path.join(
presets_dir,
preset_path
)
return preset_path
@property
def thumbnails_preset_path(self):
    """
    The location of the flame export preset to use to generate thumbnails.

    :returns: Path as string
    """
    import flame
    preset_setting = self.get_setting("thumbnails_preset_path")
    return self._get_full_preset_path(
        preset_setting,
        flame.PyExporter.PresetType.Image_Sequence
    )
@property
def previews_preset_path(self):
    """
    The location of the flame export preset to use to generate previews.

    :returns: Path as string
    """
    import flame
    preset_setting = self.get_setting("previews_preset_path")
    return self._get_full_preset_path(
        preset_setting,
        flame.PyExporter.PresetType.Movie
    )
@property
def local_movies_preset_path(self):
    """
    The location of the flame export preset to use to generate local movies.

    Local movies are linked to assets in Shotgun thru the "Path to Movie"
    field but are not uploaded on the server.

    :returns: Path as string
    """
    import flame
    preset_setting = self.get_setting("local_movies_preset_path")
    return self._get_full_preset_path(
        preset_setting,
        flame.PyExporter.PresetType.Movie
    )
@property
def wiretap_tools_root(self):
    """
    The location of wiretap tool binaries for the running Flame version.

    :returns: Path as string
    """
    return os.path.join(self.install_root, "wiretap", "tools", self.flame_version)
def _is_version_less_than(self, major, minor, patch):
"""
Compares the given version numbers with the current
flame version and returns False if the given version is
greater than the current version.
Example:
- Flame: '2016.1.0.278', version str: '2016.1' => False
- Flame: '2016', version str: '2016.1' => True
:param version_str: Version to run comparison against
"""
if int(self.flame_major_version) != int(major):
return int(self.flame_major_version) < int(major)
if int(self.flame_minor_version) != int(minor):
return int(self.flame_minor_version) < int(minor)
if int(self.flame_patch_version) != int(patch):
return int(self.flame_patch_version) < int(patch)
# Same version
return False
def is_version_less_than(self, version_str):
    """
    Compares the given version string with the current
    flame version and returns False if the given version is
    greater than the current version.

    Example:
    - Flame: '2016.1.0.278', version str: '2016.1' => False
    - Flame: '2016', version str: '2016.1' => True

    Missing or non-numeric components are treated as 0.

    :param version_str: Version to run comparison against
    :returns: True if the running Flame version is older than version_str
    """
    # parse up to three dotted numeric components; anything that isn't a
    # plain digit run (or is absent) falls back to 0
    components = [0, 0, 0]
    for index, chunk in enumerate(version_str.split(".")[:3]):
        if chunk.isdigit():
            components[index] = int(chunk)
    return self._is_version_less_than(*components)
@property
def flame_major_version(self):
    """
    Returns Flame's major version number as a string.

    :returns: String (e.g. '2016')
    :raises TankError: if the Flame version has not been set yet
    """
    version_info = self._flame_version
    if version_info is None:
        raise TankError("No Flame DCC version specified!")
    return version_info["major"]
@property
def flame_minor_version(self):
    """
    Returns Flame's minor version number as a string.

    :returns: String (e.g. '2')
    :raises TankError: if the Flame version has not been set yet
    """
    version_info = self._flame_version
    if version_info is None:
        raise TankError("No Flame DCC version specified!")
    return version_info["minor"]
@property
def flame_patch_version(self):
    """
    Returns Flame's patch version number as a string.

    :returns: String (e.g. '2')
    :raises TankError: if the Flame version has not been set yet
    """
    version_info = self._flame_version
    if version_info is None:
        raise TankError("No Flame DCC version specified!")
    return version_info["patch"]
@property
def flame_version(self):
    """
    Returns Flame's full version number as a string.

    :returns: String (e.g. '2016.1.0.278')
    :raises TankError: if the Flame version has not been set yet
    """
    version_info = self._flame_version
    if version_info is None:
        raise TankError("No Flame DCC version specified!")
    return version_info["full"]
@property
def install_root(self):
    """
    The location where flame is installed.

    This may be '/usr/discreet', '/opt/Autodesk' etc, or None if it has
    not been set yet via set_install_root().

    :returns: Path as string
    """
    return self._install_root
@property
def has_ui(self):
    """
    Property to determine if the current environment has access to a UI or not.

    With Flame, we may run the engine in bootstrap mode or on the farm -
    in those cases there is no access to a UI. Inside the DCC UI
    environment, pyside support is available and a QApplication exists.

    :returns: True if a Qt application instance is running, False otherwise
    """
    has_ui = False
    try:
        from sgtk.platform.qt import QtCore, QtGui
        if QtCore.QCoreApplication.instance():
            # there is an active application
            has_ui = True
    except Exception:
        # NOTE: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt; failure to import Qt simply
        # means there is no UI.
        pass
    return has_ui
def show_panel(self, panel_id, title, bundle, widget_class, *args, **kwargs):
    """
    Override the base show_panel to create a non-modal dialog that will stay on
    top of the Flame interface.

    :param panel_id: Unique identifier for the panel
    :param title: The title of the window
    :param bundle: The app, engine or framework object associated with this window
    :param widget_class: The class of the UI to be constructed (QWidget subclass)

    Additional parameters specified will be passed through to the widget_class constructor.

    :returns: the created widget_class instance, or None if no UI is available
    """
    if not self.has_ui:
        self.log_error("Sorry, this environment does not support UI display! Cannot show "
                       "the requested panel '%s'." % title)
        return None
    from sgtk.platform.qt import QtCore, QtGui
    # create the dialog:
    dialog, widget = self._create_dialog_with_widget(title, bundle, widget_class, *args, **kwargs)
    # Keep the dialog on top of the Flame UI and remove its close button.
    # NOTE: '&' binds tighter than '|' in Python, so the previous
    # unparenthesized expression reduced to 'flags | WindowStaysOnTopHint'
    # and the close-button hint was never cleared.
    dialog.setWindowFlags(
        (dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
        & ~QtCore.Qt.WindowCloseButtonHint
    )
    self.created_qt_dialogs.append(dialog)
    # show the dialog
    dialog.show()
    # lastly, return the instantiated widget
    return widget
def _get_dialog_parent(self):
    """
    Get the QWidget parent for all dialogs created through :meth:`show_dialog` :meth:`show_modal`.

    Uses the Flame main window when it can be found, otherwise falls back
    to the base class behaviour.

    :return: QT Parent window (:class:`PySide.QtGui.QWidget`)
    """
    from sgtk.platform.qt import QtCore, QtGui
    main_window = self.flame_main_window
    if main_window:
        return main_window
    return super(FlameEngine, self)._get_dialog_parent()
def show_dialog(self, title, bundle, widget_class, *args, **kwargs):
    """
    Shows a non-modal dialog window in a way suitable for this engine.
    The engine will attempt to parent the dialog nicely to the host application.

    The dialog will be created with a standard Toolkit window title bar where
    the title will be displayed.

    .. note:: In some cases, it is necessary to hide the standard Toolkit title
        bar. You can do this by adding a property to the widget class you are
        displaying::

            @property
            def hide_tk_title_bar(self):
                "Tell the system to not show the standard toolkit toolbar"
                return True

    :param title: The title of the window. This will appear in the Toolkit title bar.
    :param bundle: The app, engine or framework object that is associated with this window
    :param widget_class: The class of the UI to be constructed. This must derive from QWidget.
    :type widget_class: :class:`PySide.QtGui.QWidget`

    Additional parameters specified will be passed through to the widget_class constructor.

    :returns: the created widget_class instance, or None if no UI is available
    """
    if not self.has_ui:
        self.log_error("Sorry, this environment does not support UI display! Cannot show "
                       "the requested window '%s'." % title)
        return None
    from sgtk.platform.qt import QtGui, QtCore
    # create the dialog:
    dialog, widget = self._create_dialog_with_widget(title, bundle, widget_class, *args, **kwargs)
    # Keep the dialog on top of the Flame UI and remove its close button.
    # NOTE: '&' binds tighter than '|' in Python, so the previous
    # unparenthesized expression reduced to 'flags | WindowStaysOnTopHint'
    # and the close-button hint was never cleared.
    dialog.setWindowFlags(
        (dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
        & ~QtCore.Qt.WindowCloseButtonHint
    )
    self.created_qt_dialogs.append(dialog)
    # show the dialog
    dialog.show()
    # lastly, return the instantiated widget
    return widget
def close_windows(self):
    """
    Closes the various windows (dialogs, panels, etc.) opened by the engine.
    """
    # Make a copy of the list of Tank dialogs that have been created by the engine and
    # are still opened since the original list will be updated when each dialog is closed.
    opened_dialog_list = self.created_qt_dialogs[:]
    # Loop through the list of opened Tank dialogs.
    for dialog in opened_dialog_list:
        dialog_window_title = dialog.windowTitle()
        try:
            # Close the dialog and let its close callback remove it from the original dialog list.
            self.log_debug("Closing dialog %s." % dialog_window_title)
            dialog.close()
        except Exception as exception:
            # NOTE: modernized from the py2-only 'except Exception, exception'
            # syntax; 'as' is valid on both Python 2.6+ and Python 3.
            self.log_error("Cannot close dialog %s: %s" % (dialog_window_title, exception))
def log_debug(self, msg):
    """
    Log a debug message

    :param msg: The debug message to log
    """
    engine_logger = logging.getLogger(LOG_CHANNEL)
    engine_logger.debug(msg)
def log_info(self, msg):
    """
    Log some info

    :param msg: The info message to log
    """
    engine_logger = logging.getLogger(LOG_CHANNEL)
    engine_logger.info(msg)
def log_warning(self, msg):
    """
    Log a warning

    :param msg: The warning message to log
    """
    engine_logger = logging.getLogger(LOG_CHANNEL)
    engine_logger.warning(msg)
def log_error(self, msg):
    """
    Log an error

    :param msg: The error message to log
    """
    engine_logger = logging.getLogger(LOG_CHANNEL)
    engine_logger.error(msg)
################################################################################################################
# Engine Bootstrap
#
def pre_dcc_launch_phase(self):
    """
    Special bootstrap method used to set up the Flame environment.
    This is designed to execute before Flame has launched, as part of the
    bootstrapping process.

    This method assumes that it is being executed inside a Flame python
    and is called from the app_launcher script which ensures such an environment.

    The bootstrapper will first import the wiretap API and setup other settings.

    It then attempts to execute the pre-DCC project creation process, utilizing
    both wiretap and QT (setup project UI) for this.

    Finally, it will return the command line args to pass to Flame as it is being
    launched.

    :returns: arguments to pass to the app launch process
    """
    if self.get_setting("debug_logging"):
        # enable Flame hooks debug
        os.environ["DL_DEBUG_PYTHON_HOOKS"] = "1"
    # see if we can launch into batch mode. We only do this when in a
    # shot context and if there is a published batch file in Shotgun
    #
    # For now, hard code the logic of how to detect which batch file to load up.
    # TODO: in the future, we may want to expose this in a hook - but it is arguably
    # pretty advanced customization :)
    #
    # Current logic: Find the latest batch publish belonging to the context
    if self.context.entity:
        # we have a current context to lock on to!
        # try to see if we can find the latest batch publish
        publish_type = sgtk.util.get_published_file_entity_type(self.sgtk)
        if publish_type == "PublishedFile":
            type_link_field = "published_file_type.PublishedFileType.code"
        else:
            # older sites still use the legacy TankType publish entity
            type_link_field = "tank_type.TankType.code"
        # most recent publish first
        sg_data = self.shotgun.find_one(publish_type,
                                        [[type_link_field, "is", self.get_setting("flame_batch_publish_type")],
                                         ["entity", "is", self.context.entity]],
                                        ["path"],
                                        order=[{"field_name": "created_at", "direction": "desc"}])
        if sg_data:
            # we have a batch file published for this context!
            batch_file_path = sg_data["path"]["local_path"]
            if os.path.exists(batch_file_path):
                # tell Flame to open this batch setup on startup
                self.log_debug("Setting auto startup file '%s'" % batch_file_path)
                os.environ["DL_BATCH_START_WITH_SETUP"] = batch_file_path
    # add Flame hooks for this engine
    flame_hooks_folder = os.path.join(self.disk_location, self.FLAME_HOOKS_FOLDER)
    sgtk.util.append_path_to_env_var("DL_PYTHON_HOOK_PATH", flame_hooks_folder)
    self.log_debug("Added to hook path: %s" % flame_hooks_folder)
    # now that we have a wiretap library, call out and initialize the project
    # automatically
    tk_flame = self.import_module("tk_flame")
    wiretap_handler = tk_flame.WiretapHandler()
    try:
        app_args = wiretap_handler.prepare_and_load_project()
    finally:
        # always release the wiretap connection, even if project setup failed
        wiretap_handler.close()
    return app_args
def _define_qt_base(self):
    """
    Define QT behaviour. Subclassed from base class.

    :returns: dict with keys "qt_core", "qt_gui" and "dialog_base", as
        expected by the Toolkit engine base class.
    """
    if self._engine_mode in (self.ENGINE_MODE_DCC, self.ENGINE_MODE_BACKBURNER):
        # We are running the engine inside of the Flame Application.
        # alternatively, we are running the engine in backburner
        #
        # in both these states, no special QT init is necessary.
        # Defer to default implementation which looks for pyside and
        # gracefully fails in case that isn't found.
        self.log_debug("Initializing default PySide for in-DCC / backburner use")
        return super(FlameEngine, self)._define_qt_base()
    else:
        # we are running the engine outside of Flame.
        # This is special - no QApplication is running at this point -
        # a state akin to running apps inside the shell engine.
        # We assume that in pre-launch mode, PySide is available since
        # we are running within the Flame python.
        from sgtk.platform import qt
        from sgtk.util.qt_importer import QtImporter
        importer = QtImporter()
        QtCore = importer.QtCore
        QtGui = importer.QtGui

        # a simple dialog proxy that pushes the window forward
        class ProxyDialogPySide(QtGui.QDialog):
            def show(self):
                QtGui.QDialog.show(self)
                self.activateWindow()
                self.raise_()

            def exec_(self):
                self.activateWindow()
                self.raise_()
                # the trick of activating + raising does not seem to be enough for
                # modal dialogs. So force put them on top as well.
                self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint | self.windowFlags())
                return QtGui.QDialog.exec_(self)

        base = {}
        base["qt_core"] = QtCore
        base["qt_gui"] = QtGui
        base["dialog_base"] = ProxyDialogPySide
        return base
def cache_export_asset(self, asset_info):
    """
    Cache the export asset into the engine cache.

    Assets are stored in a nested dict keyed by sequence name, shot name,
    asset type and asset name, with a list of asset info dicts as leaves.

    :param asset_info: Information dictionary of the asset.
        See sg_export_hook.postExportAsset for details on the dictionary content.
    """
    # extract asset information
    sequence_name = asset_info.get("sequenceName")
    shot_name = asset_info.get("shotName")
    asset_type = asset_info.get("assetType")
    asset_name = asset_info.get("assetName")
    # reinitialize the export cache if the format doesn't fit the current asset
    if not isinstance(self._export_info, dict):
        self._export_info = {}
    # drill down the nesting levels, creating each as needed, and append
    # the asset info at the leaf - identical to the previous if/elif chain
    self._export_info \
        .setdefault(sequence_name, {}) \
        .setdefault(shot_name, {}) \
        .setdefault(asset_type, {}) \
        .setdefault(asset_name, []) \
        .append(asset_info)
def cache_batch_export_asset(self, info):
    """
    Cache the batch export asset into the engine cache.

    Batch exports are stored as a flat list; the cache is reset if it
    currently holds a different format (e.g. the interactive export dict).

    :param info: Information dictionary of the asset.
        See sg_batch_hook.batchExportEnd for details on the dictionary content.
    """
    if not isinstance(self._export_info, list):
        # cache held a different format (or nothing) - start a fresh list
        self._export_info = []
    self._export_info.append(info)
################################################################################################################
# export callbacks handling
#
# Any apps which are interested in registering custom exporters with Flame should use the methods
# below. The register_export_hook() is called by apps in order to create a menu entry
# on the Flame export menu. The remaining methods are used to call out from the actual Flame hook
# to the relevant app code.
#
def register_export_hook(self, menu_caption, callbacks):
    """
    Allows an app to register an interest in one of the Flame export hooks.

    This is one of the interaction entry points in the system and this is how apps
    typically have their business logic executed. At app init, an app typically
    calls this method with a syntax like this:

        # set up callback map
        callbacks = {}
        callbacks["preCustomExport"] = self.pre_custom_export
        callbacks["preExportAsset"] = self.adjust_path
        callbacks["postExportAsset"] = self.register_post_asset_job

        # register with the engine
        self.engine.register_export_hook("Menu Caption", callbacks)

    The engine will keep track of things automatically, and whenever the user
    clicks the "Menu Caption" entry on the menu, the corresponding chain of callbacks
    will be called. All methods should have the following method signature:

        def export_callback(self, session_id, info)

    Where session_id is a unique session identifier (typically only used in advanced
    scenarios) and info reflects the info parameter passed from Flame (varies for
    different callbacks). For information which export can currently be registered
    against, see the flame_hooks/exportHook.py file.

    :param menu_caption: Text to appear on the Flame export menu
    :param callbacks: Dictionary of callbacks, see above for details.
    :raises TankError: if a preset with the same caption is already registered
    """
    if menu_caption in self._registered_export_instances:
        raise TankError("There is already a menu export preset named '%s'! "
                        "Please ensure your preset names are unique" % menu_caption)
    self.log_debug("Registered export preset '%s' with engine." % menu_caption)
    self._registered_export_instances[menu_caption] = callbacks
def get_export_presets(self):
    """
    Internal engine method. Do not use outside of the engine.

    Returns all export presets registered by apps via register_export_hook().

    :returns: List of preset titles
    """
    registered = self._registered_export_instances
    return registered.keys()
def create_export_session(self, preset_name):
    """
    Internal engine method. Do not use outside of the engine.
    Start a new export session.

    Creates a session object which represents a single export session in Flame.

    :param preset_name: The name of the preset which should be executed.
    :returns: session id string which is later passed into various methods
    :raises TankError: if the preset has not been registered with the engine
    """
    if preset_name not in self._registered_export_instances:
        raise TankError("The export preset '%s' is not registered with the current engine. "
                        "Current presets are: %s" % (preset_name, self._registered_export_instances.keys()))
    # unique token that Flame hands back to us in later callbacks,
    # used to look up which preset drives this session
    session_id = "tk_%s" % uuid.uuid4().hex
    self._export_sessions[session_id] = preset_name
    return session_id
def trigger_export_callback(self, callback_name, session_id, info):
    """
    Internal engine method. Do not use outside of the engine.

    Dispatch method called from the various Flame hooks.
    This method will ensure that the Flame callbacks will be
    dispatched to the appropriate registered app callbacks.

    :param callback_name: Name of the Flame callback method
    :param session_id: Unique session identifier
    :param info: Metadata dictionary from Flame
    """
    self.log_debug("Flame engine export callback dispatch for %s" % callback_name)
    self.log_debug("Info parameters passed from Flame: %s" % pprint.pformat(info))
    # guard clause: silently ignore sessions this engine didn't create
    if session_id not in self._export_sessions:
        self.log_debug("Ignoring request for unknown session %s..." % session_id)
        return
    # look up the preset driving this session and its callback table
    preset_name = self._export_sessions[session_id]
    tk_callbacks = self._registered_export_instances[preset_name]
    callback = tk_callbacks.get(callback_name)
    if callback is not None:
        # the app has registered interest in this!
        self.log_debug("Executing callback %s" % callback)
        callback(session_id, info)
@property
def export_info(self):
    """
    The Flame export cache, as populated by cache_export_asset /
    cache_batch_export_asset, or None if nothing has been cached.

    :return: Flame export cache
    """
    return self._export_info
def clear_export_info(self):
    """
    Clear the Flame export cache, resetting it to its initial (empty) state.
    """
    self._export_info = None
################################################################################################################
# batch callbacks handling
#
# Any apps which are interested in registering custom batch exporters with Flame should use the methods
# below. The register_batch_hook() is called by apps in order to register an interest in pre and post
# export callbacks when in batch mode. The Flame engine will ensure that the app's callbacks will get
# called at the right time.
#
def register_batch_hook(self, callbacks):
    """
    Allows an app to register an interest in one of the Flame batch hooks.

    This is one of the interaction entry points in the system and this is how apps
    typically have their business logic executed. At app init, an app typically
    calls this method with a syntax like this:

        # set up callback map
        callbacks = {}
        callbacks["batchExportBegin"] = self.before_export
        callbacks["batchExportEnd"] = self.after_export

        # register with the engine
        self.engine.register_batch_hook(callbacks)

    The engine will keep track of things automatically, and whenever a batch render
    executes, the corresponding chain of callbacks will be called. All methods
    should have the following method signature:

        def export_callback(self, info)

    For information which export can currently be registered against, see the
    flame_hooks/batchHook.py file.

    :param callbacks: Dictionary of callbacks, see above for details.
    """
    self.log_debug("Registered batch callbacks with engine: %s" % callbacks)
    self._registered_batch_instances.append(callbacks)
def trigger_batch_callback(self, callback_name, info):
"""
Internal engine method. Do not use outside of the engine.
Dispatch method called from the various Flame hooks.
This method will ensure that the Flame callbacks will be
dispatched to the appropriate registered app callbacks.
:param callback_name: Name of the Flame callback method
:param session_id: Unique session identifier
:param info: Metadata dictionary from Flame
"""
self.log_debug("Flame engine batch callback dispatch for %s" % callback_name)
self.log_debug("Info parameters passed from Flame: %s" % pprint.pformat(info))
# dispatch to all callbacks
for registered_batch_instance in self._registered_batch_instances:
self.log_debug("Checking %s" % registered_batch_instance)
if callback_name in registered_batch_instance:
# the app has registered interest in this!
self.log_debug("Executing callback %s" % registered_batch_instance[callback_name])
registered_batch_instance[callback_name](info)
################################################################################################################
# backburner integration
#
def get_server_hostname(self):
"""
Return the hostname for the server which hosts this Flame setup.
This is an accessor into the engine hook settings, allowing apps
to query which host the closest Flame server is running on.
:returns: hostname string
"""
return self.execute_hook_method("project_startup_hook", "get_server_hostname")
def get_backburner_tmp(self):
"""
Return a location on disk, guaranteed to exist
where temporary data can be put in such a way that
it will be accessible for all backburner jobs, regardless of
which host they execute on.
:returns: path
"""
return self.get_setting("backburner_shared_tmp")
@property
def _flame_exporter_supported(self):
"""
:return True if Flame exporter API is supported.
"""
# Note. Flame exporter can be used in 2019.1 but there are issues
# with transcoding of Movie files that prevent wide use of it
# with 2019.1.
#
return not self.is_version_less_than("2019.2")
@property
def transcoder(self):
"""
:return transcoder: Transcoder to use to trancode a clip from
one format to another.
"""
if self._transcoder is not None:
return self._transcoder
tk_flame = self.import_module("tk_flame")
if self._flame_exporter_supported:
self._transcoder = tk_flame.Transcoder(
engine=self
)
else:
raise Exception("Transcoder not supported")
return self._transcoder
@property
def thumbnail_generator(self):
"""
:return thumbnail_generator: Thumbnail generator to use to generate
thumbnail from Flame's asset published or rendered.
"""
if self._thumbnail_generator is not None:
return self._thumbnail_generator
tk_flame = self.import_module("tk_flame")
if self._flame_exporter_supported:
self._thumbnail_generator = tk_flame.ThumbnailGeneratorFlame(
engine=self
)
else:
self._thumbnail_generator = tk_flame.ThumbnailGeneratorFFmpeg(
engine=self
)
return self._thumbnail_generator
@property
def local_movie_generator(self):
"""
:return local_movie_generator: Local movie generator to use to generate
local movie file from Flame's asset published or rendered.
"""
if self._local_movie_generator is not None:
return self._local_movie_generator
tk_flame = self.import_module("tk_flame")
if self._flame_exporter_supported:
self._thumbnail_generator = tk_flame.LocalMovieGeneratorFlame(
engine=self
)
else:
self._thumbnail_generator = tk_flame.LocalMovieGeneratorFFmpeg(
engine=self
)
return self._thumbnail_generator
    def create_local_backburner_job(self, job_name, description, dependencies,
                                    instance, method_name, args, backburner_server_host=None):
        """
        Run a method in the local backburner queue.

        The target method is serialized (together with engine/context state)
        into a pickle session file and executed later by the engine's
        backburner bootstrap script on whichever backburner server picks
        the job up.

        :param job_name: Name of the backburner job.
        :param description: Description of the backburner job.
        :param dependencies: None if the backburner job should execute
            arbitrarily. If you want to set the job up so that it executes
            after another known task, pass the backburner id or a list of ids
            here. This is typically used in conjunction with a
            postExportAsset hook where the export task runs on backburner.
            In this case, the hook will return the backburner id. By passing
            that id into this method, you can create a job which only
            executes after the main export task has completed.
        :param instance: App or hook to remotely call up.
        :param method_name: Name of method to remotely execute.
        :param args: dictionary or args (**argv style) to pass to method at
            remote execution.
        :param backburner_server_host: Name of the backburner server host.
        :return backburner_job_id: Id of the backburner job created.
        :raises TankError: If the job submission output does not contain a
            job id (submission failed).
        """
        # the backburner command line submission executable
        backburner_job_cmd = os.path.join(self._install_root, "backburner", "cmdjob")
        # build up the cmdjob argument list - most importantly tell it to
        # run on the local host. Note: ':' chars are not valid in job
        # names/descriptions so those get sanitized below.
        backburner_args = []
        # run as current user, not as root
        backburner_args.append("-userRights")
        # attach the executable to the backburner job
        backburner_args.append("-attach")
        # increase the max task length to 600 minutes
        backburner_args.append("-timeout:600")
        # add basic job info.
        # backburner does not do any kind of sanitation itself, so ensure that
        # job info doesn't contain any strange characters etc. - strip
        # everything outside a conservative whitelist.
        sanitized_job_name = re.sub(r"[^0-9a-zA-Z_\-,\. ]+", "_", job_name)
        sanitized_job_desc = re.sub(r"[^0-9a-zA-Z_\-,\. ]+", "_", description)
        # if the job name contains too many characters, backburner submission
        # fails, so truncate to 70 chars with an ellipsis.
        if len(sanitized_job_name) > 70:
            sanitized_job_name = "%s..." % sanitized_job_name[:67]
        if len(sanitized_job_desc) > 70:
            sanitized_job_desc = "%s..." % sanitized_job_desc[:67]
        # there is a convention in flame to append a time stamp to jobs,
        # e.g. 'Export - XXX_YYY_ZZZ (10.02.04)'
        sanitized_job_name += datetime.datetime.now().strftime(" (%H.%M.%S)")
        backburner_args.append("-jobName:\"%s\"" % sanitized_job_name)
        backburner_args.append("-description:\"%s\"" % sanitized_job_desc)
        # Specifying a remote backburner manager is only supported on 2016.1 and above
        if not self.is_version_less_than("2016.1"):
            bb_manager = self.get_setting("backburner_manager")
            if not bb_manager and not self.is_version_less_than("2018"):
                # No backburner manager specified in settings. Ask the local
                # backburnerServer which manager to choose from (there might
                # be none running locally).
                # Before 2018, you needed root privileges to execute this command.
                backburner_server_cmd = os.path.join(self._install_root, "backburner", "backburnerServer")
                bb_manager = subprocess.check_output([backburner_server_cmd, "-q", "MANAGER"])
                # NOTE(review): on Python 3 check_output returns bytes, so
                # .strip("\n") would raise TypeError - this looks like
                # Python 2 era code; confirm interpreter before porting.
                bb_manager = bb_manager.strip("\n")
            if bb_manager:
                backburner_args.append("-manager:\"%s\"" % bb_manager)
        # Set the server group to the backburner job
        bb_server_group = self.get_setting("backburner_server_group")
        if bb_server_group:
            backburner_args.append("-group:\"%s\"" % bb_server_group)
        # Specify the backburner server if provided
        if backburner_server_host:
            backburner_args.append("-servers:\"%s\"" % backburner_server_host)
        # Otherwise, fall back to the global backburner servers setting
        else:
            bb_servers = self.get_setting("backburner_servers")
            if bb_servers:
                backburner_args.append("-servers:\"%s\"" % bb_servers)
        # Set the backburner job dependencies (single id or comma separated list)
        if dependencies:
            if isinstance(dependencies, list):
                backburner_args.append("-dependencies:%s" % ",".join(dependencies))
            else:
                backburner_args.append("-dependencies:%s" % dependencies)
        # the bootstrap script which will re-create the engine on the farm
        backburner_bootstrap = os.path.join(self.disk_location, "python", "startup", "backburner.py")
        # now we need to capture all of the environment and everything in a file
        # (thanks backburner!) so that we can replay it later when the task wakes up
        session_file = os.path.join(self.get_backburner_tmp(), "tk_backburner_%s.pickle" % uuid.uuid4().hex)
        data = {}
        data["engine_instance"] = self.instance_name
        data["serialized_context"] = sgtk.context.serialize(self.context)
        data["instance"] = instance if isinstance(instance, str) else instance.instance_name
        data["method_to_execute"] = method_name
        data["args"] = args
        data["sgtk_core_location"] = os.path.dirname(sgtk.__path__[0])
        data["flame_version"] = self._flame_version
        data["user_home"] = os.path.expanduser("~")
        fh = open(session_file, "wb")
        pickle.dump(data, fh)
        fh.close()
        full_cmd = "%s %s %s %s" % (backburner_job_cmd, " ".join(backburner_args), backburner_bootstrap, session_file)
        # On old Flame versions, python hooks run as root. We need to run the
        # command as the effective user to ensure that backburner runs the job
        # as the person using the software, avoiding permission issues.
        if os.getuid() == 0: # root
            # Getting the user name of the user who started Flame (the effective user)
            e_user = pwd.getpwuid(os.geteuid()).pw_name
            # Run the command as the effective user
            full_cmd = "sudo -u %s %s" % (e_user, full_cmd)
            self.log_debug("Running root but will send the job as [%s]" % e_user)
        try:
            # Make sure that the Shotgun session is not expired before
            # handing off to the farm.
            sgtk.get_authenticated_user().refresh_credentials()
        except sgtk.authentication.AuthenticationCancelled:
            # user bailed out of re-authentication - skip job creation
            self.log_debug("User cancelled auth. No backburner job will be created.")
        else:
            self.log_debug("Starting backburner job '%s'" % job_name)
            self.log_debug("Command line: %s" % full_cmd)
            self.log_debug("App: %s" % instance)
            self.log_debug("Method: %s with args %s" % (method_name, args))
            # kick off the submission and capture its output so we can parse
            # the job id out of it
            backburner_job_submission = subprocess.Popen([full_cmd], stdout=subprocess.PIPE, shell=True)
            stdout, stderr = backburner_job_submission.communicate()
            self.log_debug(stdout)
            # cmdjob prints "Successfully submitted job <id>" on success
            # NOTE(review): stdout is bytes on Python 3, so this str regex
            # search would raise - same py2 assumption as above.
            job_id_regex = re.compile(r"(?<=Successfully submitted job )(\d+)")
            match = job_id_regex.search(stdout)
            if match:
                backburner_job_id = match.group(0)
                self.log_debug("Backburner job created (%s)" % backburner_job_id)
                return backburner_job_id
            else:
                error = ["Shotgun backburner job could not be created."]
                if stderr:
                    error += ["Reason: " + stderr]
                error += ["See backburner logs for details."]
                raise TankError("\n".join(error))
################################################################################################################
# accessors to various core settings and functions
def __get_wiretap_central_binary(self, binary_name):
"""
Try to returns the path to a binary in the Wiretap Central binary collection.
This function is compatible with both new Wiretap Central and the legacy Wiretap Central.
:param binary_name: Name of desired binary
:returns: Absolute path as a string
"""
# Wiretap Central can only be present on MacOS and on Linux
if sys.platform not in ["darwin", "linux2"]:
raise TankError("Your operating system does not support Wiretap Central!")
# Priority have to be given to every ".bin" executable on the Wiretap Central binary folder
wtc_path = self._get_wiretap_central_bin_path()
binary = os.path.join(wtc_path, binary_name + ".bin")
if os.path.exists(binary):
return binary
# If not found, we should look for the same path without the ".bin"
binary = os.path.join(wtc_path, binary_name)
if os.path.exists(binary):
return binary
# If we reach this, we are running a legacy Wiretap Central
wtc_path = self._get_wiretap_central_legacy_bin_path()
binary = os.path.join(wtc_path, binary_name)
if os.path.exists(binary):
return binary
# We don't have any Wiretap Central installed on this workstation
raise TankError("Cannot find binary '%s'!" % binary_name)
def _get_wiretap_central_bin_path(self):
"""
Get the path to the Wiretap Central binaries folder based on the current operating system.
:return: Path to the Wiretap Central binaries folder
"""
if sys.platform == "darwin":
return "/Library/WebServer/Documents/WiretapCentral/cgi-bin"
elif sys.platform == "linux2":
return "/var/www/html/WiretapCentral/cgi-bin"
def _get_wiretap_central_legacy_bin_path(self):
"""
Get the path to the legacy Wiretap Central binaries folder based on the current operating system.
:return: Path to the legacy Wiretap Central binaries folder
"""
if sys.platform == "darwin":
return "/Library/WebServer/CGI-Executables/WiretapCentral"
elif sys.platform == "linux2":
return "/var/www/cgi-bin/WiretapCentral"
def get_ffmpeg_path(self):
"""
Returns the path to the ffmpeg executable that ships with Flame.
:returns: Absolute path as a string
"""
return self.__get_wiretap_central_binary("ffmpeg")
def get_read_frame_path(self):
"""
Returns the path to the read_frame utility that ships with Flame.
:returns: Absolute path as a string
"""
return self.__get_wiretap_central_binary("read_frame")
def sgtk_exception_trap(ex_cls, ex, tb):
    """
    UI Popup and logging exception trap override.

    This method is used to override the default exception reporting behaviour
    inside the embedded Flame python interpreter to make errors more visible
    to the user.

    It attempts to create a QT messagebox with a formatted error message to
    alert the user that something has gone wrong. In addition to this, the
    default exception handling is also carried out and the exception is also
    written to the log.

    Note that this is a global object and not an engine-relative thing, so that
    the exception handler will operate correctly even if the engine instance no
    longer exists.

    :param ex_cls: Exception class of the raised exception
    :param ex: The exception instance
    :param tb: Traceback object for the exception
    """
    # careful about infinite loops here - we mustn't raise exceptions.
    # like in other environments and scripts, for TankErrors, we assume that the
    # error message is already a nice descriptive, crafted message and try to present
    # this in a user friendly fashion
    #
    # for other exception types, we give a full call stack.
    error_message = "Critical: Could not format error message."
    # initialize so the logging branch below can't hit an unbound name if
    # formatting the traceback fails
    traceback_str = ""
    try:
        traceback_str = "\n".join(traceback.format_tb(tb))
        if ex_cls == TankError:
            # for TankErrors, we don't show the whole stack trace
            error_message = "A Shotgun error was reported:\n\n%s" % ex
        else:
            error_message = "A Shotgun error was reported:\n\n%s (%s)\n\nTraceback:\n%s" % (ex, ex_cls, traceback_str)
    except:
        pass
    # now try to output it
    try:
        from sgtk.platform.qt import QtGui, QtCore
        if QtCore.QCoreApplication.instance():
            # there is an application running - so pop up a message!
            QtGui.QMessageBox.critical(None, "Shotgun General Error", error_message)
    except:
        pass
    # and try to log it
    try:
        error_message = "An exception was raised:\n\n%s (%s)\n\nTraceback:\n%s" % (ex, ex_cls, traceback_str)
        logging.getLogger(LOG_CHANNEL).error(error_message)
    except:
        pass
    # in addition to the ui popup, also defer to the default mechanism.
    # BUGFIX: this used to pass the builtin `type` instead of the actual
    # exception class, making the default hook report the wrong type.
    sys.__excepthook__(ex_cls, ex, tb)
|
en
| 0.850486
|
# Copyright (c) 2014 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. A Toolkit engine for Flame The engine class. This wraps around a series of callbacks in Flame (so called hooks). The Flame engine is a bit different than other engines. Because Flame doesn't have an API, we cannot call Flame, but Flame will call out to the toolkit code. This means that the normal register_command approach won't work inside of Flame - instead, the engine introduces a different scheme of callbacks that apps can register to ensure that they cen do stuff. For apps, the main entry points are register_export_hook and register_batch_hook. For more information, see below. # the name of the folder in the engine which we should register # with Flame to trigger various hooks to run. # our default log file to write to # a 'plan B' safe log file that we call fall back on in case # the default log file cannot be accessed # define constants for the various modes the engine can execute in :returns: A dictionary with information about the application hosting this engine. The returned dictionary is of the following form on success: { "name": "Flame", "version": "2018.3.pr84", } The returned dictionary is of following form on an error preventing the version identification. { "name": "Flame", "version": "unknown" } # The 'SHOTGUN_FLAME_VERSION' environment variable comes from Flame plugin # The 'TOOLKIT_FLAME_VERSION' environment variable comes from Flame classic config # Fallback to initialization value above Overridden constructor where we init some things which need to be defined very early on in the engine startup. 
# to support use cases where the flame engine isn't started via # the multi-launchapp chain, make sure that hooks that the engine # implements are registered. # the path to the associated python executable # version of Flame we are running # root folder where flame is installed # set the current engine mode. The mode contains information about # how the engine was started - it can be executed either before the # actual DCC starts up (pre-launch), in the DCC itself or on the # backburner farm. This means that there are three distinct bootstrap # scripts which can launch the engine (all contained within the engine itself). # these bootstrap scripts all set an environment variable called # TOOLKIT_FLAME_ENGINE_MODE which defines the desired engine mode. # Transcoder, thumbnail generator and local movie generator will be # initialized on first request for them since, in order to know which # type we will need, we need to wait for the Flame API to be loaded # completely. # Engine construction/setup done before any apps are initialized # set up a custom exception trap for the engine. # it will log the exception and if possible also # display it in a UI # now start the proper init # maintain a list of export options # maintain the export cache # tell QT to interpret C strings as utf-8 # Assuming we're in a new enough version of Flame (2018.3+) we'll # be able to link the Flame project to our SG project. This will # ensure that is a use launches Flame's plugin-based Shotgun # integration that they will be bootstrapped into the correct # project and won't be prompted to choose an SG project to link to. # # NOTE: We only take the initiative here and create the project # link if this is a classic config launch of Flame. One quick way # to knwo that is to just refer to the environment, where we know # that the classic startup script sets some variables. 
Set up logging for the engine :param install_root: path to flame install root # standard flame log file # test if we can write to the default log file # cannot rotate file in this directory, write to tmp instead. # Set up a rotating logger with 4MiB max file size # Always rotate. Current user might not have the correct permission to open this file # Will open file after roll over # create a global logging object # clear any existing handlers # now that we have a logger, we can warn about a non-std log file :) Specifies the path to the associated python process. This is typically populated as part of the engine startup. :param python_path: path to python, as string Specifies which version of Flame this engine is running. This is typically populated as part of the engine startup. :param major_version_str: Major version number as string :param minor_version_str: Minor version number as string :param patch_version_str: Patch version number as string :param full_version_str: Full version number as string Specifies where the flame installation is located. this may be '/usr/discreet', '/opt/Autodesk' etc. :param install_root: root path to flame installation # cannot call this multiple times This expects a list of dictionaries in the form: {name: "command-name", app_instance: "instance-name", display_name: "Display Name" } The app_instance value will match a particular app instance associated with the engine. The name is the menu name of the command to run when the engine starts up. The display_name is the menu display name of the command to run. If name is '' then all commands from the given app instance are returned. If display_name is not present, name will be used instead. :returns A list of tuples for all commands that match the given setting. 
Each tuple will be in the form (instance_name, display_name, command_name, callback) # return a dictionary grouping all the commands by instance name # A command without an app instance in the context menu is actually coming from the engine, so we'll # use the engine name instead. # go through the values from the setting and return any matching commands # add the command if the name from the settings is '' or the name matches Do any initialization after apps have been loaded # only run the startup commands when in DCC mode # run any commands registered via run_at_startup Called when the engine is being destroyed # Remove the current engine python hooks from the flame python hooks path # Close every app windows Returns the Flame's main window :return: Widget representing the flame's main window. Returns the python executable associated with this engine :returns: path to python, e.g. '/usr/discreet/python/2016.0.0.322/bin/python' Returns the preset version required for the currently executing version of Flame. Preset xml files in Flame all have a version number to denote which generation of the file format they implement. If you are using an old preset with a new version of Flame, a warning message appears. :returns: Preset version, as string, e.g. '5' # for version 2016 before ext 1, export preset is v5 # flame 2016 extension 1 and above. # flame 2017 and above # # Note: Flame 2017 uses preset 7, however further adjustments to the actual # preset format used is required in individual apps - for the time being, # the preset version is held at v6, ensuring that app apps operate correctly, # but generating a warning message at startup. # The location where flame export presets are located :returns: Path as string # If possible use the Flame python API to get the presets location # flame 2016 presets structure # flame 2017+ presets structure (note the extra flame folder) Convert a path to a preset that can be incomplete to an absolute path. 
:param preset_path: Path to a preset to find. :param preset_type: Type of preset to look for. :returns: Absolute path to the preset. The location of the flame export preset to use to generate thumbnails. :returns: Path as string The location of the flame export preset to use to generate previews. :returns: Path as string The location of the flame export preset to use to generate local movies. Local movies are linked to assets in Shotgun thru the "Path to Movie" field but are not uploaded on the server. :returns: Path as string The location of wiretap tool :returns: Path as string Compares the given version numbers with the current flame version and returns False if the given version is greater than the current version. Example: - Flame: '2016.1.0.278', version str: '2016.1' => False - Flame: '2016', version str: '2016.1' => True :param version_str: Version to run comparison against # Same version Compares the given version string with the current flame version and returns False if the given version is greater than the current version. Example: - Flame: '2016.1.0.278', version str: '2016.1' => False - Flame: '2016', version str: '2016.1' => True :param version_str: Version to run comparison against Returns Flame's major version number as a string. :returns: String (e.g. '2016') Returns Flame's minor version number as a string. :returns: String (e.g. '2') Returns Flame's patch version number as a string. :returns: String (e.g. '2') Returns Flame's full version number as a string. :returns: String (e.g. '2016.1.0.278') The location where flame is installed. This may be '/usr/discreet', '/opt/Autodesk' etc. :returns: Path as string Property to determine if the current environment has access to a UI or not # check if there is a UI. With Flame, we may run the engine in bootstrap # mode or on the farm - in this case, there is no access to UI. If inside the # DCC UI environment, pyside support is available. 
# there is an active application Override the base show_panel to create a non-modal dialog that will stay on top of the Flame interface # create the dialog: # show the dialog # lastly, return the instantiated widget Get the QWidget parent for all dialogs created through :meth:`show_dialog` :meth:`show_modal`. Can be overriden in derived classes to return the QWidget to be used as the parent for all TankQDialog's. :return: QT Parent window (:class:`PySide.QtGui.QWidget`) Shows a non-modal dialog window in a way suitable for this engine. The engine will attempt to parent the dialog nicely to the host application. The dialog will be created with a standard Toolkit window title bar where the title will be displayed. .. note:: In some cases, it is necessary to hide the standard Toolkit title bar. You can do this by adding a property to the widget class you are displaying:: @property def hide_tk_title_bar(self): "Tell the system to not show the standard toolkit toolbar" return True **Notes for engine developers** Qt dialog & widget management can be quite tricky in different engines/applications. Because of this, Sgtk provides a few overridable methods with the idea being that when developing a new engine, you only need to override the minimum amount necessary. Making use of these methods in the correct way allows the base Engine class to manage the lifetime of the dialogs and widgets efficiently and safely without you having to worry about it. The methods available are listed here in the hierarchy in which they are called:: show_dialog()/show_modal() _create_dialog_with_widget() _get_dialog_parent() _create_widget() _create_dialog() For example, if you just need to make sure that all dialogs use a specific parent widget then you only need to override _get_dialog_parent() (e.g. the tk-maya engine). 
However, if you need to implement a two-stage creation then you may need to re-implement show_dialog() and show_modal() to call _create_widget() and _create_dialog() directly rather than using the helper method _create_dialog_with_widget() (e.g. the tk-3dsmax engine). Finally, if the application you are writing an engine for is Qt based then you may not need to override any of these methods (e.g. the tk-nuke engine). :param title: The title of the window. This will appear in the Toolkit title bar. :param bundle: The app, engine or framework object that is associated with this window :param widget_class: The class of the UI to be constructed. This must derive from QWidget. :type widget_class: :class:`PySide.QtGui.QWidget` Additional parameters specified will be passed through to the widget_class constructor. :returns: the created widget_class instance # create the dialog: # show the dialog # lastly, return the instantiated widget Closes the various windows (dialogs, panels, etc.) opened by the engine. # Make a copy of the list of Tank dialogs that have been created by the engine and # are still opened since the original list will be updated when each dialog is closed. # Loop through the list of opened Tank dialogs. # Close the dialog and let its close callback remove it from the original dialog list. Log a debug message :param msg: The debug message to log Log some info :param msg: The info message to log Log a warning :param msg: The warning message to log Log an error :param msg: The error message to log ################################################################################################################ # Engine Bootstrap # Special bootstrap method used to set up the Flame environment. This is designed to execute before Flame has launched, as part of the bootstrapping process. This method assumes that it is being executed inside a Flame python and is called from the app_launcher script which ensures such an environment. 
The bootstrapper will first import the wiretap API and setup other settings. It then attempts to execute the pre-DCC project creation process, utilizing both wiretap and QT (setup project UI) for this. Finally, it will return the command line args to pass to Flame as it is being launched. :returns: arguments to pass to the app launch process # enable Flame hooks debug # see if we can launch into batch mode. We only do this when in a # shot context and if there is a published batch file in Shotgun # # For now, hard code the logic of how to detect which batch file to load up. # TODO: in the future, we may want to expose this in a hook - but it is arguably # pretty advanced customization :) # # Current logic: Find the latest batch publish belonging to the context # we have a current context to lock on to! # try to see if we can find the latest batch publish # we have a batch file published for this context! # add Flame hooks for this engine # now that we have a wiretap library, call out and initialize the project # automatically Define QT behaviour. Subclassed from base class. # We are running the engine inside of the Flame Application. # alternatively, we are running the engine in backburner # # in both these states, no special QT init is necessary. # Defer to default implementation which looks for pyside and # gracefully fails in case that isn't found. # we are running the engine outside of Flame. # This is special - no QApplication is running at this point - # a state akin to running apps inside the shell engine. # We assume that in pre-launch mode, PySide is available since # we are running within the Flame python. # a simple dialog proxy that pushes the window forward # the trick of activating + raising does not seem to be enough for # modal dialogs. So force put them on top as well. Cache the export asset into the engine cache. :param asset_info: Information dictionary of the asset. See sg_export_hook.postExportAsset for details on the dictionary content. 
# extract asset information # reinitialize the export cache if the format doesn't fit the current asset Cache the batch export asset into the engine cache. :param info: Information dictionary of the asset See sg_batch_hook.batchExportEnd for details on the dictionary content. ################################################################################################################ # export callbacks handling # # Any apps which are interested in registering custom exporters with Flame should use the methods # below. The register_export_hook() is called by apps in order to create a menu entry # on the Flame export menu. The remaining methods are used to call out from the actual Flame hook # to the relevant app code. # Allows an app to register an interest in one of the Flame export hooks. This is one of the interaction entry points in the system and this is how apps typically have their business logic executed. At app init, an app typically calls this method with a syntax like this: # set up callback map callbacks = {} callbacks["preCustomExport"] = self.pre_custom_export callbacks["preExportAsset"] = self.adjust_path callbacks["postExportAsset"] = self.register_post_asset_job # register with the engine self.engine.register_export_hook("Menu Caption", callbacks) The engine will keep track of things automatically, and whenever the user clicks the "Menu Caption" entry on the menu, the corresponding chain of callbacks will be called. All methods should have the following method signature: def export_callback(self, session_id, info) Where session_id is a unique session identifier (typically only used in advanced scenarios) and info reflects the info parameter passed from Flame (varies for different callbacks). For information which export can currently be registered against, see the flame_hooks/exportHook.py file. :param menu_caption: Text to appear on the Flame export menu :param callbacks: Dictionary of callbacks, see above for details. Internal engine method. 
Do not use outside of the engine. Returns all export presets registered by apps. :returns: List of preset titles Internal engine method. Do not use outside of the engine. Start a new export session. Creates a session object which represents a single export session in Flame. :param preset_name: The name of the preset which should be executed. :returns: session id string which is later passed into various methods # set up an export session Internal engine method. Do not use outside of the engine. Dispatch method called from the various Flame hooks. This method will ensure that the Flame callbacks will be dispatched to the appropriate registered app callbacks. :param callback_name: Name of the Flame callback method :param session_id: Unique session identifier :param info: Metadata dictionary from Flame # get the preset # call the callback in the preset # the app has registered interest in this! :return: Flame export cache Clear the Flame export cache ################################################################################################################ # batch callbacks handling # # Any apps which are interested in register custom batch exporters with Flame should use the methods # below. The register_batch_hook() is called by apps in order to register an interest in pre and post # export callbacks when in batch mode. The Flame engine will ensure that the app's callbacks will get # called at the right time. # Allows an app to register an interest in one of the Flame batch hooks. This one of the interaction entry points in the system and this is how apps typically have their business logic executed. 
At app init, an app typically calls this method with a syntax like this: # set up callback map callbacks = {} callbacks["batchExportBegin"] = self.before_export callbacks["batchExportEnd"] = self.after_export # register with the engine self.engine.register_batch_hook(callbacks) The engine will keep track of things automatically, and whenever a batch render executes, the corresponding chain of callbacks will be called. All methods should have the following method signature: def export_callback(self, info) For information about which exports can currently be registered against, see the flame_hooks/batchHook.py file. :param callbacks: Dictionary of callbacks, see above for details. Internal engine method. Do not use outside of the engine. Dispatch method called from the various Flame hooks. This method will ensure that the Flame callbacks will be dispatched to the appropriate registered app callbacks. :param callback_name: Name of the Flame callback method :param session_id: Unique session identifier :param info: Metadata dictionary from Flame # dispatch to all callbacks # the app has registered interest in this! ################################################################################################################ # backburner integration # Return the hostname for the server which hosts this Flame setup. This is an accessor into the engine hook settings, allowing apps to query which host the closest Flame server is running on. :returns: hostname string Return a location on disk, guaranteed to exist where temporary data can be put in such a way that it will be accessible for all backburner jobs, regardless of which host they execute on. :returns: path :return True if Flame exporter API is supported. # Note. Flame exporter can be used in 2019.1 but there are issues # with transcoding of Movie files that prevent wide use of it # with 2019.1. # :return transcoder: Transcoder to use to transcode a clip from one format to another. 
:return thumbnail_generator: Thumbnail generator to use to generate thumbnail from Flame's asset published or rendered. :return local_movie_generator: Local movie generator to use to generate local movie file from Flame's asset published or rendered. Run a method in the local backburner queue. :param job_name: Name of the backburner job :param description: Description of the backburner job :param dependencies: None if the backburner job should execute arbitrarily. If you want to set the job up so that it executes after another known task, pass the backburner id or a list of ids here. This is typically used in conjunction with a postExportAsset hook where the export task runs on backburner. In this case, the hook will return the backburner id. By passing that id into this method, you can create a job which only executes after the main export task has completed. :param instance: App or hook to remotely call up :param method_name: Name of method to remotely execute :param args: dictionary of args (**argv style) to pass to method at remote execution :param backburner_server_host: Name of the backburner server host. :return backburner_job_id: Id of the backburner job created # the backburner executable # pass some args - most importantly tell it to run on the local host # looks like : chars are not valid so replace those # run as current user, not as root # attach the executable to the backburner job # increase the max task length to 600 minutes # add basic job info # backburner does not do any kind of sanitation itself, so ensure that job # info doesn't contain any strange characters etc # remove any non-trivial characters # if the job name contains too many characters, backburner submission fails # there is a convention in flame to append a time stamp to jobs # e.g. 'Export - XXX_YYY_ZZZ (10.02.04) # Specifying a remote backburner manager is only supported on 2016.1 and above # No backburner manager specified in settings. 
Ask local backburnerServer # which manager to choose from. (They might be none running locally) # Before 2018, you needed root privileges to execute this command. # Set the server group to the backburner job # Specify the backburner server if provided # Otherwise, fallback to the global backburner servers setting # Set the backburner job dependencies # call the bootstrap script # now we need to capture all of the environment and everything in a file # (thanks backburner!) so that we can replay it later when the task wakes up # On old Flame versions, python hooks are running as root. We need to run the command as the effective user to # ensure that backburner is running the job as the user who's using the Software to avoid permission issues. # root # Getting the user name of the user who started Flame (the effective user) # Run the command as the effective user # Make sure that the session is not expired # kick it off ################################################################################################################ # accessors to various core settings and functions Try to return the path to a binary in the Wiretap Central binary collection. This function is compatible with both new Wiretap Central and the legacy Wiretap Central. :param binary_name: Name of desired binary :returns: Absolute path as a string # Wiretap Central can only be present on MacOS and on Linux # Priority has to be given to every ".bin" executable on the Wiretap Central binary folder # If not found, we should look for the same path without the ".bin" # If we reach this, we are running a legacy Wiretap Central # We don't have any Wiretap Central installed on this workstation Get the path to the Wiretap Central binaries folder based on the current operating system. :return: Path to the Wiretap Central binaries folder Get the path to the legacy Wiretap Central binaries folder based on the current operating system. 
:return: Path to the legacy Wiretap Central binaries folder Returns the path to the ffmpeg executable that ships with Flame. :returns: Absolute path as a string Returns the path to the read_frame utility that ships with Flame. :returns: Absolute path as a string UI Popup and logging exception trap override. This method is used to override the default exception reporting behaviour inside the embedded Flame python interpreter to make errors more visible to the user. It attempts to create a QT messagebox with a formatted error message to alert the user that something has gone wrong. In addition to this, the default exception handling is also carried out and the exception is also written to the log. Note that this is a global object and not an engine-relative thing, so that the exception handler will operate correctly even if the engine instance no longer exists. # careful about infinite loops here - we mustn't raise exceptions. # like in other environments and scripts, for TankErrors, we assume that the # error message is already a nice descriptive, crafted message and try to present # this in a user friendly fashion # # for other exception types, we give a full call stack. # for TankErrors, we don't show the whole stack trace # now try to output it # there is an application running - so pop up a message! # and try to log it # in addition to the ui popup, also defer to the default mechanism
| 1.969498
| 2
|
elastic/datadog_checks/elastic/metrics.py
|
keisku/integrations-core
| 0
|
6625966
|
<filename>elastic/datadog_checks/elastic/metrics.py
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .utils import byte_to_mebibyte, ms_to_second
# Metrics definition format is a dictionary mapping:
# datadog_metric_name --> (datadog_metric_type, es_metric_name, optional_conversion_func)
# Cluster-wide metrics, pre-aggregated by ES across primary shards (the
# "_all.primaries" section of the index stats API); compatible with all ES
# versions. Values are (datadog_metric_type, es_metric_path[, conversion]);
# *_in_millis sources are converted to seconds via ms_to_second.
PRIMARY_SHARD_METRICS = {
    'elasticsearch.primaries.docs.count': ('gauge', '_all.primaries.docs.count'),
    'elasticsearch.primaries.docs.deleted': ('gauge', '_all.primaries.docs.deleted'),
    'elasticsearch.primaries.store.size': ('gauge', '_all.primaries.store.size_in_bytes'),
    'elasticsearch.primaries.indexing.index.total': ('gauge', '_all.primaries.indexing.index_total'),
    'elasticsearch.primaries.indexing.index.time': ('gauge', '_all.primaries.indexing.index_time_in_millis', ms_to_second),
    'elasticsearch.primaries.indexing.index.current': ('gauge', '_all.primaries.indexing.index_current'),
    'elasticsearch.primaries.indexing.delete.total': ('gauge', '_all.primaries.indexing.delete_total'),
    'elasticsearch.primaries.indexing.delete.time': ('gauge', '_all.primaries.indexing.delete_time_in_millis', ms_to_second),
    'elasticsearch.primaries.indexing.delete.current': ('gauge', '_all.primaries.indexing.delete_current'),
    'elasticsearch.primaries.get.total': ('gauge', '_all.primaries.get.total'),
    'elasticsearch.primaries.get.time': ('gauge', '_all.primaries.get.time_in_millis', ms_to_second),
    'elasticsearch.primaries.get.current': ('gauge', '_all.primaries.get.current'),
    'elasticsearch.primaries.get.exists.total': ('gauge', '_all.primaries.get.exists_total'),
    'elasticsearch.primaries.get.exists.time': ('gauge', '_all.primaries.get.exists_time_in_millis', ms_to_second),
    'elasticsearch.primaries.get.missing.total': ('gauge', '_all.primaries.get.missing_total'),
    'elasticsearch.primaries.get.missing.time': ('gauge', '_all.primaries.get.missing_time_in_millis', ms_to_second),
    'elasticsearch.primaries.search.query.total': ('gauge', '_all.primaries.search.query_total'),
    'elasticsearch.primaries.search.query.time': ('gauge', '_all.primaries.search.query_time_in_millis', ms_to_second),
    'elasticsearch.primaries.search.query.current': ('gauge', '_all.primaries.search.query_current'),
    'elasticsearch.primaries.search.fetch.total': ('gauge', '_all.primaries.search.fetch_total'),
    'elasticsearch.primaries.search.fetch.time': ('gauge', '_all.primaries.search.fetch_time_in_millis', ms_to_second),
    'elasticsearch.primaries.search.fetch.current': ('gauge', '_all.primaries.search.fetch_current'),
    # 'indices' is the whole mapping of index stats; the index count is its size.
    'elasticsearch.indices.count': ('gauge', 'indices', len),
}
# Primary-shard "external" refresh counters, only collected for
# Elasticsearch >= 7.2.0 (per the version-gated naming convention).
PRIMARY_SHARD_METRICS_POST_7_2_0 = {
    'elasticsearch.primaries.refresh.external.total': ('gauge', '_all.primaries.refresh.external_total'),
    # ES reports milliseconds; submit seconds.
    'elasticsearch.primaries.refresh.external.total.time': (
        'gauge',
        '_all.primaries.refresh.external_total_time_in_millis',
        ms_to_second,
    ),
}
# Primary-shard merge / refresh / flush metrics, only collected for
# Elasticsearch >= 1.0.0 (per the version-gated naming convention).
PRIMARY_SHARD_METRICS_POST_1_0_0 = {
    'elasticsearch.primaries.merges.current': ('gauge', '_all.primaries.merges.current'),
    'elasticsearch.primaries.merges.current.docs': ('gauge', '_all.primaries.merges.current_docs'),
    'elasticsearch.primaries.merges.current.size': ('gauge', '_all.primaries.merges.current_size_in_bytes'),
    'elasticsearch.primaries.merges.total': ('gauge', '_all.primaries.merges.total'),
    'elasticsearch.primaries.merges.total.time': ('gauge', '_all.primaries.merges.total_time_in_millis', ms_to_second),
    'elasticsearch.primaries.merges.total.docs': ('gauge', '_all.primaries.merges.total_docs'),
    'elasticsearch.primaries.merges.total.size': ('gauge', '_all.primaries.merges.total_size_in_bytes'),
    'elasticsearch.primaries.refresh.total': ('gauge', '_all.primaries.refresh.total'),
    'elasticsearch.primaries.refresh.total.time': ('gauge', '_all.primaries.refresh.total_time_in_millis', ms_to_second),
    'elasticsearch.primaries.flush.total': ('gauge', '_all.primaries.flush.total'),
    'elasticsearch.primaries.flush.total.time': ('gauge', '_all.primaries.flush.total_time_in_millis', ms_to_second),
}
# Node stats metrics that are common to all Elasticsearch versions.
# Values are (datadog_metric_type, path_in_node_stats[, conversion_func]);
# *_time_in_millis sources are converted to seconds with ms_to_second.
STATS_METRICS = {
    # -- index-level document / store / indexing statistics --
    'elasticsearch.docs.count': ('gauge', 'indices.docs.count'),
    'elasticsearch.docs.deleted': ('gauge', 'indices.docs.deleted'),
    'elasticsearch.store.size': ('gauge', 'indices.store.size_in_bytes'),
    'elasticsearch.indexing.index.total': ('gauge', 'indices.indexing.index_total'),
    'elasticsearch.indexing.index.total.count': ('monotonic_count', 'indices.indexing.index_total'),
    'elasticsearch.indexing.index.time': ('gauge', 'indices.indexing.index_time_in_millis', ms_to_second),
    'elasticsearch.indexing.index.time.count': ('monotonic_count', 'indices.indexing.index_time_in_millis', ms_to_second),
    'elasticsearch.indexing.index.current': ('gauge', 'indices.indexing.index_current'),
    'elasticsearch.indexing.delete.total': ('gauge', 'indices.indexing.delete_total'),
    'elasticsearch.indexing.delete.total.count': ('monotonic_count', 'indices.indexing.delete_total'),
    'elasticsearch.indexing.delete.time': ('gauge', 'indices.indexing.delete_time_in_millis', ms_to_second),
    'elasticsearch.indexing.delete.time.count': ('monotonic_count', 'indices.indexing.delete_time_in_millis', ms_to_second),
    'elasticsearch.indexing.delete.current': ('gauge', 'indices.indexing.delete_current'),
    # -- get statistics --
    'elasticsearch.get.total': ('gauge', 'indices.get.total'),
    'elasticsearch.get.total.count': ('monotonic_count', 'indices.get.total'),
    'elasticsearch.get.time': ('gauge', 'indices.get.time_in_millis', ms_to_second),
    'elasticsearch.get.time.count': ('monotonic_count', 'indices.get.time_in_millis', ms_to_second),
    'elasticsearch.get.current': ('gauge', 'indices.get.current'),
    'elasticsearch.get.exists.total': ('gauge', 'indices.get.exists_total'),
    'elasticsearch.get.exists.total.count': ('monotonic_count', 'indices.get.exists_total'),
    'elasticsearch.get.exists.time': ('gauge', 'indices.get.exists_time_in_millis', ms_to_second),
    'elasticsearch.get.exists.time.count': ('monotonic_count', 'indices.get.exists_time_in_millis', ms_to_second),
    'elasticsearch.get.missing.total': ('gauge', 'indices.get.missing_total'),
    'elasticsearch.get.missing.total.count': ('monotonic_count', 'indices.get.missing_total'),
    'elasticsearch.get.missing.time': ('gauge', 'indices.get.missing_time_in_millis', ms_to_second),
    'elasticsearch.get.missing.time.count': ('monotonic_count', 'indices.get.missing_time_in_millis', ms_to_second),
    # -- search statistics --
    'elasticsearch.search.query.total': ('gauge', 'indices.search.query_total'),
    'elasticsearch.search.query.total.count': ('monotonic_count', 'indices.search.query_total'),
    'elasticsearch.search.query.time': ('gauge', 'indices.search.query_time_in_millis', ms_to_second),
    'elasticsearch.search.query.time.count': ('monotonic_count', 'indices.search.query_time_in_millis', ms_to_second),
    'elasticsearch.search.query.current': ('gauge', 'indices.search.query_current'),
    'elasticsearch.search.fetch.total': ('gauge', 'indices.search.fetch_total'),
    'elasticsearch.search.fetch.total.count': ('monotonic_count', 'indices.search.fetch_total'),
    'elasticsearch.search.fetch.time': ('gauge', 'indices.search.fetch_time_in_millis', ms_to_second),
    'elasticsearch.search.fetch.time.count': ('monotonic_count', 'indices.search.fetch_time_in_millis', ms_to_second),
    'elasticsearch.search.fetch.current': ('gauge', 'indices.search.fetch_current'),
    # -- segments / merges / refresh / flush --
    'elasticsearch.indices.segments.count': ('gauge', 'indices.segments.count'),
    'elasticsearch.indices.segments.memory_in_bytes': ('gauge', 'indices.segments.memory_in_bytes'),
    'elasticsearch.merges.current': ('gauge', 'indices.merges.current'),
    'elasticsearch.merges.current.docs': ('gauge', 'indices.merges.current_docs'),
    'elasticsearch.merges.current.size': ('gauge', 'indices.merges.current_size_in_bytes'),
    'elasticsearch.merges.total': ('gauge', 'indices.merges.total'),
    'elasticsearch.merges.total.count': ('monotonic_count', 'indices.merges.total'),
    'elasticsearch.merges.total.time': ('gauge', 'indices.merges.total_time_in_millis', ms_to_second),
    'elasticsearch.merges.total.time.count': ('monotonic_count', 'indices.merges.total_time_in_millis', ms_to_second),
    'elasticsearch.merges.total.docs': ('gauge', 'indices.merges.total_docs'),
    'elasticsearch.merges.total.docs.count': ('monotonic_count', 'indices.merges.total_docs'),
    'elasticsearch.merges.total.size': ('gauge', 'indices.merges.total_size_in_bytes'),
    'elasticsearch.merges.total.size.count': ('monotonic_count', 'indices.merges.total_size_in_bytes'),
    'elasticsearch.refresh.total': ('gauge', 'indices.refresh.total'),
    'elasticsearch.refresh.total.count': ('monotonic_count', 'indices.refresh.total'),
    'elasticsearch.refresh.total.time': ('gauge', 'indices.refresh.total_time_in_millis', ms_to_second),
    'elasticsearch.refresh.total.time.count': ('monotonic_count', 'indices.refresh.total_time_in_millis', ms_to_second),
    'elasticsearch.flush.total': ('gauge', 'indices.flush.total'),
    'elasticsearch.flush.total.count': ('monotonic_count', 'indices.flush.total'),
    'elasticsearch.flush.total.time': ('gauge', 'indices.flush.total_time_in_millis', ms_to_second),
    'elasticsearch.flush.total.time.count': ('monotonic_count', 'indices.flush.total_time_in_millis', ms_to_second),
    # -- process / transport --
    'elasticsearch.process.open_fd': ('gauge', 'process.open_file_descriptors'),
    'elasticsearch.transport.rx_count': ('gauge', 'transport.rx_count'),
    'elasticsearch.transport.rx_count.count': ('monotonic_count', 'transport.rx_count'),
    'elasticsearch.transport.tx_count': ('gauge', 'transport.tx_count'),
    'elasticsearch.transport.tx_count.count': ('monotonic_count', 'transport.tx_count'),
    'elasticsearch.transport.rx_size': ('gauge', 'transport.rx_size_in_bytes'),
    'elasticsearch.transport.rx_size.count': ('monotonic_count', 'transport.rx_size_in_bytes'),
    'elasticsearch.transport.tx_size': ('gauge', 'transport.tx_size_in_bytes'),
    'elasticsearch.transport.tx_size.count': ('monotonic_count', 'transport.tx_size_in_bytes'),
    'elasticsearch.transport.server_open': ('gauge', 'transport.server_open'),
    # -- thread pools (one group per pool; 'warmer' has no .count variants) --
    'elasticsearch.thread_pool.flush.active': ('gauge', 'thread_pool.flush.active'),
    'elasticsearch.thread_pool.flush.threads': ('gauge', 'thread_pool.flush.threads'),
    'elasticsearch.thread_pool.flush.threads.count': ('monotonic_count', 'thread_pool.flush.threads'),
    'elasticsearch.thread_pool.flush.queue': ('gauge', 'thread_pool.flush.queue'),
    'elasticsearch.thread_pool.flush.rejected': ('rate', 'thread_pool.flush.rejected'),
    'elasticsearch.thread_pool.flush.rejected.count': ('monotonic_count', 'thread_pool.flush.rejected'),
    'elasticsearch.thread_pool.flush.completed': ('gauge', 'thread_pool.flush.completed'),
    'elasticsearch.thread_pool.flush.completed.count': ('monotonic_count', 'thread_pool.flush.completed'),
    'elasticsearch.thread_pool.generic.active': ('gauge', 'thread_pool.generic.active'),
    'elasticsearch.thread_pool.generic.threads': ('gauge', 'thread_pool.generic.threads'),
    'elasticsearch.thread_pool.generic.threads.count': ('monotonic_count', 'thread_pool.generic.threads'),
    'elasticsearch.thread_pool.generic.queue': ('gauge', 'thread_pool.generic.queue'),
    'elasticsearch.thread_pool.generic.rejected': ('rate', 'thread_pool.generic.rejected'),
    'elasticsearch.thread_pool.generic.rejected.count': ('monotonic_count', 'thread_pool.generic.rejected'),
    'elasticsearch.thread_pool.generic.completed': ('gauge', 'thread_pool.generic.completed'),
    'elasticsearch.thread_pool.generic.completed.count': ('monotonic_count', 'thread_pool.generic.completed'),
    'elasticsearch.thread_pool.get.active': ('gauge', 'thread_pool.get.active'),
    'elasticsearch.thread_pool.get.threads': ('gauge', 'thread_pool.get.threads'),
    'elasticsearch.thread_pool.get.threads.count': ('monotonic_count', 'thread_pool.get.threads'),
    'elasticsearch.thread_pool.get.queue': ('gauge', 'thread_pool.get.queue'),
    'elasticsearch.thread_pool.get.rejected': ('rate', 'thread_pool.get.rejected'),
    'elasticsearch.thread_pool.get.rejected.count': ('monotonic_count', 'thread_pool.get.rejected'),
    'elasticsearch.thread_pool.get.completed': ('gauge', 'thread_pool.get.completed'),
    'elasticsearch.thread_pool.get.completed.count': ('monotonic_count', 'thread_pool.get.completed'),
    'elasticsearch.thread_pool.management.active': ('gauge', 'thread_pool.management.active'),
    'elasticsearch.thread_pool.management.threads': ('gauge', 'thread_pool.management.threads'),
    'elasticsearch.thread_pool.management.threads.count': ('monotonic_count', 'thread_pool.management.threads'),
    'elasticsearch.thread_pool.management.queue': ('gauge', 'thread_pool.management.queue'),
    'elasticsearch.thread_pool.management.rejected': ('rate', 'thread_pool.management.rejected'),
    'elasticsearch.thread_pool.management.rejected.count': ('monotonic_count', 'thread_pool.management.rejected'),
    'elasticsearch.thread_pool.management.completed': ('gauge', 'thread_pool.management.completed'),
    'elasticsearch.thread_pool.management.completed.count': ('monotonic_count', 'thread_pool.management.completed'),
    'elasticsearch.thread_pool.refresh.active': ('gauge', 'thread_pool.refresh.active'),
    'elasticsearch.thread_pool.refresh.threads': ('gauge', 'thread_pool.refresh.threads'),
    'elasticsearch.thread_pool.refresh.threads.count': ('monotonic_count', 'thread_pool.refresh.threads'),
    'elasticsearch.thread_pool.refresh.queue': ('gauge', 'thread_pool.refresh.queue'),
    'elasticsearch.thread_pool.refresh.rejected': ('rate', 'thread_pool.refresh.rejected'),
    'elasticsearch.thread_pool.refresh.rejected.count': ('monotonic_count', 'thread_pool.refresh.rejected'),
    'elasticsearch.thread_pool.refresh.completed': ('gauge', 'thread_pool.refresh.completed'),
    'elasticsearch.thread_pool.refresh.completed.count': ('monotonic_count', 'thread_pool.refresh.completed'),
    'elasticsearch.thread_pool.search.active': ('gauge', 'thread_pool.search.active'),
    'elasticsearch.thread_pool.search.threads': ('gauge', 'thread_pool.search.threads'),
    'elasticsearch.thread_pool.search.threads.count': ('monotonic_count', 'thread_pool.search.threads'),
    'elasticsearch.thread_pool.search.queue': ('gauge', 'thread_pool.search.queue'),
    'elasticsearch.thread_pool.search.rejected': ('rate', 'thread_pool.search.rejected'),
    'elasticsearch.thread_pool.search.rejected.count': ('monotonic_count', 'thread_pool.search.rejected'),
    'elasticsearch.thread_pool.search.completed': ('gauge', 'thread_pool.search.completed'),
    'elasticsearch.thread_pool.search.completed.count': ('monotonic_count', 'thread_pool.search.completed'),
    'elasticsearch.thread_pool.snapshot.active': ('gauge', 'thread_pool.snapshot.active'),
    'elasticsearch.thread_pool.snapshot.threads': ('gauge', 'thread_pool.snapshot.threads'),
    'elasticsearch.thread_pool.snapshot.threads.count': ('monotonic_count', 'thread_pool.snapshot.threads'),
    'elasticsearch.thread_pool.snapshot.queue': ('gauge', 'thread_pool.snapshot.queue'),
    'elasticsearch.thread_pool.snapshot.rejected': ('rate', 'thread_pool.snapshot.rejected'),
    'elasticsearch.thread_pool.snapshot.rejected.count': ('monotonic_count', 'thread_pool.snapshot.rejected'),
    'elasticsearch.thread_pool.snapshot.completed': ('gauge', 'thread_pool.snapshot.completed'),
    'elasticsearch.thread_pool.snapshot.completed.count': ('monotonic_count', 'thread_pool.snapshot.completed'),
    'elasticsearch.thread_pool.warmer.active': ('gauge', 'thread_pool.warmer.active'),
    'elasticsearch.thread_pool.warmer.threads': ('gauge', 'thread_pool.warmer.threads'),
    'elasticsearch.thread_pool.warmer.queue': ('gauge', 'thread_pool.warmer.queue'),
    'elasticsearch.thread_pool.warmer.rejected': ('rate', 'thread_pool.warmer.rejected'),
    'elasticsearch.thread_pool.warmer.completed': ('gauge', 'thread_pool.warmer.completed'),
    # -- HTTP connections --
    'elasticsearch.http.current_open': ('gauge', 'http.current_open'),
    'elasticsearch.http.total_opened': ('gauge', 'http.total_opened'),
    'elasticsearch.http.total_opened.count': ('monotonic_count', 'http.total_opened'),
    # -- JVM memory / threads --
    'jvm.mem.heap_committed': ('gauge', 'jvm.mem.heap_committed_in_bytes'),
    'jvm.mem.heap_used': ('gauge', 'jvm.mem.heap_used_in_bytes'),
    'jvm.mem.heap_in_use': ('gauge', 'jvm.mem.heap_used_percent'),
    'jvm.mem.heap_max': ('gauge', 'jvm.mem.heap_max_in_bytes'),
    'jvm.mem.non_heap_committed': ('gauge', 'jvm.mem.non_heap_committed_in_bytes'),
    'jvm.mem.non_heap_used': ('gauge', 'jvm.mem.non_heap_used_in_bytes'),
    'jvm.mem.pools.young.used': ('gauge', 'jvm.mem.pools.young.used_in_bytes'),
    'jvm.mem.pools.young.max': ('gauge', 'jvm.mem.pools.young.max_in_bytes'),
    'jvm.mem.pools.old.used': ('gauge', 'jvm.mem.pools.old.used_in_bytes'),
    'jvm.mem.pools.old.max': ('gauge', 'jvm.mem.pools.old.max_in_bytes'),
    'jvm.mem.pools.survivor.used': ('gauge', 'jvm.mem.pools.survivor.used_in_bytes'),
    'jvm.mem.pools.survivor.max': ('gauge', 'jvm.mem.pools.survivor.max_in_bytes'),
    'jvm.threads.count': ('gauge', 'jvm.threads.count'),
    'jvm.threads.peak_count': ('gauge', 'jvm.threads.peak_count'),
    # -- filesystem totals --
    'elasticsearch.fs.total.total_in_bytes': ('gauge', 'fs.total.total_in_bytes'),
    'elasticsearch.fs.total.free_in_bytes': ('gauge', 'fs.total.free_in_bytes'),
    'elasticsearch.fs.total.available_in_bytes': ('gauge', 'fs.total.available_in_bytes'),
}
# Indexing pressure memory metrics, only collected for Elasticsearch >= 7.9.0
# (per the version-gated naming convention). Each Datadog metric mirrors the
# same path under the node stats "indexing_pressure" section for the three
# request roles.
ADDITIONAL_METRICS_POST_7_9_0 = {
    'elasticsearch.indexing_pressure.memory.current.%s_in_bytes' % role: (
        'gauge',
        'indexing_pressure.memory.current.%s_in_bytes' % role,
    )
    for role in ('coordinating', 'primary', 'replica')
}
# Node-level "external" refresh counters, only collected for
# Elasticsearch >= 7.2.0 (per the version-gated naming convention).
ADDITIONAL_METRICS_POST_7_2_0 = {
    'elasticsearch.refresh.external.total': ('gauge', 'indices.refresh.external_total'),
    # ES reports milliseconds; submit seconds.
    'elasticsearch.refresh.external.total.time': (
        'gauge',
        'indices.refresh.external_total_time_in_millis',
        ms_to_second,
    ),
}
# "index" thread pool metrics; the version-gated name indicates these are
# only collected for Elasticsearch < 7.0.0 (presumably because the dedicated
# 'index' pool no longer exists afterwards — confirm against the check code).
ADDITIONAL_METRICS_PRE_7_0_0 = {
    'elasticsearch.thread_pool.index.active': ('gauge', 'thread_pool.index.active'),
    'elasticsearch.thread_pool.index.queue': ('gauge', 'thread_pool.index.queue'),
    'elasticsearch.thread_pool.index.threads': ('gauge', 'thread_pool.index.threads'),
    'elasticsearch.thread_pool.index.threads.count': ('monotonic_count', 'thread_pool.index.threads'),
    'elasticsearch.thread_pool.index.rejected': ('rate', 'thread_pool.index.rejected'),
    'elasticsearch.thread_pool.index.rejected.count': ('monotonic_count', 'thread_pool.index.rejected'),
    'elasticsearch.thread_pool.index.completed': ('gauge', 'thread_pool.index.completed'),
    'elasticsearch.thread_pool.index.completed.count': ('monotonic_count', 'thread_pool.index.completed'),
}
# "percolate" / "suggest" thread pool metrics; the version-gated name
# indicates these are only collected for Elasticsearch < 5.0.0.
ADDITIONAL_METRICS_PRE_5_0_0 = {
    'elasticsearch.thread_pool.percolate.active': ('gauge', 'thread_pool.percolate.active'),
    'elasticsearch.thread_pool.percolate.threads': ('gauge', 'thread_pool.percolate.threads'),
    'elasticsearch.thread_pool.percolate.queue': ('gauge', 'thread_pool.percolate.queue'),
    'elasticsearch.thread_pool.percolate.rejected': ('rate', 'thread_pool.percolate.rejected'),
    'elasticsearch.thread_pool.suggest.active': ('gauge', 'thread_pool.suggest.active'),
    'elasticsearch.thread_pool.suggest.threads': ('gauge', 'thread_pool.suggest.threads'),
    'elasticsearch.thread_pool.suggest.queue': ('gauge', 'thread_pool.suggest.queue'),
    'elasticsearch.thread_pool.suggest.rejected': ('rate', 'thread_pool.suggest.rejected'),
}
# Metrics for index level. Unlike the dicts above, the source paths here are
# not raw node-stats paths: 'health'/'health_reverse' and the shard/store
# fields are presumably derived fields computed by the check before lookup —
# confirm against the check's index-stats collection code.
INDEX_STATS_METRICS = {
    'elasticsearch.index.health': ('gauge', 'health'),
    'elasticsearch.index.health.reverse': ('gauge', 'health_reverse'),
    'elasticsearch.index.docs.count': ('gauge', 'docs_count'),
    'elasticsearch.index.docs.deleted': ('gauge', 'docs_deleted'),
    'elasticsearch.index.primary_shards': ('gauge', 'primary_shards'),
    'elasticsearch.index.replica_shards': ('gauge', 'replica_shards'),
    'elasticsearch.index.primary_store_size': ('gauge', 'primary_store_size'),
    'elasticsearch.index.store_size': ('gauge', 'store_size'),
}
# JVM GC collector metrics reported per generation (young/old), only
# collected for Elasticsearch >= 0.90.10 (per the version-gated naming
# convention). Collection times are reported in milliseconds by ES and
# converted to seconds.
JVM_METRICS_POST_0_90_10 = {
    'jvm.gc.collectors.young.count': ('gauge', 'jvm.gc.collectors.young.collection_count'),
    'jvm.gc.collectors.young.collection_time': ('gauge', 'jvm.gc.collectors.young.collection_time_in_millis', ms_to_second),
    'jvm.gc.collectors.old.count': ('gauge', 'jvm.gc.collectors.old.collection_count'),
    'jvm.gc.collectors.old.collection_time': ('gauge', 'jvm.gc.collectors.old.collection_time_in_millis', ms_to_second),
}
# Same GC collector counters as JVM_METRICS_POST_0_90_10, but submitted
# as rates instead of gauges.
JVM_METRICS_RATE = {
    'jvm.gc.collectors.young.rate': ('rate', 'jvm.gc.collectors.young.collection_count'),
    'jvm.gc.collectors.young.collection_time.rate': ('rate', 'jvm.gc.collectors.young.collection_time_in_millis', ms_to_second),
    'jvm.gc.collectors.old.rate': ('rate', 'jvm.gc.collectors.old.collection_count'),
    'jvm.gc.collectors.old.collection_time.rate': ('rate', 'jvm.gc.collectors.old.collection_time_in_millis', ms_to_second),
}
# JVM GC metrics for Elasticsearch < 0.90.10 (per the version-gated naming
# convention), where collectors are keyed by JVM collector name
# (ConcurrentMarkSweep / ParNew) rather than young/old generation.
JVM_METRICS_PRE_0_90_10 = {
    'jvm.gc.concurrent_mark_sweep.count': ('gauge', 'jvm.gc.collectors.ConcurrentMarkSweep.collection_count'),
    'jvm.gc.concurrent_mark_sweep.collection_time': (
        'gauge',
        'jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis',
        ms_to_second,
    ),
    'jvm.gc.par_new.count': ('gauge', 'jvm.gc.collectors.ParNew.collection_count'),
    'jvm.gc.par_new.collection_time': ('gauge', 'jvm.gc.collectors.ParNew.collection_time_in_millis', ms_to_second),
    'jvm.gc.collection_count': ('gauge', 'jvm.gc.collection_count'),
    'jvm.gc.collection_time': ('gauge', 'jvm.gc.collection_time_in_millis', ms_to_second),
}
# Open search contexts and fielddata metrics; the version-gated name
# indicates these are only collected for Elasticsearch >= 0.90.5.
ADDITIONAL_METRICS_POST_0_90_5 = {
    'elasticsearch.search.fetch.open_contexts': ('gauge', 'indices.search.open_contexts'),
    'elasticsearch.fielddata.size': ('gauge', 'indices.fielddata.memory_size_in_bytes'),
    'elasticsearch.fielddata.evictions': ('gauge', 'indices.fielddata.evictions'),
    'elasticsearch.fielddata.evictions.count': ('monotonic_count', 'indices.fielddata.evictions'),
}
# Filter cache and id cache metrics; the version-gated name indicates these
# are only collected for 0.90.5 <= Elasticsearch < 2.0.
ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0 = {
    'elasticsearch.cache.filter.evictions': ('gauge', 'indices.filter_cache.evictions'),
    'elasticsearch.cache.filter.evictions.count': ('monotonic_count', 'indices.filter_cache.evictions'),
    'elasticsearch.cache.filter.size': ('gauge', 'indices.filter_cache.memory_size_in_bytes'),
    'elasticsearch.id_cache.size': ('gauge', 'indices.id_cache.memory_size_in_bytes'),
}
# Legacy cache metrics (flat "indices.cache.*" layout); the version-gated
# name indicates these are only collected for Elasticsearch < 0.90.5.
ADDITIONAL_METRICS_PRE_0_90_5 = {
    'elasticsearch.cache.field.evictions': ('gauge', 'indices.cache.field_evictions'),
    'elasticsearch.cache.field.size': ('gauge', 'indices.cache.field_size_in_bytes'),
    'elasticsearch.cache.filter.count': ('gauge', 'indices.cache.filter_count'),
    'elasticsearch.cache.filter.evictions': ('gauge', 'indices.cache.filter_evictions'),
    'elasticsearch.cache.filter.size': ('gauge', 'indices.cache.filter_size_in_bytes'),
}
# Translog metrics; the version-gated name indicates these are only
# collected for Elasticsearch >= 1.0.0.
ADDITIONAL_METRICS_POST_1_0_0 = {
    'elasticsearch.indices.translog.size_in_bytes': ('gauge', 'indices.translog.size_in_bytes'),
    'elasticsearch.indices.translog.operations': ('gauge', 'indices.translog.operations'),
}
# Stats are only valid for v1.x: filesystem-level disk I/O counters
# that Elasticsearch stopped exposing afterwards.
ADDITIONAL_METRICS_1_x = {
    'elasticsearch.fs.total.disk_reads': ('rate', 'fs.total.disk_reads'),
    'elasticsearch.fs.total.disk_writes': ('rate', 'fs.total.disk_writes'),
    'elasticsearch.fs.total.disk_io_op': ('rate', 'fs.total.disk_io_op'),
    'elasticsearch.fs.total.disk_read_size_in_bytes': ('gauge', 'fs.total.disk_read_size_in_bytes'),
    'elasticsearch.fs.total.disk_write_size_in_bytes': ('gauge', 'fs.total.disk_write_size_in_bytes'),
    'elasticsearch.fs.total.disk_io_size_in_bytes': ('gauge', 'fs.total.disk_io_size_in_bytes'),
}
# Extra segment memory breakdown metrics; the version-gated name indicates
# these are only collected for Elasticsearch >= 1.3.0. Each Datadog metric
# mirrors the same "indices.segments.*" path.
ADDITIONAL_METRICS_POST_1_3_0 = {
    'elasticsearch.indices.segments.%s_memory_in_bytes' % component: (
        'gauge',
        'indices.segments.%s_memory_in_bytes' % component,
    )
    for component in ('index_writer', 'version_map')
}
# Metrics added in ES 1.4.0: indexing throttling, query cache, extra segment
# memory breakdowns, circuit breakers, and the 'listener' thread pool.
# Millisecond counters are converted to seconds via ms_to_second.
ADDITIONAL_METRICS_POST_1_4_0 = {
    'elasticsearch.indices.indexing.throttle_time': (
        'rate',
        'indices.indexing.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indices.indexing.throttle_time.count': (
        'monotonic_count',
        'indices.indexing.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indices.query_cache.memory_size_in_bytes': ('gauge', 'indices.query_cache.memory_size_in_bytes'),
    'elasticsearch.indices.query_cache.hit_count': ('rate', 'indices.query_cache.hit_count'),
    'elasticsearch.indices.query_cache.hit_count.count': ('monotonic_count', 'indices.query_cache.hit_count'),
    'elasticsearch.indices.query_cache.miss_count': ('rate', 'indices.query_cache.miss_count'),
    'elasticsearch.indices.query_cache.miss_count.total': ('monotonic_count', 'indices.query_cache.miss_count'),
    'elasticsearch.indices.query_cache.evictions': ('rate', 'indices.query_cache.evictions'),
    'elasticsearch.indices.query_cache.evictions.count': ('monotonic_count', 'indices.query_cache.evictions'),
    'elasticsearch.indices.segments.index_writer_max_memory_in_bytes': (
        'gauge',
        'indices.segments.index_writer_max_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes': (
        'gauge',
        'indices.segments.fixed_bit_set_memory_in_bytes',
    ),
    'elasticsearch.breakers.fielddata.estimated_size_in_bytes': ('gauge', 'breakers.fielddata.estimated_size_in_bytes'),
    'elasticsearch.breakers.fielddata.overhead': ('gauge', 'breakers.fielddata.overhead'),
    'elasticsearch.breakers.fielddata.tripped': ('rate', 'breakers.fielddata.tripped'),
    'elasticsearch.breakers.parent.estimated_size_in_bytes': ('gauge', 'breakers.parent.estimated_size_in_bytes'),
    'elasticsearch.breakers.parent.overhead': ('gauge', 'breakers.parent.overhead'),
    'elasticsearch.breakers.parent.tripped': ('rate', 'breakers.parent.tripped'),
    'elasticsearch.breakers.request.estimated_size_in_bytes': ('gauge', 'breakers.request.estimated_size_in_bytes'),
    'elasticsearch.breakers.request.overhead': ('gauge', 'breakers.request.overhead'),
    'elasticsearch.breakers.request.tripped': ('rate', 'breakers.request.tripped'),
    'elasticsearch.thread_pool.listener.active': ('gauge', 'thread_pool.listener.active'),
    'elasticsearch.thread_pool.listener.threads': ('gauge', 'thread_pool.listener.threads'),
    'elasticsearch.thread_pool.listener.threads.count': ('monotonic_count', 'thread_pool.listener.threads'),
    'elasticsearch.thread_pool.listener.queue': ('gauge', 'thread_pool.listener.queue'),
    'elasticsearch.thread_pool.listener.rejected': ('rate', 'thread_pool.listener.rejected'),
    'elasticsearch.thread_pool.listener.rejected.count': ('monotonic_count', 'thread_pool.listener.rejected'),
}

# Shard recovery stats, added in ES 1.5.0.
ADDITIONAL_METRICS_POST_1_5_0 = {
    'elasticsearch.indices.recovery.current_as_source': ('gauge', 'indices.recovery.current_as_source'),
    'elasticsearch.indices.recovery.current_as_target': ('gauge', 'indices.recovery.current_as_target'),
    'elasticsearch.indices.recovery.throttle_time': (
        'rate',
        'indices.recovery.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indices.recovery.throttle_time.count': (
        'monotonic_count',
        'indices.recovery.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}

# fetch_shard_* thread pools, added in ES 1.6.0.
ADDITIONAL_METRICS_POST_1_6_0 = {
    'elasticsearch.thread_pool.fetch_shard_started.active': ('gauge', 'thread_pool.fetch_shard_started.active'),
    'elasticsearch.thread_pool.fetch_shard_started.threads': ('gauge', 'thread_pool.fetch_shard_started.threads'),
    'elasticsearch.thread_pool.fetch_shard_started.queue': ('gauge', 'thread_pool.fetch_shard_started.queue'),
    'elasticsearch.thread_pool.fetch_shard_started.rejected': ('rate', 'thread_pool.fetch_shard_started.rejected'),
    'elasticsearch.thread_pool.fetch_shard_store.active': ('gauge', 'thread_pool.fetch_shard_store.active'),
    'elasticsearch.thread_pool.fetch_shard_store.threads': ('gauge', 'thread_pool.fetch_shard_store.threads'),
    'elasticsearch.thread_pool.fetch_shard_store.queue': ('gauge', 'thread_pool.fetch_shard_store.queue'),
    'elasticsearch.thread_pool.fetch_shard_store.rejected': ('rate', 'thread_pool.fetch_shard_store.rejected'),
}
# The 'merge' thread pool was removed in ES 2.0.
ADDITIONAL_METRICS_PRE_2_0 = {
    'elasticsearch.thread_pool.merge.active': ('gauge', 'thread_pool.merge.active'),
    'elasticsearch.thread_pool.merge.threads': ('gauge', 'thread_pool.merge.threads'),
    'elasticsearch.thread_pool.merge.queue': ('gauge', 'thread_pool.merge.queue'),
    'elasticsearch.thread_pool.merge.rejected': ('rate', 'thread_pool.merge.rejected'),
}

ADDITIONAL_METRICS_POST_2_0 = {
    # Some of these may very well exist in previous ES versions, but not worth the time/effort
    # to find where they were introduced
    'elasticsearch.indices.query_cache.cache_size': ('gauge', 'indices.query_cache.cache_size'),
    'elasticsearch.indices.query_cache.cache_count': ('rate', 'indices.query_cache.cache_count'),
    'elasticsearch.indices.query_cache.total_count': ('rate', 'indices.query_cache.total_count'),
    'elasticsearch.indices.segments.doc_values_memory_in_bytes': (
        'gauge',
        'indices.segments.doc_values_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.norms_memory_in_bytes': ('gauge', 'indices.segments.norms_memory_in_bytes'),
    'elasticsearch.indices.segments.stored_fields_memory_in_bytes': (
        'gauge',
        'indices.segments.stored_fields_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.term_vectors_memory_in_bytes': (
        'gauge',
        'indices.segments.term_vectors_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.terms_memory_in_bytes': ('gauge', 'indices.segments.terms_memory_in_bytes'),
    'elasticsearch.indices.request_cache.memory_size_in_bytes': ('gauge', 'indices.request_cache.memory_size_in_bytes'),
    'elasticsearch.indices.request_cache.evictions': ('rate', 'indices.request_cache.evictions'),
    'elasticsearch.indices.request_cache.evictions.count': ('monotonic_count', 'indices.request_cache.evictions'),
    'elasticsearch.indices.request_cache.hit_count': ('rate', 'indices.request_cache.hit_count'),
    'elasticsearch.indices.request_cache.miss_count': ('rate', 'indices.request_cache.miss_count'),
    'elasticsearch.indices.request_cache.miss_count.count': ('monotonic_count', 'indices.request_cache.miss_count'),
}

# index_failed counter and the 'force_merge' thread pool, added in ES 2.1.
ADDITIONAL_METRICS_POST_2_1 = {
    'elasticsearch.indices.indexing.index_failed': ('rate', 'indices.indexing.index_failed'),
    'elasticsearch.thread_pool.force_merge.active': ('gauge', 'thread_pool.force_merge.active'),
    'elasticsearch.thread_pool.force_merge.threads': ('gauge', 'thread_pool.force_merge.threads'),
    'elasticsearch.thread_pool.force_merge.queue': ('gauge', 'thread_pool.force_merge.queue'),
    'elasticsearch.thread_pool.force_merge.rejected': ('rate', 'thread_pool.force_merge.rejected'),
}

# ES 5.x moved disk I/O stats under 'fs.io_stats' and renamed the in-flight
# requests breaker. NOTE(review): the *_size_in_bytes metric names map to
# '*_kilobytes' source fields here — unit mismatch inherited from upstream;
# confirm before changing, as renaming would break existing dashboards.
ADDITIONAL_METRICS_5_x = {
    'elasticsearch.fs.total.disk_io_op': ('rate', 'fs.io_stats.total.operations'),
    'elasticsearch.fs.total.disk_reads': ('rate', 'fs.io_stats.total.read_operations'),
    'elasticsearch.fs.total.disk_writes': ('rate', 'fs.io_stats.total.write_operations'),
    'elasticsearch.fs.total.disk_read_size_in_bytes': ('gauge', 'fs.io_stats.total.read_kilobytes'),
    'elasticsearch.fs.total.disk_write_size_in_bytes': ('gauge', 'fs.io_stats.total.write_kilobytes'),
    'elasticsearch.breakers.inflight_requests.tripped': ('gauge', 'breakers.in_flight_requests.tripped'),
    'elasticsearch.breakers.inflight_requests.overhead': ('gauge', 'breakers.in_flight_requests.overhead'),
    'elasticsearch.breakers.inflight_requests.estimated_size_in_bytes': (
        'gauge',
        'breakers.in_flight_requests.estimated_size_in_bytes',
    ),
    'elasticsearch.search.scroll.total': ('gauge', 'indices.search.scroll_total'),
    'elasticsearch.search.scroll.total.count': ('monotonic_count', 'indices.search.scroll_total'),
    'elasticsearch.search.scroll.time': ('gauge', 'indices.search.scroll_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.search.scroll.time.count': (
        'monotonic_count',
        'indices.search.scroll_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.search.scroll.current': ('gauge', 'indices.search.scroll_current'),
}

# The 'bulk' thread pool was renamed to 'write' in ES 6.3.
ADDITIONAL_METRICS_PRE_6_3 = {
    'elasticsearch.thread_pool.bulk.active': ('gauge', 'thread_pool.bulk.active'),
    'elasticsearch.thread_pool.bulk.threads': ('gauge', 'thread_pool.bulk.threads'),
    'elasticsearch.thread_pool.bulk.threads.count': ('monotonic_count', 'thread_pool.bulk.threads'),
    'elasticsearch.thread_pool.bulk.queue': ('gauge', 'thread_pool.bulk.queue'),
    'elasticsearch.thread_pool.bulk.rejected': ('rate', 'thread_pool.bulk.rejected'),
    'elasticsearch.thread_pool.bulk.rejected.count': ('monotonic_count', 'thread_pool.bulk.rejected'),
    'elasticsearch.thread_pool.bulk.completed': ('rate', 'thread_pool.bulk.completed'),
    'elasticsearch.thread_pool.bulk.completed.count': ('monotonic_count', 'thread_pool.bulk.completed'),
}

ADDITIONAL_METRICS_POST_6_3 = {
    'elasticsearch.thread_pool.write.active': ('gauge', 'thread_pool.write.active'),
    'elasticsearch.thread_pool.write.threads': ('gauge', 'thread_pool.write.threads'),
    'elasticsearch.thread_pool.write.threads.count': ('monotonic_count', 'thread_pool.write.threads'),
    'elasticsearch.thread_pool.write.queue': ('gauge', 'thread_pool.write.queue'),
    'elasticsearch.thread_pool.write.rejected': ('rate', 'thread_pool.write.rejected'),
    'elasticsearch.thread_pool.write.rejected.count': ('monotonic_count', 'thread_pool.write.rejected'),
    'elasticsearch.thread_pool.write.completed': ('rate', 'thread_pool.write.completed'),
    'elasticsearch.thread_pool.write.completed.count': ('monotonic_count', 'thread_pool.write.completed'),
}
# Metrics read from the _cluster/health endpoint payload.
CLUSTER_HEALTH_METRICS = {
    'elasticsearch.number_of_nodes': ('gauge', 'number_of_nodes'),
    'elasticsearch.number_of_data_nodes': ('gauge', 'number_of_data_nodes'),
    'elasticsearch.active_primary_shards': ('gauge', 'active_primary_shards'),
    'elasticsearch.active_shards': ('gauge', 'active_shards'),
    'elasticsearch.relocating_shards': ('gauge', 'relocating_shards'),
    'elasticsearch.initializing_shards': ('gauge', 'initializing_shards'),
    'elasticsearch.unassigned_shards': ('gauge', 'unassigned_shards'),
    # Map the textual status to a number; unknown statuses become -1.
    'elasticsearch.cluster_status': ('gauge', 'status', lambda v: {'red': 0, 'yellow': 1, 'green': 2}.get(v, -1)),
}

CLUSTER_HEALTH_METRICS_POST_2_4 = {'elasticsearch.delayed_unassigned_shards': ('gauge', 'delayed_unassigned_shards')}

# Aggregated pending-task counts/timings derived from _cluster/pending_tasks.
CLUSTER_PENDING_TASKS = {
    'elasticsearch.pending_tasks_total': ('gauge', 'pending_task_total'),
    'elasticsearch.pending_tasks_priority_high': ('gauge', 'pending_tasks_priority_high'),
    'elasticsearch.pending_tasks_priority_urgent': ('gauge', 'pending_tasks_priority_urgent'),
    'elasticsearch.pending_tasks_time_in_queue': ('gauge', 'pending_tasks_time_in_queue'),
}

# Snapshot lifecycle management policy stats (ES >= 7.4, see slm_stats_for_version).
SLM_POLICY_METRICS = {
    'elasticsearch.slm.snapshot_deletion_failures': ('gauge', 'stats.snapshot_deletion_failures'),
    'elasticsearch.slm.snapshots_deleted': ('gauge', 'stats.snapshots_deleted'),
    'elasticsearch.slm.snapshots_failed': ('gauge', 'stats.snapshots_failed'),
    'elasticsearch.slm.snapshots_taken': ('gauge', 'stats.snapshots_taken'),
}

# Host-level system metrics taken from node stats; byte values are reported in mebibytes.
NODE_SYSTEM_METRICS = {
    'system.mem.free': ('gauge', 'os.mem.free_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.mem.usable': ('gauge', 'os.mem.free_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.mem.used': ('gauge', 'os.mem.used_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.swap.free': ('gauge', 'os.swap.free_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.swap.used': ('gauge', 'os.swap.used_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.net.bytes_rcvd': ('gauge', 'transport.rx_size_in_bytes'),
    'system.net.bytes_sent': ('gauge', 'transport.tx_size_in_bytes'),
}

NODE_SYSTEM_METRICS_POST_1 = {
    'system.mem.total': ('gauge', 'os.mem.total_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.swap.total': ('gauge', 'os.swap.total_in_bytes', lambda b: byte_to_mebibyte(b)),
}

NODE_SYSTEM_METRICS_POST_5 = {
    # 'os.cpu.percent' is utilization, so idle is its complement.
    'system.cpu.idle': ('gauge', 'os.cpu.percent', lambda v: (100 - v)),
    'system.load.1': ('gauge', 'os.cpu.load_average.1m'),
    'system.load.5': ('gauge', 'os.cpu.load_average.5m'),
    'system.load.15': ('gauge', 'os.cpu.load_average.15m'),
    'elasticsearch.cgroup.cpu.stat.number_of_elapsed_periods': (
        'gauge',
        'os.cgroup.cpu.stat.number_of_elapsed_periods',
    ),
    'elasticsearch.cgroup.cpu.stat.number_of_times_throttled': (
        'gauge',
        'os.cgroup.cpu.stat.number_of_times_throttled',
    ),
    'elasticsearch.process.cpu.percent': ('gauge', 'process.cpu.percent'),
}

# Per-node disk/shard allocation metrics from the _cat/allocation endpoint.
CAT_ALLOCATION_METRICS = {
    'elasticsearch.shards': ('gauge', 'shards'),
    'elasticsearch.disk.indices': ('gauge', 'disk_indices'),
    'elasticsearch.disk.used': ('gauge', 'disk_used'),
    'elasticsearch.disk.avail': ('gauge', 'disk_avail'),
    'elasticsearch.disk.total': ('gauge', 'disk_total'),
    'elasticsearch.disk.percent': ('gauge', 'disk_percent'),
}
def stats_for_version(version, jvm_rate=False):
    """
    Get the proper set of stats metrics for the specified ES version
    """
    # `version` is compared as a [major, minor, patch] list throughout.
    # Later .update() calls deliberately override earlier entries, so the
    # ordering below matters — keep additions in ascending-version order.
    metrics = dict(STATS_METRICS)

    # JVM additional metrics
    if version >= [0, 90, 10]:
        metrics.update(JVM_METRICS_POST_0_90_10)
        if jvm_rate:
            # Optionally report JVM metrics as rates instead of gauges.
            metrics.update(JVM_METRICS_RATE)
    else:
        metrics.update(JVM_METRICS_PRE_0_90_10)

    # Additional Stats metrics
    if version >= [0, 90, 5]:
        metrics.update(ADDITIONAL_METRICS_POST_0_90_5)
    else:
        metrics.update(ADDITIONAL_METRICS_PRE_0_90_5)

    if version >= [1, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_1_0_0)

    # Metric groups that were removed in 2.0 (merge thread pool, 1.x fs stats).
    if version < [2, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_PRE_2_0)
        if version >= [0, 90, 5]:
            metrics.update(ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)
        if version >= [1, 0, 0]:
            metrics.update(ADDITIONAL_METRICS_1_x)

    if version >= [1, 3, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_1_3_0)
    if version >= [1, 4, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_1_4_0)
    if version >= [1, 5, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_1_5_0)
    if version >= [1, 6, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_1_6_0)
    if version >= [2, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_2_0)
    if version >= [2, 1, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_2_1)
    # 5.x remapped fs stats onto fs.io_stats, overriding the 1.x keys above.
    if version >= [5, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_5_x)
    if version < [5, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_PRE_5_0_0)
    # The 'bulk' thread pool was renamed 'write' in 6.3.
    if version >= [6, 3, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_6_3)
    else:
        metrics.update(ADDITIONAL_METRICS_PRE_6_3)
    if version < [7, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_PRE_7_0_0)
    if version >= [7, 2, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_7_2_0)
    if version >= [7, 9, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_7_9_0)

    return metrics
def pshard_stats_for_version(version):
    """
    Get the proper set of pshard metrics for the specified ES version
    """
    # Start from the metrics common to every supported version, then layer on
    # each version-gated group in ascending version order.
    metrics = dict(PRIMARY_SHARD_METRICS)
    gated_groups = (
        ([1, 0, 0], PRIMARY_SHARD_METRICS_POST_1_0_0),
        ([7, 2, 0], PRIMARY_SHARD_METRICS_POST_7_2_0),
    )
    for minimum_version, extra_metrics in gated_groups:
        if version >= minimum_version:
            metrics.update(extra_metrics)
    return metrics
def health_stats_for_version(version):
    """
    Get the proper set of health metrics for the specified ES version
    """
    # Delayed-unassigned-shards accounting only exists from ES 2.4 onwards.
    extra = CLUSTER_HEALTH_METRICS_POST_2_4 if version >= [2, 4, 0] else {}
    merged = dict(CLUSTER_HEALTH_METRICS)
    merged.update(extra)
    return merged
def slm_stats_for_version(version):
    """
    Get the proper set of SLM metrics for the specified ES version
    """
    # Snapshot lifecycle management stats only exist from ES 7.4 onwards;
    # older clusters get an empty mapping.
    if version >= [7, 4, 0]:
        return dict(SLM_POLICY_METRICS)
    return {}
def index_stats_for_version(version):
    """
    Get the proper set of index metrics for the specified ES version
    """
    # Any truthy version gets the full index metric set; an unknown/empty
    # version yields no index metrics at all.
    return dict(INDEX_STATS_METRICS) if version else {}
def node_system_stats_for_version(version):
    """
    Get the proper set of os metrics for the specified ES version
    """
    # Base system metrics apply everywhere; newer groups are merged in
    # ascending version order so later entries can override earlier ones.
    stats = dict(NODE_SYSTEM_METRICS)
    for minimum_version, extra_stats in (
        ([1, 0, 0], NODE_SYSTEM_METRICS_POST_1),
        ([5, 0, 0], NODE_SYSTEM_METRICS_POST_5),
    ):
        if version >= minimum_version:
            stats.update(extra_stats)
    return stats
# --- elastic/datadog_checks/elastic/metrics.py ---
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .utils import byte_to_mebibyte, ms_to_second
# Metrics definition format is a dictionary mapping:
# datadog_metric_name --> (datadog_metric_type, es_metric_name, optional_conversion_func)
# Clusterwise metrics, pre aggregated on ES, compatible with all ES versions
# Primary-shard (pshard) stats read from the _stats endpoint's '_all.primaries' tree.
PRIMARY_SHARD_METRICS = {
    'elasticsearch.primaries.docs.count': ('gauge', '_all.primaries.docs.count'),
    'elasticsearch.primaries.docs.deleted': ('gauge', '_all.primaries.docs.deleted'),
    'elasticsearch.primaries.store.size': ('gauge', '_all.primaries.store.size_in_bytes'),
    'elasticsearch.primaries.indexing.index.total': ('gauge', '_all.primaries.indexing.index_total'),
    'elasticsearch.primaries.indexing.index.time': (
        'gauge',
        '_all.primaries.indexing.index_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.indexing.index.current': ('gauge', '_all.primaries.indexing.index_current'),
    'elasticsearch.primaries.indexing.delete.total': ('gauge', '_all.primaries.indexing.delete_total'),
    'elasticsearch.primaries.indexing.delete.time': (
        'gauge',
        '_all.primaries.indexing.delete_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.indexing.delete.current': ('gauge', '_all.primaries.indexing.delete_current'),
    'elasticsearch.primaries.get.total': ('gauge', '_all.primaries.get.total'),
    'elasticsearch.primaries.get.time': ('gauge', '_all.primaries.get.time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.primaries.get.current': ('gauge', '_all.primaries.get.current'),
    'elasticsearch.primaries.get.exists.total': ('gauge', '_all.primaries.get.exists_total'),
    'elasticsearch.primaries.get.exists.time': (
        'gauge',
        '_all.primaries.get.exists_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.get.missing.total': ('gauge', '_all.primaries.get.missing_total'),
    'elasticsearch.primaries.get.missing.time': (
        'gauge',
        '_all.primaries.get.missing_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.search.query.total': ('gauge', '_all.primaries.search.query_total'),
    'elasticsearch.primaries.search.query.time': (
        'gauge',
        '_all.primaries.search.query_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.search.query.current': ('gauge', '_all.primaries.search.query_current'),
    'elasticsearch.primaries.search.fetch.total': ('gauge', '_all.primaries.search.fetch_total'),
    'elasticsearch.primaries.search.fetch.time': (
        'gauge',
        '_all.primaries.search.fetch_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.search.fetch.current': ('gauge', '_all.primaries.search.fetch_current'),
    # Number of indices: derived by counting keys of the 'indices' mapping.
    'elasticsearch.indices.count': ('gauge', 'indices', lambda indices: len(indices)),
}

# External refresh stats, available from ES 7.2.0 onwards.
PRIMARY_SHARD_METRICS_POST_7_2_0 = {
    'elasticsearch.primaries.refresh.external.total': ('gauge', '_all.primaries.refresh.external_total'),
    'elasticsearch.primaries.refresh.external.total.time': (
        'gauge',
        '_all.primaries.refresh.external_total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}
# Primary-shard merge/refresh/flush stats, available from ES 1.0.0 onwards.
PRIMARY_SHARD_METRICS_POST_1_0_0 = {
    'elasticsearch.primaries.merges.current': ('gauge', '_all.primaries.merges.current'),
    'elasticsearch.primaries.merges.current.docs': ('gauge', '_all.primaries.merges.current_docs'),
    'elasticsearch.primaries.merges.current.size': ('gauge', '_all.primaries.merges.current_size_in_bytes'),
    'elasticsearch.primaries.merges.total': ('gauge', '_all.primaries.merges.total'),
    'elasticsearch.primaries.merges.total.time': (
        'gauge',
        '_all.primaries.merges.total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.merges.total.docs': ('gauge', '_all.primaries.merges.total_docs'),
    'elasticsearch.primaries.merges.total.size': ('gauge', '_all.primaries.merges.total_size_in_bytes'),
    'elasticsearch.primaries.refresh.total': ('gauge', '_all.primaries.refresh.total'),
    'elasticsearch.primaries.refresh.total.time': (
        'gauge',
        '_all.primaries.refresh.total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.primaries.flush.total': ('gauge', '_all.primaries.flush.total'),
    'elasticsearch.primaries.flush.total.time': (
        'gauge',
        '_all.primaries.flush.total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}
# Metrics that are common to all Elasticsearch versions
# (node stats: indices, transport, thread pools, JVM, and filesystem totals).
# Millisecond fields are converted to seconds via ms_to_second.
STATS_METRICS = {
    'elasticsearch.docs.count': ('gauge', 'indices.docs.count'),
    'elasticsearch.docs.deleted': ('gauge', 'indices.docs.deleted'),
    'elasticsearch.store.size': ('gauge', 'indices.store.size_in_bytes'),
    'elasticsearch.indexing.index.total': ('gauge', 'indices.indexing.index_total'),
    'elasticsearch.indexing.index.total.count': ('monotonic_count', 'indices.indexing.index_total'),
    'elasticsearch.indexing.index.time': (
        'gauge',
        'indices.indexing.index_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indexing.index.time.count': (
        'monotonic_count',
        'indices.indexing.index_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indexing.index.current': ('gauge', 'indices.indexing.index_current'),
    'elasticsearch.indexing.delete.total': ('gauge', 'indices.indexing.delete_total'),
    'elasticsearch.indexing.delete.total.count': ('monotonic_count', 'indices.indexing.delete_total'),
    'elasticsearch.indexing.delete.time': (
        'gauge',
        'indices.indexing.delete_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indexing.delete.time.count': (
        'monotonic_count',
        'indices.indexing.delete_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indexing.delete.current': ('gauge', 'indices.indexing.delete_current'),
    'elasticsearch.get.total': ('gauge', 'indices.get.total'),
    'elasticsearch.get.total.count': ('monotonic_count', 'indices.get.total'),
    'elasticsearch.get.time': ('gauge', 'indices.get.time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.get.time.count': ('monotonic_count', 'indices.get.time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.get.current': ('gauge', 'indices.get.current'),
    'elasticsearch.get.exists.total': ('gauge', 'indices.get.exists_total'),
    'elasticsearch.get.exists.total.count': ('monotonic_count', 'indices.get.exists_total'),
    'elasticsearch.get.exists.time': ('gauge', 'indices.get.exists_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.get.exists.time.count': (
        'monotonic_count',
        'indices.get.exists_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.get.missing.total': ('gauge', 'indices.get.missing_total'),
    'elasticsearch.get.missing.total.count': ('monotonic_count', 'indices.get.missing_total'),
    'elasticsearch.get.missing.time': ('gauge', 'indices.get.missing_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.get.missing.time.count': (
        'monotonic_count',
        'indices.get.missing_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.search.query.total': ('gauge', 'indices.search.query_total'),
    'elasticsearch.search.query.total.count': ('monotonic_count', 'indices.search.query_total'),
    'elasticsearch.search.query.time': ('gauge', 'indices.search.query_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.search.query.time.count': (
        'monotonic_count',
        'indices.search.query_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.search.query.current': ('gauge', 'indices.search.query_current'),
    'elasticsearch.search.fetch.total': ('gauge', 'indices.search.fetch_total'),
    'elasticsearch.search.fetch.total.count': ('monotonic_count', 'indices.search.fetch_total'),
    'elasticsearch.search.fetch.time': ('gauge', 'indices.search.fetch_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.search.fetch.time.count': (
        'monotonic_count',
        'indices.search.fetch_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.search.fetch.current': ('gauge', 'indices.search.fetch_current'),
    'elasticsearch.indices.segments.count': ('gauge', 'indices.segments.count'),
    'elasticsearch.indices.segments.memory_in_bytes': ('gauge', 'indices.segments.memory_in_bytes'),
    'elasticsearch.merges.current': ('gauge', 'indices.merges.current'),
    'elasticsearch.merges.current.docs': ('gauge', 'indices.merges.current_docs'),
    'elasticsearch.merges.current.size': ('gauge', 'indices.merges.current_size_in_bytes'),
    'elasticsearch.merges.total': ('gauge', 'indices.merges.total'),
    'elasticsearch.merges.total.count': ('monotonic_count', 'indices.merges.total'),
    'elasticsearch.merges.total.time': ('gauge', 'indices.merges.total_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.merges.total.time.count': (
        'monotonic_count',
        'indices.merges.total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.merges.total.docs': ('gauge', 'indices.merges.total_docs'),
    'elasticsearch.merges.total.docs.count': ('monotonic_count', 'indices.merges.total_docs'),
    'elasticsearch.merges.total.size': ('gauge', 'indices.merges.total_size_in_bytes'),
    'elasticsearch.merges.total.size.count': ('monotonic_count', 'indices.merges.total_size_in_bytes'),
    'elasticsearch.refresh.total': ('gauge', 'indices.refresh.total'),
    'elasticsearch.refresh.total.count': ('monotonic_count', 'indices.refresh.total'),
    'elasticsearch.refresh.total.time': ('gauge', 'indices.refresh.total_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.refresh.total.time.count': (
        'monotonic_count',
        'indices.refresh.total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.flush.total': ('gauge', 'indices.flush.total'),
    'elasticsearch.flush.total.count': ('monotonic_count', 'indices.flush.total'),
    'elasticsearch.flush.total.time': ('gauge', 'indices.flush.total_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.flush.total.time.count': (
        'monotonic_count',
        'indices.flush.total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.process.open_fd': ('gauge', 'process.open_file_descriptors'),
    'elasticsearch.transport.rx_count': ('gauge', 'transport.rx_count'),
    'elasticsearch.transport.rx_count.count': ('monotonic_count', 'transport.rx_count'),
    'elasticsearch.transport.tx_count': ('gauge', 'transport.tx_count'),
    'elasticsearch.transport.tx_count.count': ('monotonic_count', 'transport.tx_count'),
    'elasticsearch.transport.rx_size': ('gauge', 'transport.rx_size_in_bytes'),
    'elasticsearch.transport.rx_size.count': ('monotonic_count', 'transport.rx_size_in_bytes'),
    'elasticsearch.transport.tx_size': ('gauge', 'transport.tx_size_in_bytes'),
    'elasticsearch.transport.tx_size.count': ('monotonic_count', 'transport.tx_size_in_bytes'),
    'elasticsearch.transport.server_open': ('gauge', 'transport.server_open'),
    'elasticsearch.thread_pool.flush.active': ('gauge', 'thread_pool.flush.active'),
    'elasticsearch.thread_pool.flush.threads': ('gauge', 'thread_pool.flush.threads'),
    'elasticsearch.thread_pool.flush.threads.count': ('monotonic_count', 'thread_pool.flush.threads'),
    'elasticsearch.thread_pool.flush.queue': ('gauge', 'thread_pool.flush.queue'),
    'elasticsearch.thread_pool.flush.rejected': ('rate', 'thread_pool.flush.rejected'),
    'elasticsearch.thread_pool.flush.rejected.count': ('monotonic_count', 'thread_pool.flush.rejected'),
    'elasticsearch.thread_pool.flush.completed': ('gauge', 'thread_pool.flush.completed'),
    'elasticsearch.thread_pool.flush.completed.count': ('monotonic_count', 'thread_pool.flush.completed'),
    'elasticsearch.thread_pool.generic.active': ('gauge', 'thread_pool.generic.active'),
    'elasticsearch.thread_pool.generic.threads': ('gauge', 'thread_pool.generic.threads'),
    'elasticsearch.thread_pool.generic.threads.count': ('monotonic_count', 'thread_pool.generic.threads'),
    'elasticsearch.thread_pool.generic.queue': ('gauge', 'thread_pool.generic.queue'),
    'elasticsearch.thread_pool.generic.rejected': ('rate', 'thread_pool.generic.rejected'),
    'elasticsearch.thread_pool.generic.rejected.count': ('monotonic_count', 'thread_pool.generic.rejected'),
    'elasticsearch.thread_pool.generic.completed': ('gauge', 'thread_pool.generic.completed'),
    'elasticsearch.thread_pool.generic.completed.count': ('monotonic_count', 'thread_pool.generic.completed'),
    'elasticsearch.thread_pool.get.active': ('gauge', 'thread_pool.get.active'),
    'elasticsearch.thread_pool.get.threads': ('gauge', 'thread_pool.get.threads'),
    'elasticsearch.thread_pool.get.threads.count': ('monotonic_count', 'thread_pool.get.threads'),
    'elasticsearch.thread_pool.get.queue': ('gauge', 'thread_pool.get.queue'),
    'elasticsearch.thread_pool.get.rejected': ('rate', 'thread_pool.get.rejected'),
    'elasticsearch.thread_pool.get.rejected.count': ('monotonic_count', 'thread_pool.get.rejected'),
    'elasticsearch.thread_pool.get.completed': ('gauge', 'thread_pool.get.completed'),
    'elasticsearch.thread_pool.get.completed.count': ('monotonic_count', 'thread_pool.get.completed'),
    'elasticsearch.thread_pool.management.active': ('gauge', 'thread_pool.management.active'),
    'elasticsearch.thread_pool.management.threads': ('gauge', 'thread_pool.management.threads'),
    'elasticsearch.thread_pool.management.threads.count': ('monotonic_count', 'thread_pool.management.threads'),
    'elasticsearch.thread_pool.management.queue': ('gauge', 'thread_pool.management.queue'),
    'elasticsearch.thread_pool.management.rejected': ('rate', 'thread_pool.management.rejected'),
    'elasticsearch.thread_pool.management.rejected.count': ('monotonic_count', 'thread_pool.management.rejected'),
    'elasticsearch.thread_pool.management.completed': ('gauge', 'thread_pool.management.completed'),
    'elasticsearch.thread_pool.management.completed.count': ('monotonic_count', 'thread_pool.management.completed'),
    'elasticsearch.thread_pool.refresh.active': ('gauge', 'thread_pool.refresh.active'),
    'elasticsearch.thread_pool.refresh.threads': ('gauge', 'thread_pool.refresh.threads'),
    'elasticsearch.thread_pool.refresh.threads.count': ('monotonic_count', 'thread_pool.refresh.threads'),
    'elasticsearch.thread_pool.refresh.queue': ('gauge', 'thread_pool.refresh.queue'),
    'elasticsearch.thread_pool.refresh.rejected': ('rate', 'thread_pool.refresh.rejected'),
    'elasticsearch.thread_pool.refresh.rejected.count': ('monotonic_count', 'thread_pool.refresh.rejected'),
    'elasticsearch.thread_pool.refresh.completed': ('gauge', 'thread_pool.refresh.completed'),
    'elasticsearch.thread_pool.refresh.completed.count': ('monotonic_count', 'thread_pool.refresh.completed'),
    'elasticsearch.thread_pool.search.active': ('gauge', 'thread_pool.search.active'),
    'elasticsearch.thread_pool.search.threads': ('gauge', 'thread_pool.search.threads'),
    'elasticsearch.thread_pool.search.threads.count': ('monotonic_count', 'thread_pool.search.threads'),
    'elasticsearch.thread_pool.search.queue': ('gauge', 'thread_pool.search.queue'),
    'elasticsearch.thread_pool.search.rejected': ('rate', 'thread_pool.search.rejected'),
    'elasticsearch.thread_pool.search.rejected.count': ('monotonic_count', 'thread_pool.search.rejected'),
    'elasticsearch.thread_pool.search.completed': ('gauge', 'thread_pool.search.completed'),
    'elasticsearch.thread_pool.search.completed.count': ('monotonic_count', 'thread_pool.search.completed'),
    'elasticsearch.thread_pool.snapshot.active': ('gauge', 'thread_pool.snapshot.active'),
    'elasticsearch.thread_pool.snapshot.threads': ('gauge', 'thread_pool.snapshot.threads'),
    'elasticsearch.thread_pool.snapshot.threads.count': ('monotonic_count', 'thread_pool.snapshot.threads'),
    'elasticsearch.thread_pool.snapshot.queue': ('gauge', 'thread_pool.snapshot.queue'),
    'elasticsearch.thread_pool.snapshot.rejected': ('rate', 'thread_pool.snapshot.rejected'),
    'elasticsearch.thread_pool.snapshot.rejected.count': ('monotonic_count', 'thread_pool.snapshot.rejected'),
    'elasticsearch.thread_pool.snapshot.completed': ('gauge', 'thread_pool.snapshot.completed'),
    'elasticsearch.thread_pool.snapshot.completed.count': ('monotonic_count', 'thread_pool.snapshot.completed'),
    'elasticsearch.thread_pool.warmer.active': ('gauge', 'thread_pool.warmer.active'),
    'elasticsearch.thread_pool.warmer.threads': ('gauge', 'thread_pool.warmer.threads'),
    'elasticsearch.thread_pool.warmer.queue': ('gauge', 'thread_pool.warmer.queue'),
    'elasticsearch.thread_pool.warmer.rejected': ('rate', 'thread_pool.warmer.rejected'),
    'elasticsearch.thread_pool.warmer.completed': ('gauge', 'thread_pool.warmer.completed'),
    'elasticsearch.http.current_open': ('gauge', 'http.current_open'),
    'elasticsearch.http.total_opened': ('gauge', 'http.total_opened'),
    'elasticsearch.http.total_opened.count': ('monotonic_count', 'http.total_opened'),
    'jvm.mem.heap_committed': ('gauge', 'jvm.mem.heap_committed_in_bytes'),
    'jvm.mem.heap_used': ('gauge', 'jvm.mem.heap_used_in_bytes'),
    'jvm.mem.heap_in_use': ('gauge', 'jvm.mem.heap_used_percent'),
    'jvm.mem.heap_max': ('gauge', 'jvm.mem.heap_max_in_bytes'),
    'jvm.mem.non_heap_committed': ('gauge', 'jvm.mem.non_heap_committed_in_bytes'),
    'jvm.mem.non_heap_used': ('gauge', 'jvm.mem.non_heap_used_in_bytes'),
    'jvm.mem.pools.young.used': ('gauge', 'jvm.mem.pools.young.used_in_bytes'),
    'jvm.mem.pools.young.max': ('gauge', 'jvm.mem.pools.young.max_in_bytes'),
    'jvm.mem.pools.old.used': ('gauge', 'jvm.mem.pools.old.used_in_bytes'),
    'jvm.mem.pools.old.max': ('gauge', 'jvm.mem.pools.old.max_in_bytes'),
    'jvm.mem.pools.survivor.used': ('gauge', 'jvm.mem.pools.survivor.used_in_bytes'),
    'jvm.mem.pools.survivor.max': ('gauge', 'jvm.mem.pools.survivor.max_in_bytes'),
    'jvm.threads.count': ('gauge', 'jvm.threads.count'),
    'jvm.threads.peak_count': ('gauge', 'jvm.threads.peak_count'),
    'elasticsearch.fs.total.total_in_bytes': ('gauge', 'fs.total.total_in_bytes'),
    'elasticsearch.fs.total.free_in_bytes': ('gauge', 'fs.total.free_in_bytes'),
    'elasticsearch.fs.total.available_in_bytes': ('gauge', 'fs.total.available_in_bytes'),
}
# Indexing-pressure node stats; applied by stats_for_version() when version >= [7, 9, 0].
ADDITIONAL_METRICS_POST_7_9_0 = {
    'elasticsearch.indexing_pressure.memory.current.coordinating_in_bytes': (
        'gauge',
        'indexing_pressure.memory.current.coordinating_in_bytes',
    ),
    'elasticsearch.indexing_pressure.memory.current.primary_in_bytes': (
        'gauge',
        'indexing_pressure.memory.current.primary_in_bytes',
    ),
    'elasticsearch.indexing_pressure.memory.current.replica_in_bytes': (
        'gauge',
        'indexing_pressure.memory.current.replica_in_bytes',
    ),
}
# External refresh stats; applied when version >= [7, 2, 0]. A third tuple element,
# when present, is a conversion applied to the raw ES value (here: ms -> s).
ADDITIONAL_METRICS_POST_7_2_0 = {
    'elasticsearch.refresh.external.total': ('gauge', 'indices.refresh.external_total'),
    'elasticsearch.refresh.external.total.time': (
        'gauge',
        'indices.refresh.external_total_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}
# "index" thread-pool stats; applied only when version < [7, 0, 0] (see stats_for_version).
ADDITIONAL_METRICS_PRE_7_0_0 = {
    'elasticsearch.thread_pool.index.active': ('gauge', 'thread_pool.index.active'),
    'elasticsearch.thread_pool.index.queue': ('gauge', 'thread_pool.index.queue'),
    'elasticsearch.thread_pool.index.threads': ('gauge', 'thread_pool.index.threads'),
    'elasticsearch.thread_pool.index.threads.count': ('monotonic_count', 'thread_pool.index.threads'),
    'elasticsearch.thread_pool.index.rejected': ('rate', 'thread_pool.index.rejected'),
    'elasticsearch.thread_pool.index.rejected.count': ('monotonic_count', 'thread_pool.index.rejected'),
    'elasticsearch.thread_pool.index.completed': ('gauge', 'thread_pool.index.completed'),
    'elasticsearch.thread_pool.index.completed.count': ('monotonic_count', 'thread_pool.index.completed'),
}
# "percolate"/"suggest" thread-pool stats; applied only when version < [5, 0, 0].
ADDITIONAL_METRICS_PRE_5_0_0 = {
    'elasticsearch.thread_pool.percolate.active': ('gauge', 'thread_pool.percolate.active'),
    'elasticsearch.thread_pool.percolate.threads': ('gauge', 'thread_pool.percolate.threads'),
    'elasticsearch.thread_pool.percolate.queue': ('gauge', 'thread_pool.percolate.queue'),
    'elasticsearch.thread_pool.percolate.rejected': ('rate', 'thread_pool.percolate.rejected'),
    'elasticsearch.thread_pool.suggest.active': ('gauge', 'thread_pool.suggest.active'),
    'elasticsearch.thread_pool.suggest.threads': ('gauge', 'thread_pool.suggest.threads'),
    'elasticsearch.thread_pool.suggest.queue': ('gauge', 'thread_pool.suggest.queue'),
    'elasticsearch.thread_pool.suggest.rejected': ('rate', 'thread_pool.suggest.rejected'),
}
# Metrics for index level; returned by index_stats_for_version().
# Mapping: datadog_metric_name -> (datadog_metric_type, es_metric_name).
INDEX_STATS_METRICS = {
    'elasticsearch.index.health': ('gauge', 'health'),
    'elasticsearch.index.health.reverse': ('gauge', 'health_reverse'),
    'elasticsearch.index.docs.count': ('gauge', 'docs_count'),
    'elasticsearch.index.docs.deleted': ('gauge', 'docs_deleted'),
    'elasticsearch.index.primary_shards': ('gauge', 'primary_shards'),
    'elasticsearch.index.replica_shards': ('gauge', 'replica_shards'),
    'elasticsearch.index.primary_store_size': ('gauge', 'primary_store_size'),
    'elasticsearch.index.store_size': ('gauge', 'store_size'),
}
# JVM GC collector stats; applied when version >= [0, 90, 10]. Times are converted ms -> s.
JVM_METRICS_POST_0_90_10 = {
    'jvm.gc.collectors.young.count': ('gauge', 'jvm.gc.collectors.young.collection_count'),
    'jvm.gc.collectors.young.collection_time': (
        'gauge',
        'jvm.gc.collectors.young.collection_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'jvm.gc.collectors.old.count': ('gauge', 'jvm.gc.collectors.old.collection_count'),
    'jvm.gc.collectors.old.collection_time': (
        'gauge',
        'jvm.gc.collectors.old.collection_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}
# Same GC counters as above, but submitted as rate; only applied when the
# jvm_rate flag is passed to stats_for_version().
JVM_METRICS_RATE = {
    # Submit metrics as rate
    'jvm.gc.collectors.young.rate': ('rate', 'jvm.gc.collectors.young.collection_count'),
    'jvm.gc.collectors.young.collection_time.rate': (
        'rate',
        'jvm.gc.collectors.young.collection_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'jvm.gc.collectors.old.rate': ('rate', 'jvm.gc.collectors.old.collection_count'),
    'jvm.gc.collectors.old.collection_time.rate': (
        'rate',
        'jvm.gc.collectors.old.collection_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}
# Collector-specific GC stats; applied only when version < [0, 90, 10].
JVM_METRICS_PRE_0_90_10 = {
    'jvm.gc.concurrent_mark_sweep.count': ('gauge', 'jvm.gc.collectors.ConcurrentMarkSweep.collection_count'),
    'jvm.gc.concurrent_mark_sweep.collection_time': (
        'gauge',
        'jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'jvm.gc.par_new.count': ('gauge', 'jvm.gc.collectors.ParNew.collection_count'),
    'jvm.gc.par_new.collection_time': (
        'gauge',
        'jvm.gc.collectors.ParNew.collection_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'jvm.gc.collection_count': ('gauge', 'jvm.gc.collection_count'),
    'jvm.gc.collection_time': ('gauge', 'jvm.gc.collection_time_in_millis', lambda ms: ms_to_second(ms)),
}
# Search/fielddata stats; applied when version >= [0, 90, 5].
ADDITIONAL_METRICS_POST_0_90_5 = {
    'elasticsearch.search.fetch.open_contexts': ('gauge', 'indices.search.open_contexts'),
    'elasticsearch.fielddata.size': ('gauge', 'indices.fielddata.memory_size_in_bytes'),
    'elasticsearch.fielddata.evictions': ('gauge', 'indices.fielddata.evictions'),
    'elasticsearch.fielddata.evictions.count': ('monotonic_count', 'indices.fielddata.evictions'),
}
# Filter/id cache stats; applied when [0, 90, 5] <= version < [2, 0, 0].
ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0 = {
    'elasticsearch.cache.filter.evictions': ('gauge', 'indices.filter_cache.evictions'),
    'elasticsearch.cache.filter.evictions.count': ('monotonic_count', 'indices.filter_cache.evictions'),
    'elasticsearch.cache.filter.size': ('gauge', 'indices.filter_cache.memory_size_in_bytes'),
    'elasticsearch.id_cache.size': ('gauge', 'indices.id_cache.memory_size_in_bytes'),
}
# Cache stats under the older ES names; applied only when version < [0, 90, 5].
ADDITIONAL_METRICS_PRE_0_90_5 = {
    'elasticsearch.cache.field.evictions': ('gauge', 'indices.cache.field_evictions'),
    'elasticsearch.cache.field.size': ('gauge', 'indices.cache.field_size_in_bytes'),
    'elasticsearch.cache.filter.count': ('gauge', 'indices.cache.filter_count'),
    'elasticsearch.cache.filter.evictions': ('gauge', 'indices.cache.filter_evictions'),
    'elasticsearch.cache.filter.size': ('gauge', 'indices.cache.filter_size_in_bytes'),
}
# Translog stats; applied when version >= [1, 0, 0].
ADDITIONAL_METRICS_POST_1_0_0 = {
    'elasticsearch.indices.translog.size_in_bytes': ('gauge', 'indices.translog.size_in_bytes'),
    'elasticsearch.indices.translog.operations': ('gauge', 'indices.translog.operations'),
}
# Stats are only valid for v1.x (applied when [1, 0, 0] <= version < [2, 0, 0]);
# ADDITIONAL_METRICS_5_x re-maps the same datadog names from newer ES sources.
ADDITIONAL_METRICS_1_x = {
    'elasticsearch.fs.total.disk_reads': ('rate', 'fs.total.disk_reads'),
    'elasticsearch.fs.total.disk_writes': ('rate', 'fs.total.disk_writes'),
    'elasticsearch.fs.total.disk_io_op': ('rate', 'fs.total.disk_io_op'),
    'elasticsearch.fs.total.disk_read_size_in_bytes': ('gauge', 'fs.total.disk_read_size_in_bytes'),
    'elasticsearch.fs.total.disk_write_size_in_bytes': ('gauge', 'fs.total.disk_write_size_in_bytes'),
    'elasticsearch.fs.total.disk_io_size_in_bytes': ('gauge', 'fs.total.disk_io_size_in_bytes'),
}
# Segment memory stats; applied when version >= [1, 3, 0].
ADDITIONAL_METRICS_POST_1_3_0 = {
    'elasticsearch.indices.segments.index_writer_memory_in_bytes': (
        'gauge',
        'indices.segments.index_writer_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.version_map_memory_in_bytes': (
        'gauge',
        'indices.segments.version_map_memory_in_bytes',
    ),
}
# Throttling, query-cache, circuit-breaker and listener-pool stats; applied when
# version >= [1, 4, 0].
ADDITIONAL_METRICS_POST_1_4_0 = {
    'elasticsearch.indices.indexing.throttle_time': (
        'rate',
        'indices.indexing.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indices.indexing.throttle_time.count': (
        'monotonic_count',
        'indices.indexing.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indices.query_cache.memory_size_in_bytes': ('gauge', 'indices.query_cache.memory_size_in_bytes'),
    'elasticsearch.indices.query_cache.hit_count': ('rate', 'indices.query_cache.hit_count'),
    'elasticsearch.indices.query_cache.hit_count.count': ('monotonic_count', 'indices.query_cache.hit_count'),
    'elasticsearch.indices.query_cache.miss_count': ('rate', 'indices.query_cache.miss_count'),
    # NOTE(review): '.total' suffix below breaks the '.count' naming pattern used by the
    # sibling monotonic_count metrics — presumably kept for backward compatibility; confirm
    # before renaming, since the key is the published metric name.
    'elasticsearch.indices.query_cache.miss_count.total': ('monotonic_count', 'indices.query_cache.miss_count'),
    'elasticsearch.indices.query_cache.evictions': ('rate', 'indices.query_cache.evictions'),
    'elasticsearch.indices.query_cache.evictions.count': ('monotonic_count', 'indices.query_cache.evictions'),
    'elasticsearch.indices.segments.index_writer_max_memory_in_bytes': (
        'gauge',
        'indices.segments.index_writer_max_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes': (
        'gauge',
        'indices.segments.fixed_bit_set_memory_in_bytes',
    ),
    'elasticsearch.breakers.fielddata.estimated_size_in_bytes': ('gauge', 'breakers.fielddata.estimated_size_in_bytes'),
    'elasticsearch.breakers.fielddata.overhead': ('gauge', 'breakers.fielddata.overhead'),
    'elasticsearch.breakers.fielddata.tripped': ('rate', 'breakers.fielddata.tripped'),
    'elasticsearch.breakers.parent.estimated_size_in_bytes': ('gauge', 'breakers.parent.estimated_size_in_bytes'),
    'elasticsearch.breakers.parent.overhead': ('gauge', 'breakers.parent.overhead'),
    'elasticsearch.breakers.parent.tripped': ('rate', 'breakers.parent.tripped'),
    'elasticsearch.breakers.request.estimated_size_in_bytes': ('gauge', 'breakers.request.estimated_size_in_bytes'),
    'elasticsearch.breakers.request.overhead': ('gauge', 'breakers.request.overhead'),
    'elasticsearch.breakers.request.tripped': ('rate', 'breakers.request.tripped'),
    'elasticsearch.thread_pool.listener.active': ('gauge', 'thread_pool.listener.active'),
    'elasticsearch.thread_pool.listener.threads': ('gauge', 'thread_pool.listener.threads'),
    'elasticsearch.thread_pool.listener.threads.count': ('monotonic_count', 'thread_pool.listener.threads'),
    'elasticsearch.thread_pool.listener.queue': ('gauge', 'thread_pool.listener.queue'),
    'elasticsearch.thread_pool.listener.rejected': ('rate', 'thread_pool.listener.rejected'),
    'elasticsearch.thread_pool.listener.rejected.count': ('monotonic_count', 'thread_pool.listener.rejected'),
}
# Recovery stats; applied when version >= [1, 5, 0].
ADDITIONAL_METRICS_POST_1_5_0 = {
    'elasticsearch.indices.recovery.current_as_source': ('gauge', 'indices.recovery.current_as_source'),
    'elasticsearch.indices.recovery.current_as_target': ('gauge', 'indices.recovery.current_as_target'),
    'elasticsearch.indices.recovery.throttle_time': (
        'rate',
        'indices.recovery.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.indices.recovery.throttle_time.count': (
        'monotonic_count',
        'indices.recovery.throttle_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
}
# Shard-fetch thread-pool stats; applied when version >= [1, 6, 0].
ADDITIONAL_METRICS_POST_1_6_0 = {
    'elasticsearch.thread_pool.fetch_shard_started.active': ('gauge', 'thread_pool.fetch_shard_started.active'),
    'elasticsearch.thread_pool.fetch_shard_started.threads': ('gauge', 'thread_pool.fetch_shard_started.threads'),
    'elasticsearch.thread_pool.fetch_shard_started.queue': ('gauge', 'thread_pool.fetch_shard_started.queue'),
    'elasticsearch.thread_pool.fetch_shard_started.rejected': ('rate', 'thread_pool.fetch_shard_started.rejected'),
    'elasticsearch.thread_pool.fetch_shard_store.active': ('gauge', 'thread_pool.fetch_shard_store.active'),
    'elasticsearch.thread_pool.fetch_shard_store.threads': ('gauge', 'thread_pool.fetch_shard_store.threads'),
    'elasticsearch.thread_pool.fetch_shard_store.queue': ('gauge', 'thread_pool.fetch_shard_store.queue'),
    'elasticsearch.thread_pool.fetch_shard_store.rejected': ('rate', 'thread_pool.fetch_shard_store.rejected'),
}
# "merge" thread-pool stats; applied only when version < [2, 0, 0].
ADDITIONAL_METRICS_PRE_2_0 = {
    'elasticsearch.thread_pool.merge.active': ('gauge', 'thread_pool.merge.active'),
    'elasticsearch.thread_pool.merge.threads': ('gauge', 'thread_pool.merge.threads'),
    'elasticsearch.thread_pool.merge.queue': ('gauge', 'thread_pool.merge.queue'),
    'elasticsearch.thread_pool.merge.rejected': ('rate', 'thread_pool.merge.rejected'),
}
# Applied when version >= [2, 0, 0].
ADDITIONAL_METRICS_POST_2_0 = {
    # Some of these may very well exist in previous ES versions, but not worth the time/effort
    # to find where they were introduced
    'elasticsearch.indices.query_cache.cache_size': ('gauge', 'indices.query_cache.cache_size'),
    'elasticsearch.indices.query_cache.cache_count': ('rate', 'indices.query_cache.cache_count'),
    'elasticsearch.indices.query_cache.total_count': ('rate', 'indices.query_cache.total_count'),
    'elasticsearch.indices.segments.doc_values_memory_in_bytes': (
        'gauge',
        'indices.segments.doc_values_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.norms_memory_in_bytes': ('gauge', 'indices.segments.norms_memory_in_bytes'),
    'elasticsearch.indices.segments.stored_fields_memory_in_bytes': (
        'gauge',
        'indices.segments.stored_fields_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.term_vectors_memory_in_bytes': (
        'gauge',
        'indices.segments.term_vectors_memory_in_bytes',
    ),
    'elasticsearch.indices.segments.terms_memory_in_bytes': ('gauge', 'indices.segments.terms_memory_in_bytes'),
    'elasticsearch.indices.request_cache.memory_size_in_bytes': ('gauge', 'indices.request_cache.memory_size_in_bytes'),
    'elasticsearch.indices.request_cache.evictions': ('rate', 'indices.request_cache.evictions'),
    'elasticsearch.indices.request_cache.evictions.count': ('monotonic_count', 'indices.request_cache.evictions'),
    'elasticsearch.indices.request_cache.hit_count': ('rate', 'indices.request_cache.hit_count'),
    'elasticsearch.indices.request_cache.miss_count': ('rate', 'indices.request_cache.miss_count'),
    'elasticsearch.indices.request_cache.miss_count.count': ('monotonic_count', 'indices.request_cache.miss_count'),
}
# Applied when version >= [2, 1, 0].
ADDITIONAL_METRICS_POST_2_1 = {
    'elasticsearch.indices.indexing.index_failed': ('rate', 'indices.indexing.index_failed'),
    'elasticsearch.thread_pool.force_merge.active': ('gauge', 'thread_pool.force_merge.active'),
    'elasticsearch.thread_pool.force_merge.threads': ('gauge', 'thread_pool.force_merge.threads'),
    'elasticsearch.thread_pool.force_merge.queue': ('gauge', 'thread_pool.force_merge.queue'),
    'elasticsearch.thread_pool.force_merge.rejected': ('rate', 'thread_pool.force_merge.rejected'),
}
# Applied when version >= [5, 0, 0]; re-maps the fs.* datadog names that
# ADDITIONAL_METRICS_1_x sourced from the old fs.total.* ES fields.
ADDITIONAL_METRICS_5_x = {
    'elasticsearch.fs.total.disk_io_op': ('rate', 'fs.io_stats.total.operations'),
    'elasticsearch.fs.total.disk_reads': ('rate', 'fs.io_stats.total.read_operations'),
    'elasticsearch.fs.total.disk_writes': ('rate', 'fs.io_stats.total.write_operations'),
    'elasticsearch.fs.total.disk_read_size_in_bytes': ('gauge', 'fs.io_stats.total.read_kilobytes'),
    'elasticsearch.fs.total.disk_write_size_in_bytes': ('gauge', 'fs.io_stats.total.write_kilobytes'),
    'elasticsearch.breakers.inflight_requests.tripped': ('gauge', 'breakers.in_flight_requests.tripped'),
    'elasticsearch.breakers.inflight_requests.overhead': ('gauge', 'breakers.in_flight_requests.overhead'),
    'elasticsearch.breakers.inflight_requests.estimated_size_in_bytes': (
        'gauge',
        'breakers.in_flight_requests.estimated_size_in_bytes',
    ),
    'elasticsearch.search.scroll.total': ('gauge', 'indices.search.scroll_total'),
    'elasticsearch.search.scroll.total.count': ('monotonic_count', 'indices.search.scroll_total'),
    'elasticsearch.search.scroll.time': ('gauge', 'indices.search.scroll_time_in_millis', lambda ms: ms_to_second(ms)),
    'elasticsearch.search.scroll.time.count': (
        'monotonic_count',
        'indices.search.scroll_time_in_millis',
        lambda ms: ms_to_second(ms),
    ),
    'elasticsearch.search.scroll.current': ('gauge', 'indices.search.scroll_current'),
}
# "bulk" thread-pool stats; applied only when version < [6, 3, 0].
ADDITIONAL_METRICS_PRE_6_3 = {
    'elasticsearch.thread_pool.bulk.active': ('gauge', 'thread_pool.bulk.active'),
    'elasticsearch.thread_pool.bulk.threads': ('gauge', 'thread_pool.bulk.threads'),
    'elasticsearch.thread_pool.bulk.threads.count': ('monotonic_count', 'thread_pool.bulk.threads'),
    'elasticsearch.thread_pool.bulk.queue': ('gauge', 'thread_pool.bulk.queue'),
    'elasticsearch.thread_pool.bulk.rejected': ('rate', 'thread_pool.bulk.rejected'),
    'elasticsearch.thread_pool.bulk.rejected.count': ('monotonic_count', 'thread_pool.bulk.rejected'),
    'elasticsearch.thread_pool.bulk.completed': ('rate', 'thread_pool.bulk.completed'),
    'elasticsearch.thread_pool.bulk.completed.count': ('monotonic_count', 'thread_pool.bulk.completed'),
}
# "write" thread-pool stats; applied when version >= [6, 3, 0] (mirrors the bulk set above).
ADDITIONAL_METRICS_POST_6_3 = {
    'elasticsearch.thread_pool.write.active': ('gauge', 'thread_pool.write.active'),
    'elasticsearch.thread_pool.write.threads': ('gauge', 'thread_pool.write.threads'),
    'elasticsearch.thread_pool.write.threads.count': ('monotonic_count', 'thread_pool.write.threads'),
    'elasticsearch.thread_pool.write.queue': ('gauge', 'thread_pool.write.queue'),
    'elasticsearch.thread_pool.write.rejected': ('rate', 'thread_pool.write.rejected'),
    'elasticsearch.thread_pool.write.rejected.count': ('monotonic_count', 'thread_pool.write.rejected'),
    'elasticsearch.thread_pool.write.completed': ('rate', 'thread_pool.write.completed'),
    'elasticsearch.thread_pool.write.completed.count': ('monotonic_count', 'thread_pool.write.completed'),
}
# Cluster-health endpoint metrics; returned by health_stats_for_version().
CLUSTER_HEALTH_METRICS = {
    'elasticsearch.number_of_nodes': ('gauge', 'number_of_nodes'),
    'elasticsearch.number_of_data_nodes': ('gauge', 'number_of_data_nodes'),
    'elasticsearch.active_primary_shards': ('gauge', 'active_primary_shards'),
    'elasticsearch.active_shards': ('gauge', 'active_shards'),
    'elasticsearch.relocating_shards': ('gauge', 'relocating_shards'),
    'elasticsearch.initializing_shards': ('gauge', 'initializing_shards'),
    'elasticsearch.unassigned_shards': ('gauge', 'unassigned_shards'),
    # Status string is mapped to a numeric value; unknown statuses become -1.
    'elasticsearch.cluster_status': ('gauge', 'status', lambda v: {'red': 0, 'yellow': 1, 'green': 2}.get(v, -1)),
}
# Added to the health metrics when version >= [2, 4, 0].
CLUSTER_HEALTH_METRICS_POST_2_4 = {'elasticsearch.delayed_unassigned_shards': ('gauge', 'delayed_unassigned_shards')}
# Pending cluster-task metrics.
CLUSTER_PENDING_TASKS = {
    'elasticsearch.pending_tasks_total': ('gauge', 'pending_task_total'),
    'elasticsearch.pending_tasks_priority_high': ('gauge', 'pending_tasks_priority_high'),
    'elasticsearch.pending_tasks_priority_urgent': ('gauge', 'pending_tasks_priority_urgent'),
    'elasticsearch.pending_tasks_time_in_queue': ('gauge', 'pending_tasks_time_in_queue'),
}
# Snapshot lifecycle management policy metrics; returned by slm_stats_for_version()
# for version >= [7, 4, 0].
SLM_POLICY_METRICS = {
    'elasticsearch.slm.snapshot_deletion_failures': ('gauge', 'stats.snapshot_deletion_failures'),
    'elasticsearch.slm.snapshots_deleted': ('gauge', 'stats.snapshots_deleted'),
    'elasticsearch.slm.snapshots_failed': ('gauge', 'stats.snapshots_failed'),
    'elasticsearch.slm.snapshots_taken': ('gauge', 'stats.snapshots_taken'),
}
# OS-level metrics from the node stats payload; byte values are converted to mebibytes.
# Returned by node_system_stats_for_version().
NODE_SYSTEM_METRICS = {
    'system.mem.free': ('gauge', 'os.mem.free_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.mem.usable': ('gauge', 'os.mem.free_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.mem.used': ('gauge', 'os.mem.used_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.swap.free': ('gauge', 'os.swap.free_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.swap.used': ('gauge', 'os.swap.used_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.net.bytes_rcvd': ('gauge', 'transport.rx_size_in_bytes'),
    'system.net.bytes_sent': ('gauge', 'transport.tx_size_in_bytes'),
}
# Added when version >= [1, 0, 0].
NODE_SYSTEM_METRICS_POST_1 = {
    'system.mem.total': ('gauge', 'os.mem.total_in_bytes', lambda b: byte_to_mebibyte(b)),
    'system.swap.total': ('gauge', 'os.swap.total_in_bytes', lambda b: byte_to_mebibyte(b)),
}
# Added when version >= [5, 0, 0].
NODE_SYSTEM_METRICS_POST_5 = {
    # ES reports CPU busy percent; idle is derived as its complement.
    'system.cpu.idle': ('gauge', 'os.cpu.percent', lambda v: (100 - v)),
    'system.load.1': ('gauge', 'os.cpu.load_average.1m'),
    'system.load.5': ('gauge', 'os.cpu.load_average.5m'),
    'system.load.15': ('gauge', 'os.cpu.load_average.15m'),
    'elasticsearch.cgroup.cpu.stat.number_of_elapsed_periods': (
        'gauge',
        'os.cgroup.cpu.stat.number_of_elapsed_periods',
    ),
    'elasticsearch.cgroup.cpu.stat.number_of_times_throttled': (
        'gauge',
        'os.cgroup.cpu.stat.number_of_times_throttled',
    ),
    'elasticsearch.process.cpu.percent': ('gauge', 'process.cpu.percent'),
}
# Metrics parsed from the _cat/allocation endpoint.
CAT_ALLOCATION_METRICS = {
    'elasticsearch.shards': ('gauge', 'shards'),
    'elasticsearch.disk.indices': ('gauge', 'disk_indices'),
    'elasticsearch.disk.used': ('gauge', 'disk_used'),
    'elasticsearch.disk.avail': ('gauge', 'disk_avail'),
    'elasticsearch.disk.total': ('gauge', 'disk_total'),
    'elasticsearch.disk.percent': ('gauge', 'disk_percent'),
}
def stats_for_version(version, jvm_rate=False):
    """
    Get the proper set of stats metrics for the specified ES version.

    *version* is a [major, minor, patch] list; *jvm_rate* additionally enables
    the rate-typed JVM GC metrics. Returns a fresh dict (the module-level
    metric tables are never mutated).
    """
    metrics = dict(STATS_METRICS)

    # JVM additional metrics
    if version < [0, 90, 10]:
        metrics.update(JVM_METRICS_PRE_0_90_10)
    else:
        metrics.update(JVM_METRICS_POST_0_90_10)
        if jvm_rate:
            metrics.update(JVM_METRICS_RATE)

    # Additional Stats metrics
    metrics.update(ADDITIONAL_METRICS_POST_0_90_5 if version >= [0, 90, 5] else ADDITIONAL_METRICS_PRE_0_90_5)

    if version >= [1, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_1_0_0)

    # Metric sets that only exist on the 0.x/1.x line.
    if version < [2, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_PRE_2_0)
        if version >= [0, 90, 5]:
            metrics.update(ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)
        if version >= [1, 0, 0]:
            metrics.update(ADDITIONAL_METRICS_1_x)

    # Metric sets gated on a minimum version, applied in ascending order so that
    # later sets may override earlier keys (e.g. the fs.* metrics in 5.x).
    for minimum, gated_metrics in (
        ([1, 3, 0], ADDITIONAL_METRICS_POST_1_3_0),
        ([1, 4, 0], ADDITIONAL_METRICS_POST_1_4_0),
        ([1, 5, 0], ADDITIONAL_METRICS_POST_1_5_0),
        ([1, 6, 0], ADDITIONAL_METRICS_POST_1_6_0),
        ([2, 0, 0], ADDITIONAL_METRICS_POST_2_0),
        ([2, 1, 0], ADDITIONAL_METRICS_POST_2_1),
        ([5, 0, 0], ADDITIONAL_METRICS_5_x),
    ):
        if version >= minimum:
            metrics.update(gated_metrics)

    if version < [5, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_PRE_5_0_0)

    metrics.update(ADDITIONAL_METRICS_POST_6_3 if version >= [6, 3, 0] else ADDITIONAL_METRICS_PRE_6_3)

    if version < [7, 0, 0]:
        metrics.update(ADDITIONAL_METRICS_PRE_7_0_0)
    if version >= [7, 2, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_7_2_0)
    if version >= [7, 9, 0]:
        metrics.update(ADDITIONAL_METRICS_POST_7_9_0)

    return metrics
def pshard_stats_for_version(version):
    """
    Get the proper set of pshard (primary shard) metrics for the specified ES version.

    Returns a fresh dict built from PRIMARY_SHARD_METRICS plus the version-gated sets.
    """
    pshard_stats_metrics = dict(PRIMARY_SHARD_METRICS)
    for minimum, gated_metrics in (
        ([1, 0, 0], PRIMARY_SHARD_METRICS_POST_1_0_0),
        ([7, 2, 0], PRIMARY_SHARD_METRICS_POST_7_2_0),
    ):
        if version >= minimum:
            pshard_stats_metrics.update(gated_metrics)
    return pshard_stats_metrics
def health_stats_for_version(version):
    """
    Get the proper set of cluster-health metrics for the specified ES version.
    """
    # delayed_unassigned_shards is only reported from ES 2.4.0 onwards.
    if version >= [2, 4, 0]:
        return {**CLUSTER_HEALTH_METRICS, **CLUSTER_HEALTH_METRICS_POST_2_4}
    return dict(CLUSTER_HEALTH_METRICS)
def slm_stats_for_version(version):
    """
    Get the proper set of SLM (snapshot lifecycle management) metrics for the
    specified ES version. Empty for versions before 7.4.0.
    """
    if version >= [7, 4, 0]:
        return dict(SLM_POLICY_METRICS)
    return {}
def index_stats_for_version(version):
    """
    Get the proper set of per-index metrics for the specified ES version.
    Returns an empty dict when no version is available (falsy *version*).
    """
    return dict(INDEX_STATS_METRICS) if version else {}
def node_system_stats_for_version(version):
    """
    Get the proper set of OS/system metrics for the specified ES version.
    """
    node_system_stats = dict(NODE_SYSTEM_METRICS)
    for minimum, gated_metrics in (
        ([1, 0, 0], NODE_SYSTEM_METRICS_POST_1),
        ([5, 0, 0], NODE_SYSTEM_METRICS_POST_5),
    ):
        if version >= minimum:
            node_system_stats.update(gated_metrics)
    return node_system_stats
|
en
| 0.747144
|
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) # Metrics definition format is a dictionary mapping: # datadog_metric_name --> (datadog_metric_type, es_metric_name, optional_conversion_func) # Clusterwise metrics, pre aggregated on ES, compatible with all ES versions # Metrics that are common to all Elasticsearch versions # Metrics for index level # Submit metrics as rate # Stats are only valid for v1.x # Some of these may very well exist in previous ES versions, but not worth the time/effort # to find where they were introduced Get the proper set of stats metrics for the specified ES version # JVM additional metrics # Additional Stats metrics Get the proper set of pshard metrics for the specified ES version Get the proper set of health metrics for the specified ES version Get the proper set of SLM metrics for the specified ES version Get the proper set of index metrics for the specified ES version Get the proper set of os metrics for the specified ES version
| 1.855798
| 2
|
penman/interface.py
|
rafaelanchieta/AMR-Aligner
| 2
|
6625967
|
"""
Functions for basic reading and writing of PENMAN graphs.
"""
from typing import Union, Iterable, List
from pathlib import Path
from penman.codec import PENMANCodec
from penman.model import Model
from penman.graph import Graph
from penman.types import (Variable, file_or_filename)
def decode(s: str,
           model: Model = None) -> Graph:
    """
    Deserialize PENMAN-serialized *s* into its Graph object.

    Args:
        s: a string containing a single PENMAN-serialized graph
        model: the model used for interpreting the graph
    Returns:
        the Graph object described by *s*
    Example:
        >>> from penman.interface import decode
        >>> decode('(b / bark-01 :ARG0 (d / dog))')
        <Graph object (top=b) at ...>
    """
    # Delegate straight to a codec configured with the requested model.
    return PENMANCodec(model=model).decode(s)
def encode(g: Graph,
           top: Variable = None,
           model: Model = None,
           indent: Union[int, bool] = -1,
           compact: bool = False) -> str:
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: if given, the node to use as the top in serialization
        model: the model used for interpreting the graph
        indent: how to indent formatted strings
        compact: if ``True``, put initial attributes on the first line
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> from penman.interface import encode
        >>> from penman.graph import Graph
        >>> encode(Graph([('h', 'instance', 'hi')]))
        '(h / hi)'
    """
    codec = PENMANCodec(model=model)
    return codec.encode(g, top=top, indent=indent, compact=compact)
def load(source: file_or_filename,
         model: Model = None) -> List[Graph]:
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        model: the model used for interpreting the graph
    Returns:
        a list of Graph objects
    """
    codec = PENMANCodec(model=model)
    # Anything that is not a path is expected to be a readable file object.
    if not isinstance(source, (str, Path)):
        assert hasattr(source, 'read')
        return list(codec.iterdecode(source))
    with open(source) as fh:
        return list(codec.iterdecode(fh))
def loads(string: str,
          model: Model = None) -> List[Graph]:
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        model: the model used for interpreting the graph
    Returns:
        a list of Graph objects
    """
    return list(PENMANCodec(model=model).iterdecode(string))
def dump(graphs: Iterable[Graph],
         file: file_or_filename,
         model: Model = None,
         indent: Union[int, bool] = -1,
         compact: bool = False) -> None:
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        model: the model used for interpreting the graph
        indent: how to indent formatted strings
        compact: if ``True``, put initial attributes on the first line
    """
    codec = PENMANCodec(model=model)
    # A non-path argument is expected to be a writable file object.
    if not isinstance(file, (str, Path)):
        assert hasattr(file, 'write')
        _dump(file, graphs, codec, indent, compact)
        return
    with open(file, 'w') as fh:
        _dump(fh, graphs, codec, indent, compact)
def _dump(fh, gs, codec, indent, compact):
"""Helper method for dump() for incremental printing."""
ss = (codec.encode(g, indent=indent, compact=compact)
for g in gs)
try:
print(next(ss), file=fh)
except StopIteration:
return
for s in ss:
print(file=fh)
print(s, file=fh)
def dumps(graphs: Iterable[Graph],
          model: Model = None,
          indent: Union[int, bool] = -1,
          compact: bool = False) -> str:
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        model: the model used for interpreting the graph
        indent: how to indent formatted strings
        compact: if ``True``, put initial attributes on the first line
    Returns:
        the string of serialized graphs, separated by blank lines
    """
    codec = PENMANCodec(model=model)
    return '\n\n'.join(codec.encode(g, indent=indent, compact=compact) for g in graphs)
|
"""
Functions for basic reading and writing of PENMAN graphs.
"""
from typing import Union, Iterable, List
from pathlib import Path
from penman.codec import PENMANCodec
from penman.model import Model
from penman.graph import Graph
from penman.types import (Variable, file_or_filename)
def decode(s: str,
           model: Model = None) -> Graph:
    """
    Deserialize PENMAN-serialized *s* into its Graph object.

    Args:
        s: a string containing a single PENMAN-serialized graph
        model: the model used for interpreting the graph
    Returns:
        the Graph object described by *s*
    Example:
        >>> from penman.interface import decode
        >>> decode('(b / bark-01 :ARG0 (d / dog))')
        <Graph object (top=b) at ...>
    """
    # Delegate straight to a codec configured with the requested model.
    return PENMANCodec(model=model).decode(s)
def encode(g: Graph,
           top: Variable = None,
           model: Model = None,
           indent: Union[int, bool] = -1,
           compact: bool = False) -> str:
    """
    Serialize the graph *g* from *top* to PENMAN notation.

    Args:
        g: the Graph object
        top: if given, the node to use as the top in serialization
        model: the model used for interpreting the graph
        indent: how to indent formatted strings
        compact: if ``True``, put initial attributes on the first line
    Returns:
        the PENMAN-serialized string of the Graph *g*
    Example:
        >>> from penman.interface import encode
        >>> from penman.graph import Graph
        >>> encode(Graph([('h', 'instance', 'hi')]))
        '(h / hi)'
    """
    codec = PENMANCodec(model=model)
    return codec.encode(g, top=top, indent=indent, compact=compact)
def load(source: file_or_filename,
         model: Model = None) -> List[Graph]:
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        model: the model used for interpreting the graph
    Returns:
        a list of Graph objects
    """
    codec = PENMANCodec(model=model)
    # Anything that is not a path is expected to be a readable file object.
    if not isinstance(source, (str, Path)):
        assert hasattr(source, 'read')
        return list(codec.iterdecode(source))
    with open(source) as fh:
        return list(codec.iterdecode(fh))
def loads(string: str,
          model: Model = None) -> List[Graph]:
    """
    Deserialize a list of PENMAN-encoded graphs from *string*.

    Args:
        string: a string containing graph data
        model: the model used for interpreting the graph
    Returns:
        a list of Graph objects
    """
    return list(PENMANCodec(model=model).iterdecode(string))
def dump(graphs: Iterable[Graph],
         file: file_or_filename,
         model: Model = None,
         indent: Union[int, bool] = -1,
         compact: bool = False) -> None:
    """
    Serialize each graph in *graphs* to PENMAN and write to *file*.

    Args:
        graphs: an iterable of Graph objects
        file: a filename or file-like object to write to
        model: the model used for interpreting the graph
        indent: how to indent formatted strings
        compact: if ``True``, put initial attributes on the first line
    """
    codec = PENMANCodec(model=model)
    # A non-path argument is expected to be a writable file object.
    if not isinstance(file, (str, Path)):
        assert hasattr(file, 'write')
        _dump(file, graphs, codec, indent, compact)
        return
    with open(file, 'w') as fh:
        _dump(fh, graphs, codec, indent, compact)
def _dump(fh, gs, codec, indent, compact):
"""Helper method for dump() for incremental printing."""
ss = (codec.encode(g, indent=indent, compact=compact)
for g in gs)
try:
print(next(ss), file=fh)
except StopIteration:
return
for s in ss:
print(file=fh)
print(s, file=fh)
def dumps(graphs: Iterable[Graph],
          model: Model = None,
          indent: Union[int, bool] = -1,
          compact: bool = False) -> str:
    """
    Serialize each graph in *graphs* to the PENMAN format.

    Args:
        graphs: an iterable of Graph objects
        model: the model used for interpreting the graph
        indent: how to indent formatted strings
        compact: if ``True``, put initial attributes on the first line
    Returns:
        the string of serialized graphs, separated by blank lines
    """
    codec = PENMANCodec(model=model)
    return '\n\n'.join(codec.encode(g, indent=indent, compact=compact) for g in graphs)
|
en
| 0.726693
|
Functions for basic reading and writing of PENMAN graphs. Deserialize PENMAN-serialized *s* into its Graph object Args: s: a string containing a single PENMAN-serialized graph model: the model used for interpreting the graph Returns: the Graph object described by *s* Example: >>> from penman.interface import decode >>> decode('(b / bark-01 :ARG0 (d / dog))') <Graph object (top=b) at ...> Serialize the graph *g* from *top* to PENMAN notation. Args: g: the Graph object top: if given, the node to use as the top in serialization model: the model used for interpreting the graph indent: how to indent formatted strings compact: if ``True``, put initial attributes on the first line Returns: the PENMAN-serialized string of the Graph *g* Example: >>> from penman.interface import encode >>> from penman.graph import Graph >>> encode(Graph([('h', 'instance', 'hi')])) '(h / hi)' Deserialize a list of PENMAN-encoded graphs from *source*. Args: source: a filename or file-like object to read from model: the model used for interpreting the graph Returns: a list of Graph objects Deserialize a list of PENMAN-encoded graphs from *string*. Args: string: a string containing graph data model: the model used for interpreting the graph Returns: a list of Graph objects Serialize each graph in *graphs* to PENMAN and write to *file*. Args: graphs: an iterable of Graph objects file: a filename or file-like object to write to model: the model used for interpreting the graph indent: how to indent formatted strings compact: if ``True``, put initial attributes on the first line Helper method for dump() for incremental printing. Serialize each graph in *graphs* to the PENMAN format. Args: graphs: an iterable of Graph objects model: the model used for interpreting the graph indent: how to indent formatted strings compact: if ``True``, put initial attributes on the first line Returns: the string of serialized graphs
| 3.309209
| 3
|
tests/test_PCA.py
|
gdalessi/clustering
| 7
|
6625968
|
'''
MODULE: test_PCA.py
@Authors:
<NAME> [1,2]
[1]: Université Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Bruxelles, Belgium
[2]: CRECK Modeling Lab, Department of Chemistry, Materials and Chemical Engineering, Politecnico di Milano
@Contacts:
<EMAIL>
@Additional notes:
This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
Please report any bug to: <EMAIL>
'''
import unittest
import numpy as np
from numpy import linalg as LA
import matplotlib
import matplotlib.pyplot as plt
import OpenMORe.model_order_reduction as model_order_reduction
from OpenMORe.utilities import *
class testPCA(unittest.TestCase):
    """Unit tests for the PCA and variables_selection reduction models."""

    def setUp(self):
        # Random 30x5 data matrix plus fixed test parameters.
        self.X = np.random.rand(30, 5)
        self.nPCtest = 1
        self.kernelType = 'rbf'
        self.nVarTest = 3
        self.selMethod1 = 'procrustes'
        self.selMethod2 = 'b4'
        self.selMethod3 = 'b2'

    def tearDown(self):
        pass

    def test_pca(self):
        # fit() must honour the requested number of PCs, and
        # get_explained() must report the explained variance as a float.
        pca_model = model_order_reduction.PCA(self.X)
        pca_model.eigens = self.nPCtest
        pca_model.plot_explained_variance = False
        modes, eigvals = pca_model.fit()
        variance_fraction = pca_model.get_explained()
        self.assertEqual(modes.shape[1], self.nPCtest)
        self.assertEqual(len(eigvals), self.nPCtest)
        self.assertIsInstance(variance_fraction, float)

    def test_varSelection(self):
        # Every selection method must retain exactly nVarTest variables.
        selector = model_order_reduction.variables_selection(self.X)
        selector.eigens = self.nPCtest
        selector.retained = self.nVarTest
        all_labels = []
        for method in (self.selMethod1, self.selMethod2, self.selMethod3):
            selector.method = method
            labels, _ = selector.fit()
            all_labels.append(labels)
        for labels in all_labels:
            self.assertEqual(len(labels), self.nVarTest)
|
'''
MODULE: test_PCA.py
@Authors:
<NAME> [1,2]
[1]: Université Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Bruxelles, Belgium
[2]: CRECK Modeling Lab, Department of Chemistry, Materials and Chemical Engineering, Politecnico di Milano
@Contacts:
<EMAIL>
@Additional notes:
This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
Please report any bug to: <EMAIL>
'''
import unittest
import numpy as np
from numpy import linalg as LA
import matplotlib
import matplotlib.pyplot as plt
import OpenMORe.model_order_reduction as model_order_reduction
from OpenMORe.utilities import *
class testPCA(unittest.TestCase):
    """Unit tests for the PCA and variables_selection reduction models."""

    def setUp(self):
        # Random 30x5 data matrix plus fixed test parameters.
        self.X = np.random.rand(30, 5)
        self.nPCtest = 1
        self.kernelType = 'rbf'
        self.nVarTest = 3
        self.selMethod1 = 'procrustes'
        self.selMethod2 = 'b4'
        self.selMethod3 = 'b2'

    def tearDown(self):
        pass

    def test_pca(self):
        # fit() must honour the requested number of PCs, and
        # get_explained() must report the explained variance as a float.
        pca_model = model_order_reduction.PCA(self.X)
        pca_model.eigens = self.nPCtest
        pca_model.plot_explained_variance = False
        modes, eigvals = pca_model.fit()
        variance_fraction = pca_model.get_explained()
        self.assertEqual(modes.shape[1], self.nPCtest)
        self.assertEqual(len(eigvals), self.nPCtest)
        self.assertIsInstance(variance_fraction, float)

    def test_varSelection(self):
        # Every selection method must retain exactly nVarTest variables.
        selector = model_order_reduction.variables_selection(self.X)
        selector.eigens = self.nPCtest
        selector.retained = self.nVarTest
        all_labels = []
        for method in (self.selMethod1, self.selMethod2, self.selMethod3):
            selector.method = method
            labels, _ = selector.fit()
            all_labels.append(labels)
        for labels in all_labels:
            self.assertEqual(len(labels), self.nVarTest)
|
en
| 0.714394
|
MODULE: test_PCA.py @Authors: <NAME> [1,2] [1]: Université Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Bruxelles, Belgium [2]: CRECK Modeling Lab, Department of Chemistry, Materials and Chemical Engineering, Politecnico di Milano @Contacts: <EMAIL> @Additional notes: This code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; Please report any bug to: <EMAIL>
| 2.169346
| 2
|
tests/test_web_app.py
|
agronholm/aiohttp
| 0
|
6625969
|
import asyncio
from unittest import mock
import pytest
from async_generator import async_generator, yield_
from aiohttp import log, web
from aiohttp.abc import AbstractAccessLogger, AbstractRouter
from aiohttp.helpers import DEBUG, PY_36
from aiohttp.test_utils import make_mocked_coro
async def test_app_ctor() -> None:
    """Constructing an Application with an explicit loop warns but still works."""
    event_loop = asyncio.get_event_loop()
    with pytest.warns(DeprecationWarning):
        application = web.Application(loop=event_loop)
    assert application.loop is event_loop
    assert application.logger is log.web_logger
def test_app_call() -> None:
app = web.Application()
assert app is app()
def test_app_default_loop() -> None:
app = web.Application()
assert app.loop is None
async def test_set_loop() -> None:
loop = asyncio.get_event_loop()
app = web.Application()
app._set_loop(loop)
assert app.loop is loop
def test_set_loop_default_loop() -> None:
    # Install a fresh loop as the *current* event loop so that
    # _set_loop(None) has something to fall back on.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    app = web.Application()
    # Passing None must make the app adopt the currently-set event loop.
    app._set_loop(None)
    assert app.loop is loop
    # Restore global state so later tests start without a current loop.
    asyncio.set_event_loop(None)
def test_set_loop_with_different_loops() -> None:
loop = asyncio.new_event_loop()
app = web.Application()
app._set_loop(loop)
assert app.loop is loop
with pytest.raises(RuntimeError):
app._set_loop(loop=object())
@pytest.mark.parametrize('debug', [True, False])
async def test_app_make_handler_debug_exc(mocker, debug) -> None:
app = web.Application(debug=debug)
srv = mocker.patch('aiohttp.web_app.Server')
app._make_handler()
srv.assert_called_with(app._handle,
request_factory=app._make_request,
access_log_class=mock.ANY,
loop=asyncio.get_event_loop(),
debug=debug)
async def test_app_make_handler_args(mocker) -> None:
app = web.Application(handler_args={'test': True})
srv = mocker.patch('aiohttp.web_app.Server')
app._make_handler()
srv.assert_called_with(app._handle,
request_factory=app._make_request,
access_log_class=mock.ANY,
loop=asyncio.get_event_loop(),
debug=mock.ANY, test=True)
async def test_app_make_handler_access_log_class(mocker) -> None:
    # A class that is not an AbstractAccessLogger subclass must be rejected.
    class Logger:
        pass
    app = web.Application()
    with pytest.raises(TypeError):
        app._make_handler(access_log_class=Logger)
    # Redefine Logger as a proper AbstractAccessLogger implementation; this
    # one must be accepted and forwarded to the Server constructor.
    class Logger(AbstractAccessLogger):
        def log(self, request, response, time):
            self.logger.info('msg')
    srv = mocker.patch('aiohttp.web_app.Server')
    app._make_handler(access_log_class=Logger)
    srv.assert_called_with(app._handle,
                           access_log_class=Logger,
                           request_factory=app._make_request,
                           loop=asyncio.get_event_loop(),
                           debug=mock.ANY)
    # The same logger class supplied via handler_args must also reach Server.
    app = web.Application(handler_args={'access_log_class': Logger})
    app._make_handler(access_log_class=Logger)
    srv.assert_called_with(app._handle,
                           access_log_class=Logger,
                           request_factory=app._make_request,
                           loop=asyncio.get_event_loop(),
                           debug=mock.ANY)
async def test_app_make_handler_raises_deprecation_warning() -> None:
app = web.Application()
with pytest.warns(DeprecationWarning):
app.make_handler()
async def test_app_register_on_finish() -> None:
app = web.Application()
cb1 = make_mocked_coro(None)
cb2 = make_mocked_coro(None)
app.on_cleanup.append(cb1)
app.on_cleanup.append(cb2)
app.freeze()
await app.cleanup()
cb1.assert_called_once_with(app)
cb2.assert_called_once_with(app)
async def test_app_register_coro() -> None:
app = web.Application()
fut = asyncio.get_event_loop().create_future()
async def cb(app):
await asyncio.sleep(0.001)
fut.set_result(123)
app.on_cleanup.append(cb)
app.freeze()
await app.cleanup()
assert fut.done()
assert 123 == fut.result()
def test_non_default_router() -> None:
router = mock.Mock(spec=AbstractRouter)
with pytest.warns(DeprecationWarning):
app = web.Application(router=router)
assert router is app.router
def test_logging() -> None:
logger = mock.Mock()
app = web.Application()
app.logger = logger
assert app.logger is logger
async def test_on_shutdown() -> None:
app = web.Application()
called = False
async def on_shutdown(app_param):
nonlocal called
assert app is app_param
called = True
app.on_shutdown.append(on_shutdown)
app.freeze()
await app.shutdown()
assert called
async def test_on_startup() -> None:
app = web.Application()
long_running1_called = False
long_running2_called = False
all_long_running_called = False
async def long_running1(app_param):
nonlocal long_running1_called
assert app is app_param
long_running1_called = True
async def long_running2(app_param):
nonlocal long_running2_called
assert app is app_param
long_running2_called = True
async def on_startup_all_long_running(app_param):
nonlocal all_long_running_called
assert app is app_param
all_long_running_called = True
return await asyncio.gather(long_running1(app_param),
long_running2(app_param))
app.on_startup.append(on_startup_all_long_running)
app.freeze()
await app.startup()
assert long_running1_called
assert long_running2_called
assert all_long_running_called
def test_app_delitem() -> None:
app = web.Application()
app['key'] = 'value'
assert len(app) == 1
del app['key']
assert len(app) == 0
def test_app_freeze() -> None:
app = web.Application()
subapp = mock.Mock()
subapp._middlewares = ()
app._subapps.append(subapp)
app.freeze()
assert subapp.freeze.called
app.freeze()
assert len(subapp.freeze.call_args_list) == 1
def test_equality() -> None:
app1 = web.Application()
app2 = web.Application()
assert app1 == app1
assert app1 != app2
def test_app_run_middlewares() -> None:
root = web.Application()
sub = web.Application()
root.add_subapp('/sub', sub)
root.freeze()
assert root._run_middlewares is False
@web.middleware
async def middleware(request, handler):
return await handler(request)
root = web.Application(middlewares=[middleware])
sub = web.Application()
root.add_subapp('/sub', sub)
root.freeze()
assert root._run_middlewares is True
root = web.Application()
sub = web.Application(middlewares=[middleware])
root.add_subapp('/sub', sub)
root.freeze()
assert root._run_middlewares is True
def test_subapp_pre_frozen_after_adding() -> None:
app = web.Application()
subapp = web.Application()
app.add_subapp('/prefix', subapp)
assert subapp.pre_frozen
assert not subapp.frozen
@pytest.mark.skipif(not PY_36,
reason="Python 3.6+ required")
def test_app_inheritance() -> None:
with pytest.warns(DeprecationWarning):
class A(web.Application):
pass
@pytest.mark.skipif(not DEBUG,
reason="The check is applied in DEBUG mode only")
def test_app_custom_attr() -> None:
app = web.Application()
with pytest.warns(DeprecationWarning):
app.custom = None
async def test_cleanup_ctx() -> None:
app = web.Application()
out = []
def f(num):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2']
await app.cleanup()
assert out == ['pre_1', 'pre_2', 'post_2', 'post_1']
async def test_cleanup_ctx_exception_on_startup() -> None:
    app = web.Application()
    out = []
    exc = Exception('fail')
    def f(num, fail=False):
        # Cleanup-context factory; with fail=True it raises before yielding,
        # i.e. during the startup half of the context.
        @async_generator
        async def inner(app):
            out.append('pre_' + str(num))
            if fail:
                raise exc
            await yield_(None)
            out.append('post_' + str(num))
        return inner
    app.cleanup_ctx.append(f(1))
    app.cleanup_ctx.append(f(2, True))
    app.cleanup_ctx.append(f(3))
    app.freeze()
    # Context 2 raises on startup, so context 3 never starts and the
    # original exception propagates out of app.startup().
    with pytest.raises(Exception) as ctx:
        await app.startup()
    assert ctx.value is exc
    assert out == ['pre_1', 'pre_2']
    # Cleanup unwinds only the contexts that completed their startup half:
    # context 1 runs 'post_1'; context 2 never yielded, so no 'post_2'.
    await app.cleanup()
    assert out == ['pre_1', 'pre_2', 'post_1']
async def test_cleanup_ctx_exception_on_cleanup() -> None:
app = web.Application()
out = []
exc = Exception('fail')
def f(num, fail=False):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
if fail:
raise exc
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2, True))
app.cleanup_ctx.append(f(3))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2', 'pre_3']
with pytest.raises(Exception) as ctx:
await app.cleanup()
assert ctx.value is exc
assert out == ['pre_1', 'pre_2', 'pre_3', 'post_3', 'post_2', 'post_1']
async def test_cleanup_ctx_exception_on_cleanup_multiple() -> None:
app = web.Application()
out = []
def f(num, fail=False):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
if fail:
raise Exception('fail_' + str(num))
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2, True))
app.cleanup_ctx.append(f(3, True))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2', 'pre_3']
with pytest.raises(web.CleanupError) as ctx:
await app.cleanup()
exc = ctx.value
assert len(exc.exceptions) == 2
assert str(exc.exceptions[0]) == 'fail_3'
assert str(exc.exceptions[1]) == 'fail_2'
assert out == ['pre_1', 'pre_2', 'pre_3', 'post_3', 'post_2', 'post_1']
async def test_cleanup_ctx_multiple_yields() -> None:
app = web.Application()
out = []
def f(num):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
await yield_(None)
return inner
app.cleanup_ctx.append(f(1))
app.freeze()
await app.startup()
assert out == ['pre_1']
with pytest.raises(RuntimeError) as ctx:
await app.cleanup()
assert "has more than one 'yield'" in str(ctx.value)
assert out == ['pre_1', 'post_1']
async def test_mixe_cleanup_ctx_on_startup_and_on_cleanup() -> None:
    # NOTE(review): "mixe" looks like a typo for "mixed" in the test name.
    app = web.Application()
    out = []
    def startup(num):
        # Factory for an on_startup handler that records 'pre_<num>'.
        async def inner(app):
            out.append('pre_' + str(num))
        return inner
    def cleanup(num):
        # NOTE(review): this factory is defined but never registered below —
        # dead code, or a handler that was meant to go on app.on_cleanup?
        async def inner(app):
            out.append('post_' + str(num))
        return inner
    def cleanup_ctx(num):
        # Factory for a cleanup context: 'pre' on startup, 'post' on cleanup.
        @async_generator
        async def inner(app):
            out.append('pre_' + str(num))
            await yield_(None)
            out.append('post_' + str(num))
        return inner
    app.on_startup.append(startup(1))
    app.cleanup_ctx.append(cleanup_ctx(2))
    app.on_startup.append(startup(3))
    app.cleanup_ctx.append(cleanup_ctx(4))
    app.on_startup.append(startup(5))
    app.freeze()
    await app.startup()
    # Startup runs handlers in registration order regardless of kind.
    assert out == ['pre_1', 'pre_2', 'pre_3', 'pre_4', 'pre_5']
    del out[:]
    await app.cleanup()
    # Only the cleanup-context 'post' halves run, in reverse registration order.
    assert out == ['post_4', 'post_2']
async def test_subapp_chained_config_dict_visibility(aiohttp_client) -> None:
async def main_handler(request):
assert request.config_dict['key1'] == 'val1'
assert 'key2' not in request.config_dict
return web.Response(status=200)
root = web.Application()
root['key1'] = 'val1'
root.add_routes([web.get('/', main_handler)])
async def sub_handler(request):
assert request.config_dict['key1'] == 'val1'
assert request.config_dict['key2'] == 'val2'
return web.Response(status=201)
sub = web.Application()
sub['key2'] = 'val2'
sub.add_routes([web.get('/', sub_handler)])
root.add_subapp('/sub', sub)
client = await aiohttp_client(root)
resp = await client.get('/')
assert resp.status == 200
resp = await client.get('/sub/')
assert resp.status == 201
async def test_subapp_chained_config_dict_overriding(aiohttp_client) -> None:
async def main_handler(request):
assert request.config_dict['key'] == 'val1'
return web.Response(status=200)
root = web.Application()
root['key'] = 'val1'
root.add_routes([web.get('/', main_handler)])
async def sub_handler(request):
assert request.config_dict['key'] == 'val2'
return web.Response(status=201)
sub = web.Application()
sub['key'] = 'val2'
sub.add_routes([web.get('/', sub_handler)])
root.add_subapp('/sub', sub)
client = await aiohttp_client(root)
resp = await client.get('/')
assert resp.status == 200
resp = await client.get('/sub/')
assert resp.status == 201
async def test_subapp_on_startup(aiohttp_client) -> None:
    # A sub-application's lifecycle hooks must fire with the parent app's
    # lifecycle: nothing before the server starts, 'pre' halves on startup,
    # 'post'/shutdown/cleanup on close.
    subapp = web.Application()
    startup_called = False
    async def on_startup(app):
        nonlocal startup_called
        startup_called = True
        app['startup'] = True
    subapp.on_startup.append(on_startup)
    ctx_pre_called = False
    ctx_post_called = False
    @async_generator
    async def cleanup_ctx(app):
        nonlocal ctx_pre_called, ctx_post_called
        ctx_pre_called = True
        app['cleanup'] = True
        await yield_(None)
        ctx_post_called = True
    subapp.cleanup_ctx.append(cleanup_ctx)
    shutdown_called = False
    async def on_shutdown(app):
        nonlocal shutdown_called
        shutdown_called = True
    subapp.on_shutdown.append(on_shutdown)
    cleanup_called = False
    async def on_cleanup(app):
        nonlocal cleanup_called
        cleanup_called = True
    subapp.on_cleanup.append(on_cleanup)
    app = web.Application()
    app.add_subapp('/subapp', subapp)
    # Merely adding the subapp must not trigger any hooks...
    assert not startup_called
    assert not ctx_pre_called
    assert not ctx_post_called
    assert not shutdown_called
    assert not cleanup_called
    # ...but it freezes all of the subapp's signal lists and its router.
    assert subapp.on_startup.frozen
    assert subapp.cleanup_ctx.frozen
    assert subapp.on_shutdown.frozen
    assert subapp.on_cleanup.frozen
    assert subapp.router.frozen
    # Starting the server runs the startup half of every hook.
    client = await aiohttp_client(app)
    assert startup_called
    assert ctx_pre_called
    assert not ctx_post_called
    assert not shutdown_called
    assert not cleanup_called
    # Closing the client/server runs shutdown, the context 'post' half,
    # and cleanup.
    await client.close()
    assert startup_called
    assert ctx_pre_called
    assert ctx_post_called
    assert shutdown_called
    assert cleanup_called
|
import asyncio
from unittest import mock
import pytest
from async_generator import async_generator, yield_
from aiohttp import log, web
from aiohttp.abc import AbstractAccessLogger, AbstractRouter
from aiohttp.helpers import DEBUG, PY_36
from aiohttp.test_utils import make_mocked_coro
async def test_app_ctor() -> None:
loop = asyncio.get_event_loop()
with pytest.warns(DeprecationWarning):
app = web.Application(loop=loop)
assert loop is app.loop
assert app.logger is log.web_logger
def test_app_call() -> None:
app = web.Application()
assert app is app()
def test_app_default_loop() -> None:
app = web.Application()
assert app.loop is None
async def test_set_loop() -> None:
loop = asyncio.get_event_loop()
app = web.Application()
app._set_loop(loop)
assert app.loop is loop
def test_set_loop_default_loop() -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
app = web.Application()
app._set_loop(None)
assert app.loop is loop
asyncio.set_event_loop(None)
def test_set_loop_with_different_loops() -> None:
loop = asyncio.new_event_loop()
app = web.Application()
app._set_loop(loop)
assert app.loop is loop
with pytest.raises(RuntimeError):
app._set_loop(loop=object())
@pytest.mark.parametrize('debug', [True, False])
async def test_app_make_handler_debug_exc(mocker, debug) -> None:
app = web.Application(debug=debug)
srv = mocker.patch('aiohttp.web_app.Server')
app._make_handler()
srv.assert_called_with(app._handle,
request_factory=app._make_request,
access_log_class=mock.ANY,
loop=asyncio.get_event_loop(),
debug=debug)
async def test_app_make_handler_args(mocker) -> None:
app = web.Application(handler_args={'test': True})
srv = mocker.patch('aiohttp.web_app.Server')
app._make_handler()
srv.assert_called_with(app._handle,
request_factory=app._make_request,
access_log_class=mock.ANY,
loop=asyncio.get_event_loop(),
debug=mock.ANY, test=True)
async def test_app_make_handler_access_log_class(mocker) -> None:
class Logger:
pass
app = web.Application()
with pytest.raises(TypeError):
app._make_handler(access_log_class=Logger)
class Logger(AbstractAccessLogger):
def log(self, request, response, time):
self.logger.info('msg')
srv = mocker.patch('aiohttp.web_app.Server')
app._make_handler(access_log_class=Logger)
srv.assert_called_with(app._handle,
access_log_class=Logger,
request_factory=app._make_request,
loop=asyncio.get_event_loop(),
debug=mock.ANY)
app = web.Application(handler_args={'access_log_class': Logger})
app._make_handler(access_log_class=Logger)
srv.assert_called_with(app._handle,
access_log_class=Logger,
request_factory=app._make_request,
loop=asyncio.get_event_loop(),
debug=mock.ANY)
async def test_app_make_handler_raises_deprecation_warning() -> None:
app = web.Application()
with pytest.warns(DeprecationWarning):
app.make_handler()
async def test_app_register_on_finish() -> None:
app = web.Application()
cb1 = make_mocked_coro(None)
cb2 = make_mocked_coro(None)
app.on_cleanup.append(cb1)
app.on_cleanup.append(cb2)
app.freeze()
await app.cleanup()
cb1.assert_called_once_with(app)
cb2.assert_called_once_with(app)
async def test_app_register_coro() -> None:
app = web.Application()
fut = asyncio.get_event_loop().create_future()
async def cb(app):
await asyncio.sleep(0.001)
fut.set_result(123)
app.on_cleanup.append(cb)
app.freeze()
await app.cleanup()
assert fut.done()
assert 123 == fut.result()
def test_non_default_router() -> None:
router = mock.Mock(spec=AbstractRouter)
with pytest.warns(DeprecationWarning):
app = web.Application(router=router)
assert router is app.router
def test_logging() -> None:
logger = mock.Mock()
app = web.Application()
app.logger = logger
assert app.logger is logger
async def test_on_shutdown() -> None:
app = web.Application()
called = False
async def on_shutdown(app_param):
nonlocal called
assert app is app_param
called = True
app.on_shutdown.append(on_shutdown)
app.freeze()
await app.shutdown()
assert called
async def test_on_startup() -> None:
app = web.Application()
long_running1_called = False
long_running2_called = False
all_long_running_called = False
async def long_running1(app_param):
nonlocal long_running1_called
assert app is app_param
long_running1_called = True
async def long_running2(app_param):
nonlocal long_running2_called
assert app is app_param
long_running2_called = True
async def on_startup_all_long_running(app_param):
nonlocal all_long_running_called
assert app is app_param
all_long_running_called = True
return await asyncio.gather(long_running1(app_param),
long_running2(app_param))
app.on_startup.append(on_startup_all_long_running)
app.freeze()
await app.startup()
assert long_running1_called
assert long_running2_called
assert all_long_running_called
def test_app_delitem() -> None:
app = web.Application()
app['key'] = 'value'
assert len(app) == 1
del app['key']
assert len(app) == 0
def test_app_freeze() -> None:
app = web.Application()
subapp = mock.Mock()
subapp._middlewares = ()
app._subapps.append(subapp)
app.freeze()
assert subapp.freeze.called
app.freeze()
assert len(subapp.freeze.call_args_list) == 1
def test_equality() -> None:
app1 = web.Application()
app2 = web.Application()
assert app1 == app1
assert app1 != app2
def test_app_run_middlewares() -> None:
root = web.Application()
sub = web.Application()
root.add_subapp('/sub', sub)
root.freeze()
assert root._run_middlewares is False
@web.middleware
async def middleware(request, handler):
return await handler(request)
root = web.Application(middlewares=[middleware])
sub = web.Application()
root.add_subapp('/sub', sub)
root.freeze()
assert root._run_middlewares is True
root = web.Application()
sub = web.Application(middlewares=[middleware])
root.add_subapp('/sub', sub)
root.freeze()
assert root._run_middlewares is True
def test_subapp_pre_frozen_after_adding() -> None:
app = web.Application()
subapp = web.Application()
app.add_subapp('/prefix', subapp)
assert subapp.pre_frozen
assert not subapp.frozen
@pytest.mark.skipif(not PY_36,
reason="Python 3.6+ required")
def test_app_inheritance() -> None:
with pytest.warns(DeprecationWarning):
class A(web.Application):
pass
@pytest.mark.skipif(not DEBUG,
reason="The check is applied in DEBUG mode only")
def test_app_custom_attr() -> None:
app = web.Application()
with pytest.warns(DeprecationWarning):
app.custom = None
async def test_cleanup_ctx() -> None:
app = web.Application()
out = []
def f(num):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2']
await app.cleanup()
assert out == ['pre_1', 'pre_2', 'post_2', 'post_1']
async def test_cleanup_ctx_exception_on_startup() -> None:
app = web.Application()
out = []
exc = Exception('fail')
def f(num, fail=False):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
if fail:
raise exc
await yield_(None)
out.append('post_' + str(num))
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2, True))
app.cleanup_ctx.append(f(3))
app.freeze()
with pytest.raises(Exception) as ctx:
await app.startup()
assert ctx.value is exc
assert out == ['pre_1', 'pre_2']
await app.cleanup()
assert out == ['pre_1', 'pre_2', 'post_1']
async def test_cleanup_ctx_exception_on_cleanup() -> None:
app = web.Application()
out = []
exc = Exception('fail')
def f(num, fail=False):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
if fail:
raise exc
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2, True))
app.cleanup_ctx.append(f(3))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2', 'pre_3']
with pytest.raises(Exception) as ctx:
await app.cleanup()
assert ctx.value is exc
assert out == ['pre_1', 'pre_2', 'pre_3', 'post_3', 'post_2', 'post_1']
async def test_cleanup_ctx_exception_on_cleanup_multiple() -> None:
app = web.Application()
out = []
def f(num, fail=False):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
if fail:
raise Exception('fail_' + str(num))
return inner
app.cleanup_ctx.append(f(1))
app.cleanup_ctx.append(f(2, True))
app.cleanup_ctx.append(f(3, True))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2', 'pre_3']
with pytest.raises(web.CleanupError) as ctx:
await app.cleanup()
exc = ctx.value
assert len(exc.exceptions) == 2
assert str(exc.exceptions[0]) == 'fail_3'
assert str(exc.exceptions[1]) == 'fail_2'
assert out == ['pre_1', 'pre_2', 'pre_3', 'post_3', 'post_2', 'post_1']
async def test_cleanup_ctx_multiple_yields() -> None:
app = web.Application()
out = []
def f(num):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
await yield_(None)
return inner
app.cleanup_ctx.append(f(1))
app.freeze()
await app.startup()
assert out == ['pre_1']
with pytest.raises(RuntimeError) as ctx:
await app.cleanup()
assert "has more than one 'yield'" in str(ctx.value)
assert out == ['pre_1', 'post_1']
async def test_mixe_cleanup_ctx_on_startup_and_on_cleanup() -> None:
app = web.Application()
out = []
def startup(num):
async def inner(app):
out.append('pre_' + str(num))
return inner
def cleanup(num):
async def inner(app):
out.append('post_' + str(num))
return inner
def cleanup_ctx(num):
@async_generator
async def inner(app):
out.append('pre_' + str(num))
await yield_(None)
out.append('post_' + str(num))
return inner
app.on_startup.append(startup(1))
app.cleanup_ctx.append(cleanup_ctx(2))
app.on_startup.append(startup(3))
app.cleanup_ctx.append(cleanup_ctx(4))
app.on_startup.append(startup(5))
app.freeze()
await app.startup()
assert out == ['pre_1', 'pre_2', 'pre_3', 'pre_4', 'pre_5']
del out[:]
await app.cleanup()
assert out == ['post_4', 'post_2']
async def test_subapp_chained_config_dict_visibility(aiohttp_client) -> None:
async def main_handler(request):
assert request.config_dict['key1'] == 'val1'
assert 'key2' not in request.config_dict
return web.Response(status=200)
root = web.Application()
root['key1'] = 'val1'
root.add_routes([web.get('/', main_handler)])
async def sub_handler(request):
assert request.config_dict['key1'] == 'val1'
assert request.config_dict['key2'] == 'val2'
return web.Response(status=201)
sub = web.Application()
sub['key2'] = 'val2'
sub.add_routes([web.get('/', sub_handler)])
root.add_subapp('/sub', sub)
client = await aiohttp_client(root)
resp = await client.get('/')
assert resp.status == 200
resp = await client.get('/sub/')
assert resp.status == 201
async def test_subapp_chained_config_dict_overriding(aiohttp_client) -> None:
async def main_handler(request):
assert request.config_dict['key'] == 'val1'
return web.Response(status=200)
root = web.Application()
root['key'] = 'val1'
root.add_routes([web.get('/', main_handler)])
async def sub_handler(request):
assert request.config_dict['key'] == 'val2'
return web.Response(status=201)
sub = web.Application()
sub['key'] = 'val2'
sub.add_routes([web.get('/', sub_handler)])
root.add_subapp('/sub', sub)
client = await aiohttp_client(root)
resp = await client.get('/')
assert resp.status == 200
resp = await client.get('/sub/')
assert resp.status == 201
async def test_subapp_on_startup(aiohttp_client) -> None:
subapp = web.Application()
startup_called = False
async def on_startup(app):
nonlocal startup_called
startup_called = True
app['startup'] = True
subapp.on_startup.append(on_startup)
ctx_pre_called = False
ctx_post_called = False
@async_generator
async def cleanup_ctx(app):
nonlocal ctx_pre_called, ctx_post_called
ctx_pre_called = True
app['cleanup'] = True
await yield_(None)
ctx_post_called = True
subapp.cleanup_ctx.append(cleanup_ctx)
shutdown_called = False
async def on_shutdown(app):
nonlocal shutdown_called
shutdown_called = True
subapp.on_shutdown.append(on_shutdown)
cleanup_called = False
async def on_cleanup(app):
nonlocal cleanup_called
cleanup_called = True
subapp.on_cleanup.append(on_cleanup)
app = web.Application()
app.add_subapp('/subapp', subapp)
assert not startup_called
assert not ctx_pre_called
assert not ctx_post_called
assert not shutdown_called
assert not cleanup_called
assert subapp.on_startup.frozen
assert subapp.cleanup_ctx.frozen
assert subapp.on_shutdown.frozen
assert subapp.on_cleanup.frozen
assert subapp.router.frozen
client = await aiohttp_client(app)
assert startup_called
assert ctx_pre_called
assert not ctx_post_called
assert not shutdown_called
assert not cleanup_called
await client.close()
assert startup_called
assert ctx_pre_called
assert ctx_post_called
assert shutdown_called
assert cleanup_called
|
none
| 1
| 2.125969
| 2
|
|
python/Django/venv/djangoschool/school/models.py
|
Pitoontakoonpol/Python
| 0
|
6625970
|
from django.db import models
class ExamScore(models.Model):
    """One exam score: a student's result for a single subject."""

    # (stored value, Thai display label) pairs for the `subject` choices:
    # math, science, English, art, physics, biology.
    allsubject = (('math', 'คณิตศาสตร์'),
                  ('sci', 'วิทยาศาสตร์'),
                  ('eng', 'ภาษาอังกฤษ'),
                  ('art', 'ศิลป์'),
                  ('physics', 'ฟิสิกส์'),
                  ('bio', 'ชีววิทยา')
                  )
    # Subject key restricted to the choices above; defaults to math.
    subject = models.CharField(max_length=100, choices=allsubject, default='math')
    # Student's name (free text, up to 50 chars).
    studentName = models.CharField(max_length=50)
    # Exam score; defaults to 0.
    score = models.IntegerField(default=0)

    def __str__(self):
        # Human-readable row identity, e.g. "Somchai, math, 87".
        return self.studentName + ', ' + self.subject + ', ' + str(self.score)
|
from django.db import models
class ExamScore(models.Model):
    """One exam score: a student's result for a single subject."""

    # (stored value, Thai display label) pairs for the `subject` choices:
    # math, science, English, art, physics, biology.
    allsubject = (('math', 'คณิตศาสตร์'),
                  ('sci', 'วิทยาศาสตร์'),
                  ('eng', 'ภาษาอังกฤษ'),
                  ('art', 'ศิลป์'),
                  ('physics', 'ฟิสิกส์'),
                  ('bio', 'ชีววิทยา')
                  )
    # Subject key restricted to the choices above; defaults to math.
    subject = models.CharField(max_length=100, choices=allsubject, default='math')
    # Student's name (free text, up to 50 chars).
    studentName = models.CharField(max_length=50)
    # Exam score; defaults to 0.
    score = models.IntegerField(default=0)

    def __str__(self):
        # Human-readable row identity, e.g. "Somchai, math, 87".
        return self.studentName + ', ' + self.subject + ', ' + str(self.score)
|
none
| 1
| 2.309561
| 2
|
|
projeto001/qt_divisores.py
|
gerssivaldosantos/MeuGuru
| 1
|
6625971
|
def qtd_divisores(numero):
    """Return how many divisors *numero* has, printing each divisor found.

    Fix: the original explanatory string sat after the first statement, so it
    was a no-op expression rather than the function's docstring; it is now a
    real docstring (translated to English).

    Args:
        numero: positive integer whose divisors are counted.

    Returns:
        int: the count of integers in [1, numero] that divide *numero*
        exactly (0 for non-positive input, since the range is empty).
    """
    contador = 0
    # i divides `numero` exactly when the remainder of the division is zero.
    for i in range(1, numero + 1):
        if numero % i == 0:
            print(i)  # side effect kept from the original: report each divisor
            contador += 1
    return contador
|
def qtd_divisores(numero):
    """Return how many divisors *numero* has, printing each divisor found.

    Fix: the original explanatory string sat after the first statement, so it
    was a no-op expression rather than the function's docstring; it is now a
    real docstring (translated to English).

    Args:
        numero: positive integer whose divisors are counted.

    Returns:
        int: the count of integers in [1, numero] that divide *numero*
        exactly (0 for non-positive input, since the range is empty).
    """
    contador = 0
    # i divides `numero` exactly when the remainder of the division is zero.
    for i in range(1, numero + 1):
        if numero % i == 0:
            print(i)  # side effect kept from the original: report each divisor
            contador += 1
    return contador
|
pt
| 0.945307
|
Percorrendo do 1 ao número e verificando se o resto de divisão de cada número no intervalo é igual à zero, caso seja, adição ao contador
| 3.780334
| 4
|
comet/train/quantize.py
|
kearnsw/comet-commonsense
| 0
|
6625972
|
"""Post-training static quantization of a pre-trained COMET (atomic) model.

Loads a saved checkpoint, inserts quantization observers, calibrates them by
running one evaluation cycle, converts the model to its quantized form, and
saves the result via the trainer.
"""
import torch
from comet.interactive import functions as interactive
import comet.train.atomic_train as train
from comet.train.opt import OpenAIAdam
import comet.data.config as cfg
# NOTE(review): declared but never used below — intended calibration batch
# budget? Confirm before removing.
num_calibration_batches = 10
# Restore the trained model options and weights from the pickled checkpoint.
opt, state_dict = interactive.load_model_file("models/6.25e-05_adam_64_20500.pickle")
data_loader, text_encoder = interactive.load_data("atomic", opt)
# Context size = maximum event length + maximum effect length.
n_ctx = data_loader.max_event + data_loader.max_effect
n_vocab = len(text_encoder.encoder) + n_ctx
# Quantization calibration runs on CPU; eval mode disables dropout etc.
model = interactive.make_model(opt, n_vocab, n_ctx, state_dict).to('cpu')
model.eval()
# Specify quantization configuration
# Start with simple min/max range estimation and per-tensor quantization of weights
model.qconfig = torch.quantization.default_qconfig
print(model.qconfig)
torch.quantization.prepare(model, inplace=True)
# Calibrate first
print('Post Training Quantization Prepare: Inserting Observers')
config_file = "config/atomic/config_{}.json".format(0)
config = cfg.read_config(cfg.load_config(config_file))
opt, meta = cfg.get_parameters(config)
# Calibrate with the training set
model.eval()
# The optimizer is only needed to satisfy the trainer constructor; no
# gradient steps are taken during the evaluation-only calibration pass.
optimizer = OpenAIAdam(model.parameters(),
                       lr=opt.train.dynamic.lr,
                       schedule=opt.train.static.lrsched,
                       warmup=opt.train.static.lrwarm,
                       t_total=100,
                       b1=opt.train.static.b1,
                       b2=opt.train.static.b2,
                       e=opt.train.static.e,
                       l2=opt.train.static.l2,
                       vector_l2=opt.train.static.vl2,
                       max_grad_norm=opt.train.static.clip)
trainer = train.make_trainer(
    opt, meta, data_loader, model, optimizer)
trainer.set_evaluator(opt, model, data_loader)
trainer.opt.train.dynamic.epoch = 0
# Running evaluation feeds data through the model so the inserted observers
# can record activation ranges.
trainer.run_evaluation_cycle()
print('Post Training Quantization: Calibration done')
# Convert to quantized model
torch.quantization.convert(model, inplace=True)
print('Post Training Quantization: Convert done')
trainer.save_model()
#top1, top5 = evaluate(myModel, criterion, data_loader_test, neval_batches=num_eval_batches)
#print('Evaluation accuracy on %d images, %2.2f'%(num_eval_batches * eval_batch_size, top1.avg))
|
import torch
from comet.interactive import functions as interactive
import comet.train.atomic_train as train
from comet.train.opt import OpenAIAdam
import comet.data.config as cfg
num_calibration_batches = 10
opt, state_dict = interactive.load_model_file("models/6.25e-05_adam_64_20500.pickle")
data_loader, text_encoder = interactive.load_data("atomic", opt)
n_ctx = data_loader.max_event + data_loader.max_effect
n_vocab = len(text_encoder.encoder) + n_ctx
model = interactive.make_model(opt, n_vocab, n_ctx, state_dict).to('cpu')
model.eval()
# Specify quantization configuration
# Start with simple min/max range estimation and per-tensor quantization of weights
model.qconfig = torch.quantization.default_qconfig
print(model.qconfig)
torch.quantization.prepare(model, inplace=True)
# Calibrate first
print('Post Training Quantization Prepare: Inserting Observers')
config_file = "config/atomic/config_{}.json".format(0)
config = cfg.read_config(cfg.load_config(config_file))
opt, meta = cfg.get_parameters(config)
# Calibrate with the training set
model.eval()
optimizer = OpenAIAdam(model.parameters(),
lr=opt.train.dynamic.lr,
schedule=opt.train.static.lrsched,
warmup=opt.train.static.lrwarm,
t_total=100,
b1=opt.train.static.b1,
b2=opt.train.static.b2,
e=opt.train.static.e,
l2=opt.train.static.l2,
vector_l2=opt.train.static.vl2,
max_grad_norm=opt.train.static.clip)
trainer = train.make_trainer(
opt, meta, data_loader, model, optimizer)
trainer.set_evaluator(opt, model, data_loader)
trainer.opt.train.dynamic.epoch = 0
trainer.run_evaluation_cycle()
print('Post Training Quantization: Calibration done')
# Convert to quantized model
torch.quantization.convert(model, inplace=True)
print('Post Training Quantization: Convert done')
trainer.save_model()
#top1, top5 = evaluate(myModel, criterion, data_loader_test, neval_batches=num_eval_batches)
#print('Evaluation accuracy on %d images, %2.2f'%(num_eval_batches * eval_batch_size, top1.avg))
|
en
| 0.622898
|
# Specify quantization configuration # Start with simple min/max range estimation and per-tensor quantization of weights # Calibrate first # Calibrate with the training set # Convert to quantized model #top1, top5 = evaluate(myModel, criterion, data_loader_test, neval_batches=num_eval_batches) #print('Evaluation accuracy on %d images, %2.2f'%(num_eval_batches * eval_batch_size, top1.avg))
| 2.027939
| 2
|
src/annotateGenome/__init__.py
|
hui-sheen/annotateGenome
| 0
|
6625973
|
<filename>src/annotateGenome/__init__.py
name = "annotate_genome"
|
<filename>src/annotateGenome/__init__.py
name = "annotate_genome"
|
none
| 1
| 1.010023
| 1
|
|
examples/dbm_cifar_naive.py
|
enijkamp/rbm
| 0
|
6625974
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train 3072-5000-1000 Gaussian-Bernoulli-Multinomial
DBM with pre-training on "smoothed" CIFAR-10 (with 1000 least
significant singular values removed), as suggested in [1].
Per sample validation mean reconstruction error for DBM monotonically
decreases during training from ~0.99 to (only) ~0.5 after 1500 epochs.
The training took approx. 47m + 119m + 22h 40m ~ 1d 1h 30m on GTX 1060.
Note that DBM is trained without centering.
After models are trained, Gaussian RBM is discriminatively fine-tuned.
It achieves 59.78% accuracy on a test set.
References
----------
[1] <NAME> and <NAME>. Learning multiple layers of features
from tine images. 2009.
"""
print __doc__
import os
import argparse
import numpy as np
from scipy.linalg import svd
from keras import regularizers
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.initializers import glorot_uniform
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, BatchNormalization as BN
from sklearn.metrics import accuracy_score
import env
from bm import DBM
from bm.rbm import GaussianRBM, MultinomialRBM
from bm.utils import (RNG, Stopwatch,
one_hot, one_hot_decision_function, unhot)
from bm.utils.dataset import load_cifar10
from bm.utils.optimizers import MultiAdam
def make_smoothing(X_train, n_train, args):
X_s = None
X_s_path = os.path.join(args.data_path, 'X_s.npy')
do_smoothing = True
if os.path.isfile(X_s_path):
print "\nLoading smoothed data ..."
X_s = np.load(X_s_path)
print "Checking augmented data ..."
if len(X_s) == n_train:
do_smoothing = False
if do_smoothing:
print "\nSmoothing data ..."
X_m = X_train.mean(axis=0)
X_train -= X_m
with Stopwatch(verbose=True) as s:
[U, s, Vh] = svd(X_train,
full_matrices=False,
compute_uv=True,
overwrite_a=True,
check_finite=False)
s[-1000:] = 0.
X_s = U.dot(np.diag(s).dot(Vh))
X_s += X_m
# save to disk
np.save(X_s_path, X_s)
print "\n"
return X_s
def make_grbm((X_train, X_val), args):
if os.path.isdir(args.grbm_dirpath):
print "\nLoading G-RBM ...\n\n"
grbm = GaussianRBM.load_model(args.grbm_dirpath)
else:
print "\nTraining G-RBM ...\n\n"
grbm = GaussianRBM(n_visible=32 * 32 * 3,
n_hidden=5000,
sigma=1.,
W_init=0.0008,
vb_init=0.,
hb_init=0.,
n_gibbs_steps=args.n_gibbs_steps[0],
learning_rate=args.lr[0],
momentum=np.geomspace(0.5, 0.9, 8),
max_epoch=args.epochs[0],
batch_size=args.batch_size[0],
l2=args.l2[0],
sample_v_states=True,
sample_h_states=True,
sparsity_cost=0.,
dbm_first=True, # !!!
metrics_config=dict(
msre=True,
feg=True,
train_metrics_every_iter=1000,
val_metrics_every_epoch=2,
feg_every_epoch=2,
n_batches_for_feg=50,
),
verbose=True,
display_filters=12,
display_hidden_activations=24,
v_shape=(32, 32, 3),
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.grbm_dirpath)
grbm.fit(X_train, X_val)
return grbm
def make_mrbm((Q_train, Q_val), args):
if os.path.isdir(args.mrbm_dirpath):
print "\nLoading M-RBM ...\n\n"
mrbm = MultinomialRBM.load_model(args.mrbm_dirpath)
else:
print "\nTraining M-RBM ...\n\n"
mrbm = MultinomialRBM(n_visible=5000,
n_hidden=1000,
n_samples=1000,
W_init=0.01,
hb_init=0.,
vb_init=0.,
n_gibbs_steps=args.n_gibbs_steps[1],
learning_rate=args.lr[1],
momentum=np.geomspace(0.5, 0.9, 8),
max_epoch=args.epochs[1],
batch_size=args.batch_size[1],
l2=args.l2[1],
sample_h_states=True,
sample_v_states=False,
sparsity_cost=0.,
dbm_last=True, # !!!
metrics_config=dict(
msre=True,
pll=True,
feg=True,
train_metrics_every_iter=400,
val_metrics_every_epoch=2,
feg_every_epoch=2,
n_batches_for_feg=50,
),
verbose=True,
display_filters=0,
display_hidden_activations=100,
random_seed=1337,
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.mrbm_dirpath)
mrbm.fit(Q_train, Q_val)
return mrbm
def make_rbm_transform(rbm, X, path, np_dtype=None):
H = None
transform = True
if os.path.isfile(path):
H = np.load(path)
if len(X) == len(H):
transform = False
if transform:
H = rbm.transform(X, np_dtype=np_dtype)
np.save(path, H)
return H
def make_dbm((X_train, X_val), rbms, (Q, G), args):
if os.path.isdir(args.dbm_dirpath):
print "\nLoading DBM ...\n\n"
dbm = DBM.load_model(args.dbm_dirpath)
dbm.load_rbms(rbms) # !!!
else:
print "\nTraining DBM ...\n\n"
dbm = DBM(rbms=rbms,
n_particles=args.n_particles,
v_particle_init=X_train[:args.n_particles].copy(),
h_particles_init=(Q[:args.n_particles].copy(),
G[:args.n_particles].copy()),
n_gibbs_steps=args.n_gibbs_steps[2],
max_mf_updates=args.max_mf_updates,
mf_tol=args.mf_tol,
learning_rate=np.geomspace(args.lr[2], 1e-5, args.epochs[2]),
momentum=np.geomspace(0.5, 0.9, 10),
max_epoch=args.epochs[2],
batch_size=args.batch_size[2],
l2=args.l2[2],
max_norm=args.max_norm,
sample_v_states=True,
sample_h_states=(True, True),
sparsity_cost=0.,
train_metrics_every_iter=1000,
val_metrics_every_epoch=2,
random_seed=args.random_seed[2],
verbose=True,
save_after_each_epoch=True,
display_filters=12,
display_particles=36,
v_shape=(32, 32, 3),
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.dbm_dirpath)
dbm.fit(X_train, X_val)
return dbm
def make_mlp((X_train, y_train), (X_val, y_val), (X_test, y_test),
(W, hb), args):
dense_params = {}
if W is not None and hb is not None:
dense_params['weights'] = (W, hb)
# define and initialize MLP model
mlp = Sequential([
Dense(5000, input_shape=(3 * 32 * 32,),
kernel_regularizer=regularizers.l2(args.mlp_l2),
kernel_initializer=glorot_uniform(seed=3333),
**dense_params),
BN(),
Activation('relu'),
Dropout(args.mlp_dropout, seed=4444),
Dense(10, kernel_initializer=glorot_uniform(seed=5555)),
Activation('softmax'),
])
mlp.compile(optimizer=MultiAdam(lr=0.001,
lr_multipliers={'dense_1': args.mlp_lrm[0],
'dense_2': args.mlp_lrm[1]}),
loss='categorical_crossentropy',
metrics=['accuracy'])
# train and evaluate classifier
with Stopwatch(verbose=True) as s:
early_stopping = EarlyStopping(monitor=args.mlp_val_metric, patience=12, verbose=2)
reduce_lr = ReduceLROnPlateau(monitor=args.mlp_val_metric, factor=0.2, verbose=2,
patience=6, min_lr=1e-5)
callbacks = [early_stopping, reduce_lr]
try:
mlp.fit(X_train, one_hot(y_train, n_classes=10),
epochs=args.mlp_epochs,
batch_size=args.mlp_batch_size,
shuffle=False,
validation_data=(X_val, one_hot(y_val, n_classes=10)),
callbacks=callbacks)
except KeyboardInterrupt:
pass
y_pred = mlp.predict(X_test)
y_pred = unhot(one_hot_decision_function(y_pred), n_classes=10)
print "Test accuracy: {:.4f}".format(accuracy_score(y_test, y_pred))
# save predictions, targets, and fine-tuned weights
np.save(args.mlp_save_prefix + 'y_pred.npy', y_pred)
np.save(args.mlp_save_prefix + 'y_test.npy', y_test)
W_finetuned, _ = mlp.layers[0].get_weights()
np.save(args.mlp_save_prefix + 'W_finetuned.npy', W_finetuned)
def main():
# training settings
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# general
parser.add_argument('--gpu', type=str, default='0', metavar='ID',
help="ID of the GPU to train on (or '' to train on CPU)")
# data
parser.add_argument('--n-train', type=int, default=49000, metavar='N',
help='number of training examples')
parser.add_argument('--n-val', type=int, default=1000, metavar='N',
help='number of validation examples')
parser.add_argument('--data-path', type=str, default='../data/', metavar='PATH',
help='directory for storing augmented data etc.')
# common for RBMs and DBM
parser.add_argument('--n-gibbs-steps', type=int, default=(1, 1, 1), metavar='N', nargs='+',
help='(initial) number of Gibbs steps for CD/PCD')
parser.add_argument('--lr', type=float, default=(5e-4, 1e-4, 8e-5), metavar='LR', nargs='+',
help='(initial) learning rates')
parser.add_argument('--epochs', type=int, default=(120, 180, 1500), metavar='N', nargs='+',
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=(100, 100, 100), metavar='B', nargs='+',
help='input batch size for training, `--n-train` and `--n-val`' + \
'must be divisible by this number (for DBM)')
parser.add_argument('--l2', type=float, default=(0.01, 0.05, 1e-8), metavar='L2', nargs='+',
help='L2 weight decay coefficients')
parser.add_argument('--random-seed', type=int, default=(1337, 1111, 2222), metavar='N', nargs='+',
help='random seeds for models training')
# save dirpaths
parser.add_argument('--grbm-dirpath', type=str, default='../models/grbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save Gaussian RBM')
parser.add_argument('--mrbm-dirpath', type=str, default='../models/mrbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save Multinomial RBM')
parser.add_argument('--dbm-dirpath', type=str, default='../models/dbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save DBM')
# DBM related
parser.add_argument('--n-particles', type=int, default=100, metavar='M',
help='number of persistent Markov chains')
parser.add_argument('--max-mf-updates', type=int, default=50, metavar='N',
help='maximum number of mean-field updates per weight update')
parser.add_argument('--mf-tol', type=float, default=1e-11, metavar='TOL',
help='mean-field tolerance')
parser.add_argument('--max-norm', type=float, default=4., metavar='C',
help='maximum norm constraint')
# MLP related
parser.add_argument('--mlp-no-init', action='store_true',
help='if enabled, use random initialization')
parser.add_argument('--mlp-l2', type=float, default=1e-4, metavar='L2',
help='L2 weight decay coefficient')
parser.add_argument('--mlp-lrm', type=float, default=(0.1, 1.), metavar='LRM', nargs='+',
help='learning rate multipliers of 1e-3')
parser.add_argument('--mlp-epochs', type=int, default=100, metavar='N',
help='number of epochs to train')
parser.add_argument('--mlp-val-metric', type=str, default='val_acc', metavar='S',
help="metric on validation set to perform early stopping, {'val_acc', 'val_loss'}")
parser.add_argument('--mlp-batch-size', type=int, default=128, metavar='N',
help='input batch size for training')
parser.add_argument('--mlp-dropout', type=float, default=0.64, metavar='P',
help='probability of visible units being set to zero')
parser.add_argument('--mlp-save-prefix', type=str, default='../data/grbm_naive_', metavar='PREFIX',
help='prefix to save MLP predictions and targets')
# parse and check params
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
for x, m in (
(args.n_gibbs_steps, 3),
(args.lr, 3),
(args.epochs, 3),
(args.batch_size, 3),
(args.l2, 3),
(args.random_seed, 3),
):
if len(x) == 1:
x *= m
# prepare data (load + scale + split)
print "\nPreparing data ..."
X, y = load_cifar10(mode='train', path=args.data_path)
X = X.astype(np.float32)
X /= 255.
RNG(seed=42).shuffle(X)
RNG(seed=42).shuffle(y)
n_train = min(len(X), args.n_train)
n_val = min(len(X), args.n_val)
X_train = X[:n_train]
X_val = X[-n_val:]
y_train = y[:n_train]
y_val = y[-n_val:]
# remove 1000 least significant singular values
X_train = make_smoothing(X_train, n_train, args)
print X_train.shape
# center and normalize training data
X_s_mean = X_train.mean(axis=0)
X_s_std = X_train.std(axis=0)
mean_path = os.path.join(args.data_path, 'X_s_mean.npy')
std_path = os.path.join(args.data_path, 'X_s_std.npy')
if not os.path.isfile(mean_path):
np.save(mean_path, X_s_mean)
if not os.path.isfile(std_path):
np.save(std_path, X_s_std)
X_train -= X_s_mean
X_train /= X_s_std
X_val -= X_s_mean
X_val /= X_s_std
print "Mean: ({0:.3f}, ...); std: ({1:.3f}, ...)".format(X_train.mean(axis=0)[0],
X_train.std(axis=0)[0])
print "Range: ({0:.3f}, {1:.3f})\n\n".format(X_train.min(), X_train.max())
# pre-train Gaussian RBM
grbm = make_grbm((X_train, X_val), args)
# extract features Q = p_{G-RBM}(h|v=X)
print "\nExtracting features from G-RBM ...\n\n"
Q_train, Q_val = None, None
if not os.path.isdir(args.mrbm_dirpath) or not os.path.isdir(args.dbm_dirpath):
Q_train_path = os.path.join(args.data_path, 'Q_train_cifar_naive.npy')
Q_train = make_rbm_transform(grbm, X_train, Q_train_path)
if not os.path.isdir(args.mrbm_dirpath):
Q_val_path = os.path.join(args.data_path, 'Q_val_cifar_naive.npy')
Q_val = make_rbm_transform(grbm, X_val, Q_val_path)
# pre-train Multinomial RBM (M-RBM)
mrbm = make_mrbm((Q_train, Q_val), args)
# extract features G = p_{M-RBM}(h|v=Q)
print "\nExtracting features from M-RBM ...\n\n"
Q, G = None, None
if not os.path.isdir(args.dbm_dirpath):
Q = Q_train[:args.n_particles]
G_path = os.path.join(args.data_path, 'G_train_cifar_naive.npy')
G = make_rbm_transform(mrbm, Q, G_path)
# jointly train DBM
dbm = make_dbm((X_train, X_val), (grbm, mrbm), (Q, G), args)
# load test data
X_test, y_test = load_cifar10(mode='test', path=args.data_path)
X_test /= 255.
X_test -= X_s_mean
X_test /= X_s_std
# G-RBM discriminative fine-tuning:
# initialize MLP with learned weights,
# add FC layer and train using backprop
print "\nG-RBM Discriminative fine-tuning ...\n\n"
W, hb = None, None
if not args.mlp_no_init:
weights = grbm.get_tf_params(scope='weights')
W = weights['W']
hb = weights['hb']
make_mlp((X_train, y_train), (X_val, y_val), (X_test, y_test),
(W, hb), args)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Train 3072-5000-1000 Gaussian-Bernoulli-Multinomial
DBM with pre-training on "smoothed" CIFAR-10 (with 1000 least
significant singular values removed), as suggested in [1].
Per sample validation mean reconstruction error for DBM monotonically
decreases during training from ~0.99 to (only) ~0.5 after 1500 epochs.
The training took approx. 47m + 119m + 22h 40m ~ 1d 1h 30m on GTX 1060.
Note that DBM is trained without centering.
After models are trained, Gaussian RBM is discriminatively fine-tuned.
It achieves 59.78% accuracy on a test set.
References
----------
[1] <NAME> and <NAME>. Learning multiple layers of features
from tine images. 2009.
"""
print __doc__
import os
import argparse
import numpy as np
from scipy.linalg import svd
from keras import regularizers
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.initializers import glorot_uniform
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, BatchNormalization as BN
from sklearn.metrics import accuracy_score
import env
from bm import DBM
from bm.rbm import GaussianRBM, MultinomialRBM
from bm.utils import (RNG, Stopwatch,
one_hot, one_hot_decision_function, unhot)
from bm.utils.dataset import load_cifar10
from bm.utils.optimizers import MultiAdam
def make_smoothing(X_train, n_train, args):
X_s = None
X_s_path = os.path.join(args.data_path, 'X_s.npy')
do_smoothing = True
if os.path.isfile(X_s_path):
print "\nLoading smoothed data ..."
X_s = np.load(X_s_path)
print "Checking augmented data ..."
if len(X_s) == n_train:
do_smoothing = False
if do_smoothing:
print "\nSmoothing data ..."
X_m = X_train.mean(axis=0)
X_train -= X_m
with Stopwatch(verbose=True) as s:
[U, s, Vh] = svd(X_train,
full_matrices=False,
compute_uv=True,
overwrite_a=True,
check_finite=False)
s[-1000:] = 0.
X_s = U.dot(np.diag(s).dot(Vh))
X_s += X_m
# save to disk
np.save(X_s_path, X_s)
print "\n"
return X_s
def make_grbm((X_train, X_val), args):
if os.path.isdir(args.grbm_dirpath):
print "\nLoading G-RBM ...\n\n"
grbm = GaussianRBM.load_model(args.grbm_dirpath)
else:
print "\nTraining G-RBM ...\n\n"
grbm = GaussianRBM(n_visible=32 * 32 * 3,
n_hidden=5000,
sigma=1.,
W_init=0.0008,
vb_init=0.,
hb_init=0.,
n_gibbs_steps=args.n_gibbs_steps[0],
learning_rate=args.lr[0],
momentum=np.geomspace(0.5, 0.9, 8),
max_epoch=args.epochs[0],
batch_size=args.batch_size[0],
l2=args.l2[0],
sample_v_states=True,
sample_h_states=True,
sparsity_cost=0.,
dbm_first=True, # !!!
metrics_config=dict(
msre=True,
feg=True,
train_metrics_every_iter=1000,
val_metrics_every_epoch=2,
feg_every_epoch=2,
n_batches_for_feg=50,
),
verbose=True,
display_filters=12,
display_hidden_activations=24,
v_shape=(32, 32, 3),
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.grbm_dirpath)
grbm.fit(X_train, X_val)
return grbm
def make_mrbm((Q_train, Q_val), args):
if os.path.isdir(args.mrbm_dirpath):
print "\nLoading M-RBM ...\n\n"
mrbm = MultinomialRBM.load_model(args.mrbm_dirpath)
else:
print "\nTraining M-RBM ...\n\n"
mrbm = MultinomialRBM(n_visible=5000,
n_hidden=1000,
n_samples=1000,
W_init=0.01,
hb_init=0.,
vb_init=0.,
n_gibbs_steps=args.n_gibbs_steps[1],
learning_rate=args.lr[1],
momentum=np.geomspace(0.5, 0.9, 8),
max_epoch=args.epochs[1],
batch_size=args.batch_size[1],
l2=args.l2[1],
sample_h_states=True,
sample_v_states=False,
sparsity_cost=0.,
dbm_last=True, # !!!
metrics_config=dict(
msre=True,
pll=True,
feg=True,
train_metrics_every_iter=400,
val_metrics_every_epoch=2,
feg_every_epoch=2,
n_batches_for_feg=50,
),
verbose=True,
display_filters=0,
display_hidden_activations=100,
random_seed=1337,
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.mrbm_dirpath)
mrbm.fit(Q_train, Q_val)
return mrbm
def make_rbm_transform(rbm, X, path, np_dtype=None):
H = None
transform = True
if os.path.isfile(path):
H = np.load(path)
if len(X) == len(H):
transform = False
if transform:
H = rbm.transform(X, np_dtype=np_dtype)
np.save(path, H)
return H
def make_dbm((X_train, X_val), rbms, (Q, G), args):
if os.path.isdir(args.dbm_dirpath):
print "\nLoading DBM ...\n\n"
dbm = DBM.load_model(args.dbm_dirpath)
dbm.load_rbms(rbms) # !!!
else:
print "\nTraining DBM ...\n\n"
dbm = DBM(rbms=rbms,
n_particles=args.n_particles,
v_particle_init=X_train[:args.n_particles].copy(),
h_particles_init=(Q[:args.n_particles].copy(),
G[:args.n_particles].copy()),
n_gibbs_steps=args.n_gibbs_steps[2],
max_mf_updates=args.max_mf_updates,
mf_tol=args.mf_tol,
learning_rate=np.geomspace(args.lr[2], 1e-5, args.epochs[2]),
momentum=np.geomspace(0.5, 0.9, 10),
max_epoch=args.epochs[2],
batch_size=args.batch_size[2],
l2=args.l2[2],
max_norm=args.max_norm,
sample_v_states=True,
sample_h_states=(True, True),
sparsity_cost=0.,
train_metrics_every_iter=1000,
val_metrics_every_epoch=2,
random_seed=args.random_seed[2],
verbose=True,
save_after_each_epoch=True,
display_filters=12,
display_particles=36,
v_shape=(32, 32, 3),
dtype='float32',
tf_saver_params=dict(max_to_keep=1),
model_path=args.dbm_dirpath)
dbm.fit(X_train, X_val)
return dbm
def make_mlp((X_train, y_train), (X_val, y_val), (X_test, y_test),
(W, hb), args):
dense_params = {}
if W is not None and hb is not None:
dense_params['weights'] = (W, hb)
# define and initialize MLP model
mlp = Sequential([
Dense(5000, input_shape=(3 * 32 * 32,),
kernel_regularizer=regularizers.l2(args.mlp_l2),
kernel_initializer=glorot_uniform(seed=3333),
**dense_params),
BN(),
Activation('relu'),
Dropout(args.mlp_dropout, seed=4444),
Dense(10, kernel_initializer=glorot_uniform(seed=5555)),
Activation('softmax'),
])
mlp.compile(optimizer=MultiAdam(lr=0.001,
lr_multipliers={'dense_1': args.mlp_lrm[0],
'dense_2': args.mlp_lrm[1]}),
loss='categorical_crossentropy',
metrics=['accuracy'])
# train and evaluate classifier
with Stopwatch(verbose=True) as s:
early_stopping = EarlyStopping(monitor=args.mlp_val_metric, patience=12, verbose=2)
reduce_lr = ReduceLROnPlateau(monitor=args.mlp_val_metric, factor=0.2, verbose=2,
patience=6, min_lr=1e-5)
callbacks = [early_stopping, reduce_lr]
try:
mlp.fit(X_train, one_hot(y_train, n_classes=10),
epochs=args.mlp_epochs,
batch_size=args.mlp_batch_size,
shuffle=False,
validation_data=(X_val, one_hot(y_val, n_classes=10)),
callbacks=callbacks)
except KeyboardInterrupt:
pass
y_pred = mlp.predict(X_test)
y_pred = unhot(one_hot_decision_function(y_pred), n_classes=10)
print "Test accuracy: {:.4f}".format(accuracy_score(y_test, y_pred))
# save predictions, targets, and fine-tuned weights
np.save(args.mlp_save_prefix + 'y_pred.npy', y_pred)
np.save(args.mlp_save_prefix + 'y_test.npy', y_test)
W_finetuned, _ = mlp.layers[0].get_weights()
np.save(args.mlp_save_prefix + 'W_finetuned.npy', W_finetuned)
def main():
# training settings
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# general
parser.add_argument('--gpu', type=str, default='0', metavar='ID',
help="ID of the GPU to train on (or '' to train on CPU)")
# data
parser.add_argument('--n-train', type=int, default=49000, metavar='N',
help='number of training examples')
parser.add_argument('--n-val', type=int, default=1000, metavar='N',
help='number of validation examples')
parser.add_argument('--data-path', type=str, default='../data/', metavar='PATH',
help='directory for storing augmented data etc.')
# common for RBMs and DBM
parser.add_argument('--n-gibbs-steps', type=int, default=(1, 1, 1), metavar='N', nargs='+',
help='(initial) number of Gibbs steps for CD/PCD')
parser.add_argument('--lr', type=float, default=(5e-4, 1e-4, 8e-5), metavar='LR', nargs='+',
help='(initial) learning rates')
parser.add_argument('--epochs', type=int, default=(120, 180, 1500), metavar='N', nargs='+',
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=(100, 100, 100), metavar='B', nargs='+',
help='input batch size for training, `--n-train` and `--n-val`' + \
'must be divisible by this number (for DBM)')
parser.add_argument('--l2', type=float, default=(0.01, 0.05, 1e-8), metavar='L2', nargs='+',
help='L2 weight decay coefficients')
parser.add_argument('--random-seed', type=int, default=(1337, 1111, 2222), metavar='N', nargs='+',
help='random seeds for models training')
# save dirpaths
parser.add_argument('--grbm-dirpath', type=str, default='../models/grbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save Gaussian RBM')
parser.add_argument('--mrbm-dirpath', type=str, default='../models/mrbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save Multinomial RBM')
parser.add_argument('--dbm-dirpath', type=str, default='../models/dbm_cifar_naive/', metavar='DIRPATH',
help='directory path to save DBM')
# DBM related
parser.add_argument('--n-particles', type=int, default=100, metavar='M',
help='number of persistent Markov chains')
parser.add_argument('--max-mf-updates', type=int, default=50, metavar='N',
help='maximum number of mean-field updates per weight update')
parser.add_argument('--mf-tol', type=float, default=1e-11, metavar='TOL',
help='mean-field tolerance')
parser.add_argument('--max-norm', type=float, default=4., metavar='C',
help='maximum norm constraint')
# MLP related
parser.add_argument('--mlp-no-init', action='store_true',
help='if enabled, use random initialization')
parser.add_argument('--mlp-l2', type=float, default=1e-4, metavar='L2',
help='L2 weight decay coefficient')
parser.add_argument('--mlp-lrm', type=float, default=(0.1, 1.), metavar='LRM', nargs='+',
help='learning rate multipliers of 1e-3')
parser.add_argument('--mlp-epochs', type=int, default=100, metavar='N',
help='number of epochs to train')
parser.add_argument('--mlp-val-metric', type=str, default='val_acc', metavar='S',
help="metric on validation set to perform early stopping, {'val_acc', 'val_loss'}")
parser.add_argument('--mlp-batch-size', type=int, default=128, metavar='N',
help='input batch size for training')
parser.add_argument('--mlp-dropout', type=float, default=0.64, metavar='P',
help='probability of visible units being set to zero')
parser.add_argument('--mlp-save-prefix', type=str, default='../data/grbm_naive_', metavar='PREFIX',
help='prefix to save MLP predictions and targets')
# parse and check params
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
for x, m in (
(args.n_gibbs_steps, 3),
(args.lr, 3),
(args.epochs, 3),
(args.batch_size, 3),
(args.l2, 3),
(args.random_seed, 3),
):
if len(x) == 1:
x *= m
# prepare data (load + scale + split)
print "\nPreparing data ..."
X, y = load_cifar10(mode='train', path=args.data_path)
X = X.astype(np.float32)
X /= 255.
RNG(seed=42).shuffle(X)
RNG(seed=42).shuffle(y)
n_train = min(len(X), args.n_train)
n_val = min(len(X), args.n_val)
X_train = X[:n_train]
X_val = X[-n_val:]
y_train = y[:n_train]
y_val = y[-n_val:]
# remove 1000 least significant singular values
X_train = make_smoothing(X_train, n_train, args)
print X_train.shape
# center and normalize training data
X_s_mean = X_train.mean(axis=0)
X_s_std = X_train.std(axis=0)
mean_path = os.path.join(args.data_path, 'X_s_mean.npy')
std_path = os.path.join(args.data_path, 'X_s_std.npy')
if not os.path.isfile(mean_path):
np.save(mean_path, X_s_mean)
if not os.path.isfile(std_path):
np.save(std_path, X_s_std)
X_train -= X_s_mean
X_train /= X_s_std
X_val -= X_s_mean
X_val /= X_s_std
print "Mean: ({0:.3f}, ...); std: ({1:.3f}, ...)".format(X_train.mean(axis=0)[0],
X_train.std(axis=0)[0])
print "Range: ({0:.3f}, {1:.3f})\n\n".format(X_train.min(), X_train.max())
# pre-train Gaussian RBM
grbm = make_grbm((X_train, X_val), args)
# extract features Q = p_{G-RBM}(h|v=X)
print "\nExtracting features from G-RBM ...\n\n"
Q_train, Q_val = None, None
if not os.path.isdir(args.mrbm_dirpath) or not os.path.isdir(args.dbm_dirpath):
Q_train_path = os.path.join(args.data_path, 'Q_train_cifar_naive.npy')
Q_train = make_rbm_transform(grbm, X_train, Q_train_path)
if not os.path.isdir(args.mrbm_dirpath):
Q_val_path = os.path.join(args.data_path, 'Q_val_cifar_naive.npy')
Q_val = make_rbm_transform(grbm, X_val, Q_val_path)
# pre-train Multinomial RBM (M-RBM)
mrbm = make_mrbm((Q_train, Q_val), args)
# extract features G = p_{M-RBM}(h|v=Q)
print "\nExtracting features from M-RBM ...\n\n"
Q, G = None, None
if not os.path.isdir(args.dbm_dirpath):
Q = Q_train[:args.n_particles]
G_path = os.path.join(args.data_path, 'G_train_cifar_naive.npy')
G = make_rbm_transform(mrbm, Q, G_path)
# jointly train DBM
dbm = make_dbm((X_train, X_val), (grbm, mrbm), (Q, G), args)
# load test data
X_test, y_test = load_cifar10(mode='test', path=args.data_path)
X_test /= 255.
X_test -= X_s_mean
X_test /= X_s_std
# G-RBM discriminative fine-tuning:
# initialize MLP with learned weights,
# add FC layer and train using backprop
print "\nG-RBM Discriminative fine-tuning ...\n\n"
W, hb = None, None
if not args.mlp_no_init:
weights = grbm.get_tf_params(scope='weights')
W = weights['W']
hb = weights['hb']
make_mlp((X_train, y_train), (X_val, y_val), (X_test, y_test),
(W, hb), args)
if __name__ == '__main__':
main()
|
en
| 0.842494
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Train 3072-5000-1000 Gaussian-Bernoulli-Multinomial DBM with pre-training on "smoothed" CIFAR-10 (with 1000 least significant singular values removed), as suggested in [1]. Per sample validation mean reconstruction error for DBM monotonically decreases during training from ~0.99 to (only) ~0.5 after 1500 epochs. The training took approx. 47m + 119m + 22h 40m ~ 1d 1h 30m on GTX 1060. Note that DBM is trained without centering. After models are trained, Gaussian RBM is discriminatively fine-tuned. It achieves 59.78% accuracy on a test set. References ---------- [1] <NAME> and <NAME>. Learning multiple layers of features from tine images. 2009. # save to disk # !!! # !!! # !!! # define and initialize MLP model # train and evaluate classifier # save predictions, targets, and fine-tuned weights # training settings # general # data # common for RBMs and DBM # save dirpaths # DBM related # MLP related # parse and check params # prepare data (load + scale + split) # remove 1000 least significant singular values # center and normalize training data # pre-train Gaussian RBM # extract features Q = p_{G-RBM}(h|v=X) # pre-train Multinomial RBM (M-RBM) # extract features G = p_{M-RBM}(h|v=Q) # jointly train DBM # load test data # G-RBM discriminative fine-tuning: # initialize MLP with learned weights, # add FC layer and train using backprop
| 2.481646
| 2
|
insights/formats/_yaml.py
|
dehort/insights-core
| 0
|
6625975
|
import yaml
from insights.formats import EvaluatorFormatter
class YamlFormatter(EvaluatorFormatter):
    """Evaluator output formatter that renders results as YAML."""

    def dump(self, data):
        """Serialize *data* to a YAML document string via PyYAML."""
        rendered = yaml.dump(data)
        return rendered
|
import yaml
from insights.formats import EvaluatorFormatter
class YamlFormatter(EvaluatorFormatter):
def dump(self, data):
return yaml.dump(data)
|
none
| 1
| 2.241846
| 2
|
|
networkapi/plugins/Cisco/NXOS/plugin.py
|
vinicius-marinho/GloboNetworkAPI
| 73
|
6625976
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from ...base import BasePlugin
from networkapi.plugins.Cisco.NXOS.BGP.Cli import Generic as BGP
from networkapi.util.decorators import mock_return
# from time import sleep
# import string
# import unicodedata
# from networkapi.api_rest import exceptions as api_exceptions
# from networkapi.plugins import exceptions as base_exceptions
log = logging.getLogger(__name__)
class NXOS(BasePlugin):
    """Equipment plugin for Cisco NX-OS switches.

    Drives the device over the SSH channel supplied by ``BasePlugin``:
    SVI creation/removal, TFTP config copy and privilege escalation.
    """

    # NOTE(review): -1 means any reported numeric level satisfies the
    # check in ensure_privilege_level, so "enable" is effectively never
    # sent unless a caller passes an explicit higher level -- confirm.
    admin_privileges = -1
    management_vrf = 'management'
    VALID_TFTP_GET_MESSAGE = 'Copy complete.|Copy complete, now saving to disk'
    ERROR_REGEX = '[Ee][Rr][Rr][Oo][Rr]|[Ff]ail|utility is occupied'

    def bgp(self):
        """Return a BGP CLI handler bound to this plugin's equipment."""
        return BGP(equipment=self.equipment)

    @mock_return('')
    def create_svi(self, svi_number, svi_description='no description'):
        """
        Create SVI in switch

        :param svi_number: VLAN id of the SVI to create.
        :param svi_description: description text applied to the interface.
        :returns: raw CLI output captured after the command.
        """
        # BUG FIX: was self.ensure_privilege_level(self), which passed the
        # plugin instance as privilege_level and broke the numeric
        # comparison inside ensure_privilege_level (cf. remove_svi below).
        self.ensure_privilege_level()
        self.channel.send('terminal length 0\nconfigure terminal\n \
            interface Vlan%s \n description %s \n end \n' % (svi_number, svi_description))
        recv = self.waitString('#')
        return recv

    @mock_return('')
    def copyScriptFileToConfig(self, filename, use_vrf=None, destination='running-config'):
        """
        Copy file from TFTP server to destination

        By default, plugin should apply file in running configuration
        (active). Blocks until the TFTP success banner is seen.

        :param filename: file name on the TFTP server.
        :param use_vrf: VRF for the copy; defaults to the management VRF.
        :param destination: target config (e.g. running-config).
        :returns: raw CLI output containing the copy result.
        """
        # 1.1 this should be removed in the future, we have to prepare db
        # entries first; use_vrf is not used when this method is called
        if use_vrf is None:
            use_vrf = self.management_vrf
        # 1.2 only this check should be left - use_vrf must be used
        if use_vrf:
            command = 'copy tftp://%s/%s %s vrf %s\n\n' % (
                self.tftpserver, filename, destination, use_vrf)
        else:
            command = 'copy tftp://%s/%s %s\n\n' % (
                self.tftpserver, filename, destination)
        log.info('sending command: %s' % command)
        self.channel.send('%s\n' % command)
        recv = self.waitString(self.VALID_TFTP_GET_MESSAGE)
        return recv

    @mock_return('')
    def ensure_privilege_level(self, privilege_level=None):
        """Escalate with ``enable`` if the current level is below target.

        :param privilege_level: minimum required numeric level; defaults
            to ``admin_privileges``.
        """
        if privilege_level is None:
            privilege_level = self.admin_privileges
        recv = self.waitString('>|#')
        self.channel.send('show privilege\n')
        recv = self.waitString('Current privilege level:')
        level = re.search(
            'Current privilege level: (-?[0-9]+?).*', recv, re.DOTALL).group(1)
        level = (level.split(' '))[-1]
        if int(level) < privilege_level:
            self.channel.send('enable\n')
            recv = self.waitString('Password:')
            self.channel.send('%s\n' % self.equipment_access.enable_pass)
            recv = self.waitString('#')

    @mock_return('')
    def remove_svi(self, svi_number):
        """
        Delete SVI from switch

        :param svi_number: VLAN id of the SVI to remove.
        :returns: raw CLI output captured after the command.
        """
        self.ensure_privilege_level()
        self.channel.send(
            'terminal length 0\nconfigure terminal\nno interface Vlan%s \n end \n' % (svi_number))
        recv = self.waitString('#')
        return recv
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from ...base import BasePlugin
from networkapi.plugins.Cisco.NXOS.BGP.Cli import Generic as BGP
from networkapi.util.decorators import mock_return
# from time import sleep
# import string
# import unicodedata
# from networkapi.api_rest import exceptions as api_exceptions
# from networkapi.plugins import exceptions as base_exceptions
log = logging.getLogger(__name__)
class NXOS(BasePlugin):
admin_privileges = -1
management_vrf = 'management'
VALID_TFTP_GET_MESSAGE = 'Copy complete.|Copy complete, now saving to disk'
ERROR_REGEX = '[Ee][Rr][Rr][Oo][Rr]|[Ff]ail|utility is occupied'
def bgp(self):
return BGP(equipment=self.equipment)
@mock_return('')
def create_svi(self, svi_number, svi_description='no description'):
"""
Create SVI in switch
"""
self.ensure_privilege_level(self)
self.channel.send('terminal length 0\nconfigure terminal\n \
interface Vlan%s \n description %s \n end \n' % (svi_number, svi_description))
recv = self.waitString('#')
return recv
@mock_return('')
def copyScriptFileToConfig(self, filename, use_vrf=None, destination='running-config'):
"""
Copy file from TFTP server to destination
By default, plugin should apply file in running configuration (active)
"""
#1.1 this should be removed in the future, we have to prepare db entries first
# use_vrf is not used when this method is called
if use_vrf is None:
use_vrf = self.management_vrf
#1.2 only this check should be left - use_vrf must be used
if use_vrf:
command = 'copy tftp://%s/%s %s vrf %s\n\n' % (
self.tftpserver, filename, destination, use_vrf)
else:
command = 'copy tftp://%s/%s %s\n\n' % (
self.tftpserver, filename, destination)
log.info('sending command: %s' % command)
self.channel.send('%s\n' % command)
recv = self.waitString(self.VALID_TFTP_GET_MESSAGE)
return recv
@mock_return('')
def ensure_privilege_level(self, privilege_level=None):
if privilege_level is None:
privilege_level = self.admin_privileges
recv = self.waitString('>|#')
self.channel.send('show privilege\n')
recv = self.waitString('Current privilege level:')
level = re.search(
'Current privilege level: (-?[0-9]+?).*', recv, re.DOTALL).group(1)
level = (level.split(' '))[-1]
if int(level) < privilege_level:
self.channel.send('enable\n')
recv = self.waitString('Password:')
self.channel.send('%s\n' % self.equipment_access.enable_pass)
recv = self.waitString('#')
@mock_return('')
def remove_svi(self, svi_number):
"""
Delete SVI from switch
"""
self.ensure_privilege_level()
self.channel.send(
'terminal length 0\nconfigure terminal\nno interface Vlan%s \n end \n' % (svi_number))
recv = self.waitString('#')
return recv
|
en
| 0.845558
|
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from time import sleep # import string # import unicodedata # from networkapi.api_rest import exceptions as api_exceptions # from networkapi.plugins import exceptions as base_exceptions Create SVI in switch Copy file from TFTP server to destination By default, plugin should apply file in running configuration (active) #1.1 this should be removed in the future, we have to prepare db entries first # use_vrf is not used when this method is called #1.2 only this check should be left - use_vrf must be used #') Delete SVI from switch
| 1.739264
| 2
|
scripts/create_fluseverity_figs_v5/S_seasonRR_benchmark_v5.py
|
eclee25/flu-SDI-exploratory-age
| 3
|
6625977
|
#!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 1/20/15
###Function: relative risk of adult ILI to child ILI visits for the entire season vs. CDC benchmark index, mean Thanksgiving-based early zOR metric vs. CDC benchmark index.
# 7/20/15: update beta
# 10/8/15: rm lines, color points, add p-values
###Import data: /home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv, SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop_age.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python S_seasonRR_benchmark_v5.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
## local modules ##
import functions_v5 as fxn
### data structures ###
### local functions ###
def entireSeasonRR(dict_ageILIadj_season, dict_pop, seasonnum):
	'''Relative risk (adult:child) of adjusted ILI visits over the full
	flu season, weeks 40 through 20 (the first fw weeks of the season).
	'''
	adult_ILI = sum(dict_ageILIadj_season[(seasonnum, 'A')][:fw])
	child_ILI = sum(dict_ageILIadj_season[(seasonnum, 'C')][:fw])
	# scale the visit ratio by the child:adult population ratio
	return (adult_ILI / child_ILI) * (dict_pop[(seasonnum, 'C')] / dict_pop[(seasonnum, 'A')])
def tightSeasonRR(dict_ageILIadj_season, dict_pop, seasonnum):
	'''Relative risk (adult:child) of adjusted ILI visits over the tight
	flu season, weeks 50 through 12 (list indices 10 to fw-7).
	'''
	adult_ILI = sum(dict_ageILIadj_season[(seasonnum, 'A')][10:fw - 7])
	child_ILI = sum(dict_ageILIadj_season[(seasonnum, 'C')][10:fw - 7])
	# scale the visit ratio by the child:adult population ratio
	return (adult_ILI / child_ILI) * (dict_pop[(seasonnum, 'C')] / dict_pop[(seasonnum, 'A')])
def nonfluSeasonRR(dict_ageILIadj_season, dict_pop, seasonnum):
	'''Relative risk (adult:child) of adjusted ILI visits during weeks 21
	to 39, the summer period after the flu season (indices fw onward).
	'''
	adult_ILI = sum(dict_ageILIadj_season[(seasonnum, 'A')][fw:])
	child_ILI = sum(dict_ageILIadj_season[(seasonnum, 'C')][fw:])
	# scale the visit ratio by the child:adult population ratio
	return (adult_ILI / child_ILI) * (dict_pop[(seasonnum, 'C')] / dict_pop[(seasonnum, 'A')])
### data files ###
# NOTE(review): Python 2 script (print statements) with absolute,
# user-specific data paths -- not portable as written.
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/benchmark_ixTavg_altnorm_comparisons.csv','r')
ixin.readline()  # skip header row
ix = csv.reader(ixin, delimiter=',')
ix2in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/benchmark_ixTavg_altnorm_comparisons.csv','r')
ix2in.readline()  # skip header row
ix2 = csv.reader(ix2in, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons  # seasons to plot
sl = fxn.gp_seasonlabels  # season labels for point annotations
fs = 24  # axis label font size
fssml = 16  # tick/annotation font size
fw = fxn.gp_fluweeks  # number of flu-season weeks (used by the RR functions above)
bench_ix, q_ix = 1, 7  # column indices into the benchmark csv
sevCol = fxn.gp_mild_severe_colors
### program ###
# import data
# d_benchmark[seasonnum] = CDC benchmark index value
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_benchmark = fxn.benchmark_import(ix, bench_ix)
d_qual_classif = fxn.benchmark_import(ix2, q_ix)
# dict_wk[wk] = seasonnum
# dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
# dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior)
# dict_ageILIadj_season[(season, age)] = [ILI * (visits in flu season 9)/(visits in flu season #)/(ILI care-seeking behavior) wk 40, ...wk 39]
# dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
# dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.week_OR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zRR_processing(d_wk, d_totIncidAdj53ls, d_zRR53ls)
# plot values: one entry per season, in ps order
benchmark = [d_benchmark[s] for s in ps]
fluSeason_RR = [entireSeasonRR(d_ageILIadj_season, d_pop, s) for s in ps]
nonfluSeason_RR = [nonfluSeasonRR(d_ageILIadj_season, d_pop, s) for s in ps]
tightfluSeason_RR = [tightSeasonRR(d_ageILIadj_season, d_pop, s) for s in ps]
vals = zip(benchmark, fluSeason_RR, nonfluSeason_RR, tightfluSeason_RR)
d_plotData = dict(zip(ps, vals))
d_plotCol = fxn.gp_CDCclassif_ix
# updated 10/8/15: report Pearson correlations against the benchmark index
print 'entire flu season (40 to 20) corr coef', scipy.stats.pearsonr(benchmark, fluSeason_RR) # R = 0.789, p-value = 0.020
print 'non flu season corr coef', scipy.stats.pearsonr(benchmark, nonfluSeason_RR) # R = 0.217, p-value = 0.606
print 'tight flu season (50 to 12) corr coef', scipy.stats.pearsonr(benchmark, tightfluSeason_RR) # R = 0.825, p-value = 0.012
# draw plots
# fig1 = plt.figure()
# ax1 = fig1.add_subplot(1,1,1)
# # flu season RR vs. benchmark index
# for key in d_plotCol:
# 	ax1.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][1] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None')
# ax1.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml)
# ax1.annotate('Severe', xy=(1.1,0.5), fontsize=fssml)
# for s, x, y in zip(sl, benchmark, fluSeason_RR):
# 	ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
# ax1.set_ylabel('Flu Season RR (R=0.79)', fontsize=fs)
# ax1.set_xlabel(fxn.gp_benchmark, fontsize=fs)
# ax1.tick_params(axis='both', labelsize=fssml)
# ax1.set_xlim([-1.5,1.5])
# ax1.set_ylim([0,0.6])
# plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/seasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
# plt.close()
# # plt.show()
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
# nonflu season vs. benchmark index; point color encodes CDC severity class
for key in d_plotCol:
	ax2.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][2] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None')
ax2.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml, color = sevCol[0])
ax2.annotate('Severe', xy=(1.1,0.5), fontsize=fssml, color = sevCol[1])
for s, x, y in zip(sl, benchmark, nonfluSeason_RR):
	ax2.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax2.set_ylabel('Weeks 21 to 39 RR, adult:child', fontsize=fs)
ax2.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-1.5,1.5])
ax2.set_ylim([0,0.6])
plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/nonfluseasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
# tight flu season RR vs. benchmark index; same layout as fig2
for key in d_plotCol:
	ax3.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][3] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None')
ax3.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml, color = sevCol[0])
ax3.annotate('Severe', xy=(1.1,0.5), fontsize=fssml, color = sevCol[1])
for s, x, y in zip(sl, benchmark, tightfluSeason_RR):
	ax3.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax3.set_ylabel('Weeks 50 to 12 RR, adult:child', fontsize=fs)
ax3.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax3.tick_params(axis='both', labelsize=fssml)
ax3.set_xlim([-1.5,1.5])
ax3.set_ylim([0,0.6])
plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/tightseasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
|
#!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 1/20/15
###Function: relative risk of adult ILI to child ILI visits for the entire season vs. CDC benchmark index, mean Thanksgiving-based early zOR metric vs. CDC benchmark index.
# 7/20/15: update beta
# 10/8/15: rm lines, color points, add p-values
###Import data: /home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv, SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop_age.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python S_seasonRR_benchmark_v5.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
## local modules ##
import functions_v5 as fxn
### data structures ###
### local functions ###
def entireSeasonRR(dict_ageILIadj_season, dict_pop, seasonnum):
''' Calculate relative risk based off of adjusted ILI visits from weeks 40 through 20 in flu season.
'''
ILI_ratio = sum(dict_ageILIadj_season[(seasonnum,'A')][:fw])/sum(dict_ageILIadj_season[(seasonnum,'C')][:fw])
pop_ratio = (dict_pop[(seasonnum, 'C')])/(dict_pop[(seasonnum, 'A')])
return ILI_ratio * pop_ratio
def tightSeasonRR(dict_ageILIadj_season, dict_pop, seasonnum):
''' Calculate relative risk based off of adjusted ILI visits from weeks 50 through 12 in flu season.
'''
ILI_ratio = sum(dict_ageILIadj_season[(seasonnum,'A')][10:fw-7])/sum(dict_ageILIadj_season[(seasonnum,'C')][10:fw-7])
pop_ratio = (dict_pop[(seasonnum, 'C')])/(dict_pop[(seasonnum, 'A')])
return ILI_ratio * pop_ratio
def nonfluSeasonRR(dict_ageILIadj_season, dict_pop, seasonnum):
''' Calculate relative risk based off of adjusted ILI visits from weeks 21 to 39, which occurs during the summer after the flu season.
'''
ILI_ratio = sum(dict_ageILIadj_season[(seasonnum,'A')][fw:])/sum(dict_ageILIadj_season[(seasonnum,'C')][fw:])
pop_ratio = (dict_pop[(seasonnum, 'C')])/(dict_pop[(seasonnum, 'A')])
return ILI_ratio * pop_ratio
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/benchmark_ixTavg_altnorm_comparisons.csv','r')
ixin.readline()
ix = csv.reader(ixin, delimiter=',')
ix2in = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/R_export/benchmark_ixTavg_altnorm_comparisons.csv','r')
ix2in.readline()
ix2 = csv.reader(ix2in, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fs = 24
fssml = 16
fw = fxn.gp_fluweeks
bench_ix, q_ix = 1, 7
sevCol = fxn.gp_mild_severe_colors
### program ###
# import data
# d_benchmark[seasonnum] = CDC benchmark index value
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_benchmark = fxn.benchmark_import(ix, bench_ix)
d_qual_classif = fxn.benchmark_import(ix2, q_ix)
# dict_wk[wk] = seasonnum
# dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
# dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior)
# dict_ageILIadj_season[(season, age)] = [ILI * (visits in flu season 9)/(visits in flu season #)/(ILI care-seeking behavior) wk 40, ...wk 39]
# dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
# dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.week_OR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)
# d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR)
d_classifzOR = fxn.classif_zRR_processing(d_wk, d_totIncidAdj53ls, d_zRR53ls)
# plot values
benchmark = [d_benchmark[s] for s in ps]
fluSeason_RR = [entireSeasonRR(d_ageILIadj_season, d_pop, s) for s in ps]
nonfluSeason_RR = [nonfluSeasonRR(d_ageILIadj_season, d_pop, s) for s in ps]
tightfluSeason_RR = [tightSeasonRR(d_ageILIadj_season, d_pop, s) for s in ps]
vals = zip(benchmark, fluSeason_RR, nonfluSeason_RR, tightfluSeason_RR)
d_plotData = dict(zip(ps, vals))
d_plotCol = fxn.gp_CDCclassif_ix
# updated 10/8/15
print 'entire flu season (40 to 20) corr coef', scipy.stats.pearsonr(benchmark, fluSeason_RR) # R = 0.789, p-value = 0.020
print 'non flu season corr coef', scipy.stats.pearsonr(benchmark, nonfluSeason_RR) # R = 0.217, p-value = 0.606
print 'tight flu season (50 to 12) corr coef', scipy.stats.pearsonr(benchmark, tightfluSeason_RR) # R = 0.825, p-value = 0.012
# draw plots
# fig1 = plt.figure()
# ax1 = fig1.add_subplot(1,1,1)
# # flu season RR vs. benchmark index
# for key in d_plotCol:
# ax1.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][1] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None')
# ax1.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml)
# ax1.annotate('Severe', xy=(1.1,0.5), fontsize=fssml)
# for s, x, y in zip(sl, benchmark, fluSeason_RR):
# ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
# ax1.set_ylabel('Flu Season RR (R=0.79)', fontsize=fs)
# ax1.set_xlabel(fxn.gp_benchmark, fontsize=fs)
# ax1.tick_params(axis='both', labelsize=fssml)
# ax1.set_xlim([-1.5,1.5])
# ax1.set_ylim([0,0.6])
# plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/seasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
# plt.close()
# # plt.show()
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
# nonflu season vs. benchmark index
for key in d_plotCol:
ax2.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][2] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None')
ax2.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml, color = sevCol[0])
ax2.annotate('Severe', xy=(1.1,0.5), fontsize=fssml, color = sevCol[1])
for s, x, y in zip(sl, benchmark, nonfluSeason_RR):
ax2.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax2.set_ylabel('Weeks 21 to 39 RR, adult:child', fontsize=fs)
ax2.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-1.5,1.5])
ax2.set_ylim([0,0.6])
plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/nonfluseasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
# tight flu season RR vs. benchmark index
for key in d_plotCol:
ax3.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][3] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None')
ax3.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml, color = sevCol[0])
ax3.annotate('Severe', xy=(1.1,0.5), fontsize=fssml, color = sevCol[1])
for s, x, y in zip(sl, benchmark, tightfluSeason_RR):
ax3.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml)
ax3.set_ylabel('Weeks 50 to 12 RR, adult:child', fontsize=fs)
ax3.set_xlabel(fxn.gp_benchmark, fontsize=fs)
ax3.tick_params(axis='both', labelsize=fssml)
ax3.set_xlim([-1.5,1.5])
ax3.set_ylim([0,0.6])
plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/tightseasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
|
en
| 0.590021
|
#!/usr/bin/python ############################################## ###Python template ###Author: <NAME> ###Date: 1/20/15 ###Function: relative risk of adult ILI to child ILI visits for the entire season vs. CDC benchmark index, mean Thanksgiving-based early zOR metric vs. CDC benchmark index. # 7/20/15: update beta # 10/8/15: rm lines, color points, add p-values ###Import data: /home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv, SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop_age.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv ###Command Line: python S_seasonRR_benchmark_v5.py ############################################## ### notes ### ### packages/modules ### ## local modules ## ### data structures ### ### local functions ### Calculate relative risk based off of adjusted ILI visits from weeks 40 through 20 in flu season. Calculate relative risk based off of adjusted ILI visits from weeks 50 through 12 in flu season. Calculate relative risk based off of adjusted ILI visits from weeks 21 to 39, which occurs during the summer after the flu season. ### data files ### ### called/local plotting parameters ### ### program ### # import data # d_benchmark[seasonnum] = CDC benchmark index value # d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR) # dict_wk[wk] = seasonnum # dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence) # dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior) # dict_ageILIadj_season[(season, age)] = [ILI * (visits in flu season 9)/(visits in flu season #)/(ILI care-seeking behavior) wk 40, ...wk 39] # dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior) # dict_zRR53ls[s] = [zRR wk 40,... 
zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior) # d_classifzOR[seasonnum] = (mean retrospective zOR, mean early warning zOR) # plot values # updated 10/8/15 # R = 0.789, p-value = 0.020 # R = 0.217, p-value = 0.606 # R = 0.825, p-value = 0.012 # draw plots # fig1 = plt.figure() # ax1 = fig1.add_subplot(1,1,1) # # flu season RR vs. benchmark index # for key in d_plotCol: # ax1.plot([d_plotData[k][0] for k in d_plotCol[key]], [d_plotData[k][1] for k in d_plotCol[key]], marker = 'o', color = key, linestyle = 'None') # ax1.annotate('Mild', xy=(-1.4,0.1), fontsize=fssml) # ax1.annotate('Severe', xy=(1.1,0.5), fontsize=fssml) # for s, x, y in zip(sl, benchmark, fluSeason_RR): # ax1.annotate(s, xy=(x,y), xytext=(-10,5), textcoords='offset points', fontsize=fssml) # ax1.set_ylabel('Flu Season RR (R=0.79)', fontsize=fs) # ax1.set_xlabel(fxn.gp_benchmark, fontsize=fs) # ax1.tick_params(axis='both', labelsize=fssml) # ax1.set_xlim([-1.5,1.5]) # ax1.set_ylim([0,0.6]) # plt.savefig('/home/elee/Dropbox (Bansal Lab)/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/SIFigures/seasonRR_benchmark.png', transparent=False, bbox_inches='tight', pad_inches=0) # plt.close() # # plt.show() # nonflu season vs. benchmark index # tight flu season RR vs. benchmark index
| 1.844682
| 2
|
clickhouse_driver/settings/types.py
|
vsmaxim/clickhouse-driver
| 17
|
6625978
|
<gh_stars>10-100
from ..varint import write_varint
from ..writer import write_binary_str
class SettingType(object):
    """Abstract base for ClickHouse setting serializers."""
    @classmethod
    def write(cls, value, buf):
        # Subclasses must implement the wire encoding for their type.
        raise NotImplementedError
class SettingUInt64(SettingType):
    """Unsigned 64-bit integer setting, encoded as a varint."""

    @classmethod
    def write(cls, value, buf):
        """Coerce *value* to int and write it as a varint."""
        as_int = int(value)
        write_varint(as_int, buf)
class SettingBool(SettingType):
    """Boolean setting, encoded as a varint (0 or 1)."""

    @classmethod
    def write(cls, value, buf):
        """Coerce *value* to bool and write it as a varint."""
        flag = bool(value)
        write_varint(flag, buf)
class SettingString(SettingType):
    """String setting; encoding is delegated to write_binary_str."""
    @classmethod
    def write(cls, value, buf):
        write_binary_str(value, buf)
class SettingChar(SettingType):
    """Single-character setting; only the first character is sent."""

    @classmethod
    def write(cls, value, buf):
        """Write value[0] (raises IndexError when *value* is empty)."""
        first_char = value[0]
        write_binary_str(first_char, buf)
class SettingFloat(SettingType):
    """Float setting.

    Float is written in string representation (str(value)), not as a
    binary float.
    """

    @classmethod
    def write(cls, value, buf):
        text_repr = str(value)
        write_binary_str(text_repr, buf)
class SettingMaxThreads(SettingUInt64):
    """max_threads setting; the special string 'auto' is sent as 0."""

    @classmethod
    def write(cls, value, buf):
        """Translate 'auto' to 0, then delegate to the UInt64 encoding."""
        normalized = 0 if value == 'auto' else value
        super(SettingMaxThreads, cls).write(normalized, buf)
|
from ..varint import write_varint
from ..writer import write_binary_str
class SettingType(object):
@classmethod
def write(cls, value, buf):
raise NotImplementedError
class SettingUInt64(SettingType):
@classmethod
def write(cls, value, buf):
write_varint(int(value), buf)
class SettingBool(SettingType):
@classmethod
def write(cls, value, buf):
write_varint(bool(value), buf)
class SettingString(SettingType):
@classmethod
def write(cls, value, buf):
write_binary_str(value, buf)
class SettingChar(SettingType):
@classmethod
def write(cls, value, buf):
write_binary_str(value[0], buf)
class SettingFloat(SettingType):
@classmethod
def write(cls, value, buf):
"""
Float is written in string representation.
"""
write_binary_str(str(value), buf)
class SettingMaxThreads(SettingUInt64):
@classmethod
def write(cls, value, buf):
if value == 'auto':
value = 0
super(SettingMaxThreads, cls).write(value, buf)
|
en
| 0.950204
|
Float is written in string representation.
| 2.866163
| 3
|
tests/test_query.py
|
Mc01/graphene-pydantic
| 0
|
6625979
|
<filename>tests/test_query.py
import uuid
import pydantic
import graphene
from graphene_pydantic_fix import PydanticObjectType
class FooModel(pydantic.BaseModel):
    """Pydantic model with a UUID id and a name, used as test fixture."""
    id: uuid.UUID
    name: str
class Foo(PydanticObjectType):
    """Graphene object type derived automatically from FooModel."""
    class Meta:
        model = FooModel
class Query(graphene.ObjectType):
    """Root GraphQL query exposing a single listFoos field."""
    list_foos = graphene.List(Foo)
    def resolve_list_foos(self, info):
        """Dummy resolver that creates a list of Pydantic objects"""
        return [
            FooModel(id=uuid.uuid4(), name="foo"),
            FooModel(id=uuid.uuid4(), name="bar"),
        ]
def test_query():
    """Execute the listFoos query synchronously and check the payload."""
    # graphql-core 2 API: explicit executor + return_promise keyword.
    # NOTE(review): this import path does not exist in graphql-core 3 --
    # confirm the pinned graphene/graphql-core versions.
    from graphql.execution.executors.sync import SyncExecutor
    schema = graphene.Schema(query=Query)
    result = schema.execute(
        """
        query {
            listFoos {
                id
                name
            }
        }
        """,
        executor=SyncExecutor(),
        return_promise=False,
    )
    # No errors, data present, and names arrive in resolver order.
    assert result.errors is None
    assert result.data is not None
    assert [x["name"] for x in result.data["listFoos"]] == ["foo", "bar"]
|
<filename>tests/test_query.py
import uuid
import pydantic
import graphene
from graphene_pydantic_fix import PydanticObjectType
class FooModel(pydantic.BaseModel):
id: uuid.UUID
name: str
class Foo(PydanticObjectType):
class Meta:
model = FooModel
class Query(graphene.ObjectType):
list_foos = graphene.List(Foo)
def resolve_list_foos(self, info):
"""Dummy resolver that creates a list of Pydantic objects"""
return [
FooModel(id=uuid.uuid4(), name="foo"),
FooModel(id=uuid.uuid4(), name="bar"),
]
def test_query():
from graphql.execution.executors.sync import SyncExecutor
schema = graphene.Schema(query=Query)
result = schema.execute(
"""
query {
listFoos {
id
name
}
}
""",
executor=SyncExecutor(),
return_promise=False,
)
assert result.errors is None
assert result.data is not None
assert [x["name"] for x in result.data["listFoos"]] == ["foo", "bar"]
|
en
| 0.618568
|
Dummy resolver that creates a list of Pydantic objects query { listFoos { id name } }
| 2.61833
| 3
|
warehouse/utils/webauthn.py
|
pradyunsg/warehouse
| 1
|
6625980
|
<reponame>pradyunsg/warehouse
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import webauthn as pywebauthn
from webauthn.helpers import base64url_to_bytes, generate_challenge
from webauthn.helpers.exceptions import (
InvalidAuthenticationResponse,
InvalidRegistrationResponse,
)
from webauthn.helpers.options_to_json import options_to_json
from webauthn.helpers.structs import (
AttestationConveyancePreference,
AuthenticationCredential,
AuthenticatorSelectionCriteria,
AuthenticatorTransport,
PublicKeyCredentialDescriptor,
RegistrationCredential,
UserVerificationRequirement,
)
class AuthenticationRejectedError(Exception):
pass
class RegistrationRejectedError(Exception):
pass
def _get_webauthn_user_public_key_credential_descriptors(user, *, rp_id):
"""
Returns a webauthn.WebAuthnUser instance corresponding
to the given user model, with properties suitable for
usage within the webauthn API.
"""
return [
PublicKeyCredentialDescriptor(
id=base64url_to_bytes(credential.credential_id),
transports=[
AuthenticatorTransport.USB,
AuthenticatorTransport.NFC,
AuthenticatorTransport.BLE,
AuthenticatorTransport.INTERNAL,
],
)
for credential in user.webauthn
]
def _get_webauthn_user_public_keys(user, *, rp_id):
return [
(
base64url_to_bytes(credential.public_key),
credential.sign_count,
)
for credential in user.webauthn
]
def _webauthn_b64encode(source):
return base64.urlsafe_b64encode(source).rstrip(b"=")
def generate_webauthn_challenge():
"""
Returns a random challenge suitable for use within
Webauthn's credential and configuration option objects.
See: https://w3c.github.io/webauthn/#cryptographic-challenges
"""
return generate_challenge()
def get_credential_options(user, *, challenge, rp_name, rp_id):
"""
Returns a dictionary of options for credential creation
on the client side.
"""
_authenticator_selection = AuthenticatorSelectionCriteria()
_authenticator_selection.user_verification = UserVerificationRequirement.DISCOURAGED
options = pywebauthn.generate_registration_options(
rp_id=rp_id,
rp_name=rp_name,
user_id=str(user.id),
user_name=user.username,
user_display_name=user.name or user.username,
challenge=challenge,
attestation=AttestationConveyancePreference.NONE,
authenticator_selection=_authenticator_selection,
)
return json.loads(options_to_json(options))
def get_assertion_options(user, *, challenge, rp_id):
"""
Returns a dictionary of options for assertion retrieval
on the client side.
"""
options = pywebauthn.generate_authentication_options(
rp_id=rp_id,
challenge=challenge,
allow_credentials=_get_webauthn_user_public_key_credential_descriptors(
user, rp_id=rp_id
),
user_verification=UserVerificationRequirement.DISCOURAGED,
)
return json.loads(options_to_json(options))
def verify_registration_response(response, challenge, *, rp_id, origin):
"""
Validates the challenge and attestation information
sent from the client during device registration.
Returns a WebAuthnCredential on success.
Raises RegistrationRejectedError on failire.
"""
# NOTE: We re-encode the challenge below, because our
# response's clientData.challenge is encoded twice:
# first for the entire clientData payload, and then again
# for the individual challenge.
encoded_challenge = _webauthn_b64encode(challenge)
try:
_credential = RegistrationCredential.parse_raw(response)
return pywebauthn.verify_registration_response(
credential=_credential,
expected_challenge=encoded_challenge,
expected_rp_id=rp_id,
expected_origin=origin,
require_user_verification=False,
)
except InvalidRegistrationResponse as e:
raise RegistrationRejectedError(str(e))
def verify_assertion_response(assertion, *, challenge, user, origin, rp_id):
"""
Validates the challenge and assertion information
sent from the client during authentication.
Returns an updated signage count on success.
Raises AuthenticationRejectedError on failure.
"""
# NOTE: We re-encode the challenge below, because our
# response's clientData.challenge is encoded twice:
# first for the entire clientData payload, and then again
# for the individual challenge.
encoded_challenge = _webauthn_b64encode(challenge)
webauthn_user_public_keys = _get_webauthn_user_public_keys(user, rp_id=rp_id)
for public_key, current_sign_count in webauthn_user_public_keys:
try:
_credential = AuthenticationCredential.parse_raw(assertion)
return pywebauthn.verify_authentication_response(
credential=_credential,
expected_challenge=encoded_challenge,
expected_rp_id=rp_id,
expected_origin=origin,
credential_public_key=public_key,
credential_current_sign_count=current_sign_count,
require_user_verification=False,
)
except InvalidAuthenticationResponse:
pass
# If we exit the loop, then we've failed to verify the assertion against
# any of the user's WebAuthn credentials. Fail.
raise AuthenticationRejectedError("Invalid WebAuthn credential")
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import webauthn as pywebauthn
from webauthn.helpers import base64url_to_bytes, generate_challenge
from webauthn.helpers.exceptions import (
InvalidAuthenticationResponse,
InvalidRegistrationResponse,
)
from webauthn.helpers.options_to_json import options_to_json
from webauthn.helpers.structs import (
AttestationConveyancePreference,
AuthenticationCredential,
AuthenticatorSelectionCriteria,
AuthenticatorTransport,
PublicKeyCredentialDescriptor,
RegistrationCredential,
UserVerificationRequirement,
)
class AuthenticationRejectedError(Exception):
pass
class RegistrationRejectedError(Exception):
pass
def _get_webauthn_user_public_key_credential_descriptors(user, *, rp_id):
"""
Returns a webauthn.WebAuthnUser instance corresponding
to the given user model, with properties suitable for
usage within the webauthn API.
"""
return [
PublicKeyCredentialDescriptor(
id=base64url_to_bytes(credential.credential_id),
transports=[
AuthenticatorTransport.USB,
AuthenticatorTransport.NFC,
AuthenticatorTransport.BLE,
AuthenticatorTransport.INTERNAL,
],
)
for credential in user.webauthn
]
def _get_webauthn_user_public_keys(user, *, rp_id):
return [
(
base64url_to_bytes(credential.public_key),
credential.sign_count,
)
for credential in user.webauthn
]
def _webauthn_b64encode(source):
return base64.urlsafe_b64encode(source).rstrip(b"=")
def generate_webauthn_challenge():
"""
Returns a random challenge suitable for use within
Webauthn's credential and configuration option objects.
See: https://w3c.github.io/webauthn/#cryptographic-challenges
"""
return generate_challenge()
def get_credential_options(user, *, challenge, rp_name, rp_id):
"""
Returns a dictionary of options for credential creation
on the client side.
"""
_authenticator_selection = AuthenticatorSelectionCriteria()
_authenticator_selection.user_verification = UserVerificationRequirement.DISCOURAGED
options = pywebauthn.generate_registration_options(
rp_id=rp_id,
rp_name=rp_name,
user_id=str(user.id),
user_name=user.username,
user_display_name=user.name or user.username,
challenge=challenge,
attestation=AttestationConveyancePreference.NONE,
authenticator_selection=_authenticator_selection,
)
return json.loads(options_to_json(options))
def get_assertion_options(user, *, challenge, rp_id):
"""
Returns a dictionary of options for assertion retrieval
on the client side.
"""
options = pywebauthn.generate_authentication_options(
rp_id=rp_id,
challenge=challenge,
allow_credentials=_get_webauthn_user_public_key_credential_descriptors(
user, rp_id=rp_id
),
user_verification=UserVerificationRequirement.DISCOURAGED,
)
return json.loads(options_to_json(options))
def verify_registration_response(response, challenge, *, rp_id, origin):
"""
Validates the challenge and attestation information
sent from the client during device registration.
Returns a WebAuthnCredential on success.
Raises RegistrationRejectedError on failire.
"""
# NOTE: We re-encode the challenge below, because our
# response's clientData.challenge is encoded twice:
# first for the entire clientData payload, and then again
# for the individual challenge.
encoded_challenge = _webauthn_b64encode(challenge)
try:
_credential = RegistrationCredential.parse_raw(response)
return pywebauthn.verify_registration_response(
credential=_credential,
expected_challenge=encoded_challenge,
expected_rp_id=rp_id,
expected_origin=origin,
require_user_verification=False,
)
except InvalidRegistrationResponse as e:
raise RegistrationRejectedError(str(e))
def verify_assertion_response(assertion, *, challenge, user, origin, rp_id):
"""
Validates the challenge and assertion information
sent from the client during authentication.
Returns an updated signage count on success.
Raises AuthenticationRejectedError on failure.
"""
# NOTE: We re-encode the challenge below, because our
# response's clientData.challenge is encoded twice:
# first for the entire clientData payload, and then again
# for the individual challenge.
encoded_challenge = _webauthn_b64encode(challenge)
webauthn_user_public_keys = _get_webauthn_user_public_keys(user, rp_id=rp_id)
for public_key, current_sign_count in webauthn_user_public_keys:
try:
_credential = AuthenticationCredential.parse_raw(assertion)
return pywebauthn.verify_authentication_response(
credential=_credential,
expected_challenge=encoded_challenge,
expected_rp_id=rp_id,
expected_origin=origin,
credential_public_key=public_key,
credential_current_sign_count=current_sign_count,
require_user_verification=False,
)
except InvalidAuthenticationResponse:
pass
# If we exit the loop, then we've failed to verify the assertion against
# any of the user's WebAuthn credentials. Fail.
raise AuthenticationRejectedError("Invalid WebAuthn credential")
|
en
| 0.818275
|
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Returns a webauthn.WebAuthnUser instance corresponding to the given user model, with properties suitable for usage within the webauthn API. Returns a random challenge suitable for use within Webauthn's credential and configuration option objects. See: https://w3c.github.io/webauthn/#cryptographic-challenges Returns a dictionary of options for credential creation on the client side. Returns a dictionary of options for assertion retrieval on the client side. Validates the challenge and attestation information sent from the client during device registration. Returns a WebAuthnCredential on success. Raises RegistrationRejectedError on failire. # NOTE: We re-encode the challenge below, because our # response's clientData.challenge is encoded twice: # first for the entire clientData payload, and then again # for the individual challenge. Validates the challenge and assertion information sent from the client during authentication. Returns an updated signage count on success. Raises AuthenticationRejectedError on failure. # NOTE: We re-encode the challenge below, because our # response's clientData.challenge is encoded twice: # first for the entire clientData payload, and then again # for the individual challenge. # If we exit the loop, then we've failed to verify the assertion against # any of the user's WebAuthn credentials. Fail.
| 2.175672
| 2
|
jsk_recognition/jsk_pcl_ros/scripts/install_trained_data.py
|
VT-ASIM-LAB/autoware.ai
| 0
|
6625981
|
<filename>jsk_recognition/jsk_pcl_ros/scripts/install_trained_data.py
#!/usr/bin/env python
import argparse
import multiprocessing
import jsk_data
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', dest='quiet', action='store_false')
args = parser.parse_args()
quiet = args.quiet
def download_data(**kwargs):
kwargs['pkg_name'] = 'jsk_pcl_ros'
kwargs['quiet'] = quiet
p = multiprocessing.Process(
target=jsk_data.download_data,
kwargs=kwargs)
p.start()
download_data(
path='trained_data/linemod_template.tgz',
url='https://drive.google.com/uc?id=1nQzjrpvLojzPrQDWElxDCTVjLzeDi70p',
md5='2c9bd31c6c6ddd5f36698fb36040c71c',
extract=True,
)
if __name__ == '__main__':
main()
|
<filename>jsk_recognition/jsk_pcl_ros/scripts/install_trained_data.py
#!/usr/bin/env python
import argparse
import multiprocessing
import jsk_data
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', dest='quiet', action='store_false')
args = parser.parse_args()
quiet = args.quiet
def download_data(**kwargs):
kwargs['pkg_name'] = 'jsk_pcl_ros'
kwargs['quiet'] = quiet
p = multiprocessing.Process(
target=jsk_data.download_data,
kwargs=kwargs)
p.start()
download_data(
path='trained_data/linemod_template.tgz',
url='https://drive.google.com/uc?id=1nQzjrpvLojzPrQDWElxDCTVjLzeDi70p',
md5='2c9bd31c6c6ddd5f36698fb36040c71c',
extract=True,
)
if __name__ == '__main__':
main()
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.132307
| 2
|
elegy/__init__.py
|
srcolinas/elegy
| 0
|
6625982
|
<filename>elegy/__init__.py
__version__ = "0.1.3"
from . import losses, metrics, model, regularizers, callbacks
from .losses import Loss
from .metrics import Metric
from .model import Model
from .module import Module
|
<filename>elegy/__init__.py
__version__ = "0.1.3"
from . import losses, metrics, model, regularizers, callbacks
from .losses import Loss
from .metrics import Metric
from .model import Model
from .module import Module
|
none
| 1
| 1.01748
| 1
|
|
pocean/tests/dsg/test_new.py
|
axiom-data-science/pocean-core
| 13
|
6625983
|
<gh_stars>10-100
# -*- coding: utf-8 -*-
from os.path import join as jn
from os.path import dirname as dn
import pytest
from pocean.cf import CFDataset
from pocean.utils import all_subclasses
from pocean.dsg import *
import logging
from pocean import logger
logger.level = logging.INFO
logger.handlers = [logging.StreamHandler()]
@pytest.mark.parametrize("klass,fp", [
(OrthogonalMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'om-single.nc')),
(OrthogonalMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'om-multiple.nc')),
(OrthogonalMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'om-1dy11.nc')),
(IncompleteMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'im-multiple.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'im-single.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'im-multiple.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'im-multiple-nonstring.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'wave-glider-int-attrs.nc')),
(ContiguousRaggedTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'cr-multiple.nc')),
(ContiguousRaggedTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'cr-oot-A.nc')),
(ContiguousRaggedTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'cr-oot-B.nc')),
(ContiguousRaggedTrajectoryProfile, jn(dn(__file__), 'trajectoryProfile', 'resources', 'cr-single.nc')),
(ContiguousRaggedTrajectoryProfile, jn(dn(__file__), 'trajectoryProfile', 'resources', 'cr-multiple.nc')),
(ContiguousRaggedTrajectoryProfile, jn(dn(__file__), 'trajectoryProfile', 'resources', 'cr-missing-time.nc')),
(IncompleteMultidimensionalTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'im-multiple.nc')),
(OrthogonalMultidimensionalTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'om-single.nc')),
(OrthogonalMultidimensionalTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'om-multiple.nc')),
#(IndexedRaggedTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'cr-multiple.nc')),
#(ContiguousRaggedTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'cr-multiple.nc')),
(OrthogonalMultidimensionalTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'om-multiple.nc')),
(IncompleteMultidimensionalTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'im-single.nc')),
(IncompleteMultidimensionalTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'im-multiple.nc')),
(RaggedTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'r-single.nc')),
(RaggedTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'r-multiple.nc')),
])
def test_is_mine(klass, fp):
with CFDataset.load(fp) as dsg:
assert dsg.__class__ == klass
allsubs = list(all_subclasses(CFDataset))
subs = [ s for s in allsubs if s != klass ]
with CFDataset(fp) as dsg:
logger.debug('\nTesting {}'.format(klass.__name__))
assert klass.is_mine(dsg, strict=True) is True
for s in subs:
if hasattr(s, 'is_mine'):
logger.debug(' * Trying {}...'.format(s.__name__))
assert s.is_mine(dsg) is False
|
# -*- coding: utf-8 -*-
from os.path import join as jn
from os.path import dirname as dn
import pytest
from pocean.cf import CFDataset
from pocean.utils import all_subclasses
from pocean.dsg import *
import logging
from pocean import logger
logger.level = logging.INFO
logger.handlers = [logging.StreamHandler()]
@pytest.mark.parametrize("klass,fp", [
(OrthogonalMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'om-single.nc')),
(OrthogonalMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'om-multiple.nc')),
(OrthogonalMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'om-1dy11.nc')),
(IncompleteMultidimensionalProfile, jn(dn(__file__), 'profile', 'resources', 'im-multiple.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'im-single.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'im-multiple.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'im-multiple-nonstring.nc')),
(IncompleteMultidimensionalTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'wave-glider-int-attrs.nc')),
(ContiguousRaggedTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'cr-multiple.nc')),
(ContiguousRaggedTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'cr-oot-A.nc')),
(ContiguousRaggedTrajectory, jn(dn(__file__), 'trajectory', 'resources', 'cr-oot-B.nc')),
(ContiguousRaggedTrajectoryProfile, jn(dn(__file__), 'trajectoryProfile', 'resources', 'cr-single.nc')),
(ContiguousRaggedTrajectoryProfile, jn(dn(__file__), 'trajectoryProfile', 'resources', 'cr-multiple.nc')),
(ContiguousRaggedTrajectoryProfile, jn(dn(__file__), 'trajectoryProfile', 'resources', 'cr-missing-time.nc')),
(IncompleteMultidimensionalTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'im-multiple.nc')),
(OrthogonalMultidimensionalTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'om-single.nc')),
(OrthogonalMultidimensionalTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'om-multiple.nc')),
#(IndexedRaggedTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'cr-multiple.nc')),
#(ContiguousRaggedTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'cr-multiple.nc')),
(OrthogonalMultidimensionalTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'om-multiple.nc')),
(IncompleteMultidimensionalTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'im-single.nc')),
(IncompleteMultidimensionalTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'im-multiple.nc')),
(RaggedTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'r-single.nc')),
(RaggedTimeseriesProfile, jn(dn(__file__), 'timeseriesProfile', 'resources', 'r-multiple.nc')),
])
def test_is_mine(klass, fp):
with CFDataset.load(fp) as dsg:
assert dsg.__class__ == klass
allsubs = list(all_subclasses(CFDataset))
subs = [ s for s in allsubs if s != klass ]
with CFDataset(fp) as dsg:
logger.debug('\nTesting {}'.format(klass.__name__))
assert klass.is_mine(dsg, strict=True) is True
for s in subs:
if hasattr(s, 'is_mine'):
logger.debug(' * Trying {}...'.format(s.__name__))
assert s.is_mine(dsg) is False
|
en
| 0.239596
|
# -*- coding: utf-8 -*- #(IndexedRaggedTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'cr-multiple.nc')), #(ContiguousRaggedTimeseries, jn(dn(__file__), 'timeseries', 'resources', 'cr-multiple.nc')),
| 1.816717
| 2
|
client/commands/v2/restart.py
|
pradeep90/pyre-check
| 0
|
6625984
|
<filename>client/commands/v2/restart.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from ... import command_arguments, commands, configuration as configuration_module
from . import incremental, server_connection, start, stop
LOG: logging.Logger = logging.getLogger(__name__)
def _stop_server_if_needed(configuration: configuration_module.Configuration) -> None:
try:
socket_path = server_connection.get_default_socket_path(
log_directory=Path(configuration.log_directory)
)
LOG.info("Stopping the server if needed...")
stop.stop_server(socket_path)
LOG.info(f"Stopped server at `{start.get_server_identifier(configuration)}`")
except server_connection.ConnectionFailure:
# This usually means there's no server running
pass
def run(
configuration: configuration_module.Configuration,
incremental_arguments: command_arguments.IncrementalArguments,
) -> commands.ExitCode:
try:
_stop_server_if_needed(configuration)
incremental.run_incremental(configuration, incremental_arguments)
return commands.ExitCode.SUCCESS
except Exception as error:
raise commands.ClientException(
f"Exception occured during pyre restart: {error}"
) from error
|
<filename>client/commands/v2/restart.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from pathlib import Path
from ... import command_arguments, commands, configuration as configuration_module
from . import incremental, server_connection, start, stop
LOG: logging.Logger = logging.getLogger(__name__)
def _stop_server_if_needed(configuration: configuration_module.Configuration) -> None:
try:
socket_path = server_connection.get_default_socket_path(
log_directory=Path(configuration.log_directory)
)
LOG.info("Stopping the server if needed...")
stop.stop_server(socket_path)
LOG.info(f"Stopped server at `{start.get_server_identifier(configuration)}`")
except server_connection.ConnectionFailure:
# This usually means there's no server running
pass
def run(
configuration: configuration_module.Configuration,
incremental_arguments: command_arguments.IncrementalArguments,
) -> commands.ExitCode:
try:
_stop_server_if_needed(configuration)
incremental.run_incremental(configuration, incremental_arguments)
return commands.ExitCode.SUCCESS
except Exception as error:
raise commands.ClientException(
f"Exception occured during pyre restart: {error}"
) from error
|
en
| 0.946521
|
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # This usually means there's no server running
| 2.591408
| 3
|
colossalai/nn/layer/wrapper/lambda_wrapper.py
|
RichardoLuo/ColossalAI
| 1,630
|
6625985
|
<filename>colossalai/nn/layer/wrapper/lambda_wrapper.py<gh_stars>1000+
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.nn as nn
from colossalai.builder import build_layer
from colossalai.registry import LAYERS
@LAYERS.register_module
class LambdaWrapper(nn.Module):
"""Wrap a function to nn.Module, which takes a config of layers and can fully access them.
Args:
func (``Callable``): User customed function.
layers_cfg (dict, optional): Config of layers, defaults to None.
"""
def __init__(self, func, layers_cfg: dict = None):
super().__init__()
self.func = func
self.layers = self._build_layers(layers_cfg)
def _build_layers(self, layers_cfg: dict):
if layers_cfg is None:
return None
else:
layers = []
for cfg in layers_cfg:
layer = build_layer(cfg)
layers.append(layer)
return layers
def forward(self, *args, **kwargs):
return self.func(self, *args, **kwargs)
|
<filename>colossalai/nn/layer/wrapper/lambda_wrapper.py<gh_stars>1000+
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch.nn as nn
from colossalai.builder import build_layer
from colossalai.registry import LAYERS
@LAYERS.register_module
class LambdaWrapper(nn.Module):
"""Wrap a function to nn.Module, which takes a config of layers and can fully access them.
Args:
func (``Callable``): User customed function.
layers_cfg (dict, optional): Config of layers, defaults to None.
"""
def __init__(self, func, layers_cfg: dict = None):
super().__init__()
self.func = func
self.layers = self._build_layers(layers_cfg)
def _build_layers(self, layers_cfg: dict):
if layers_cfg is None:
return None
else:
layers = []
for cfg in layers_cfg:
layer = build_layer(cfg)
layers.append(layer)
return layers
def forward(self, *args, **kwargs):
return self.func(self, *args, **kwargs)
|
en
| 0.71427
|
#!/usr/bin/env python # -*- encoding: utf-8 -*- Wrap a function to nn.Module, which takes a config of layers and can fully access them. Args: func (``Callable``): User customed function. layers_cfg (dict, optional): Config of layers, defaults to None.
| 2.302575
| 2
|
ubivar/test/resources/test_event_last_id.py
|
oriskami/oriskami-python
| 4
|
6625986
|
import os
import ubivar
import warnings
from ubivar.test.helper import (UbivarTestCase)
class UbivarAPIResourcesTests(UbivarTestCase):
def test_event_last_id_list(self):
response = ubivar.EventLastId.list()
self.assertEqual(len(response.data), 1)
lastId = response.data[0]["id"]
self.assertEqual(str(lastId), str(3))
|
import os
import ubivar
import warnings
from ubivar.test.helper import (UbivarTestCase)
class UbivarAPIResourcesTests(UbivarTestCase):
def test_event_last_id_list(self):
response = ubivar.EventLastId.list()
self.assertEqual(len(response.data), 1)
lastId = response.data[0]["id"]
self.assertEqual(str(lastId), str(3))
|
none
| 1
| 2.665706
| 3
|
|
sdk/python/pulumi_aws/neptune/cluster_instance.py
|
johnktims/pulumi-aws
| 0
|
6625987
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ClusterInstance(pulumi.CustomResource):
address: pulumi.Output[str]
"""
The hostname of the instance. See also `endpoint` and `port`.
"""
apply_immediately: pulumi.Output[bool]
"""
Specifies whether any instance modifications
are applied immediately, or during the next maintenance window. Default is`false`.
"""
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN) of neptune instance
"""
auto_minor_version_upgrade: pulumi.Output[bool]
"""
Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
"""
availability_zone: pulumi.Output[str]
"""
The EC2 Availability Zone that the neptune instance is created in.
"""
cluster_identifier: pulumi.Output[str]
"""
The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
"""
dbi_resource_id: pulumi.Output[str]
"""
The region-unique, immutable identifier for the neptune instance.
"""
endpoint: pulumi.Output[str]
"""
The connection endpoint in `address:port` format.
"""
engine: pulumi.Output[str]
"""
The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
"""
engine_version: pulumi.Output[str]
"""
The neptune engine version.
"""
identifier: pulumi.Output[str]
"""
The indentifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
"""
identifier_prefix: pulumi.Output[str]
"""
Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
"""
instance_class: pulumi.Output[str]
"""
The instance class to use.
"""
kms_key_arn: pulumi.Output[str]
"""
The ARN for the KMS encryption key if one is set to the neptune cluster.
"""
neptune_parameter_group_name: pulumi.Output[str]
"""
The name of the neptune parameter group to associate with this instance.
"""
neptune_subnet_group_name: pulumi.Output[str]
"""
A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
"""
port: pulumi.Output[float]
"""
The port on which the DB accepts connections. Defaults to `8182`.
"""
preferred_backup_window: pulumi.Output[str]
"""
The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
"""
preferred_maintenance_window: pulumi.Output[str]
"""
The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
"""
promotion_tier: pulumi.Output[float]
"""
Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer.
"""
publicly_accessible: pulumi.Output[bool]
"""
Bool to control if instance is publicly accessible. Default is `false`.
"""
storage_encrypted: pulumi.Output[bool]
"""
Specifies whether the neptune cluster is encrypted.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the instance.
"""
writer: pulumi.Output[bool]
"""
Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
def __init__(__self__, resource_name, opts=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, cluster_identifier=None, engine=None, engine_version=None, identifier=None, identifier_prefix=None, instance_class=None, neptune_parameter_group_name=None, neptune_subnet_group_name=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, promotion_tier=None, publicly_accessible=None, tags=None, __props__=None, __name__=None, __opts__=None):
    """
    A Cluster Instance Resource defines attributes that are specific to a single instance in a Neptune Cluster.
    You can simply add neptune instances and Neptune manages the replication. You can use the [count][1]
    meta-parameter to make multiple instances and join them all to the same Neptune Cluster, or you may specify different Cluster Instance resources with various `instance_class` sizes.

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
           are applied immediately, or during the next maintenance window. Default is `false`.
    :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
    :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
    :param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
    :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
    :param pulumi.Input[str] engine_version: The neptune engine version.
    :param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
    :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
    :param pulumi.Input[str] instance_class: The instance class to use.
    :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
    :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
    :param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`.
    :param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
    :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
           Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
    :param pulumi.Input[float] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoted to writer.
    :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
    :param pulumi.Input[dict] tags: A mapping of tags to assign to the instance.
    """
    # Backwards compatibility: honor the deprecated __name__/__opts__ aliases
    # for resource_name/opts before normal option processing.
    if __name__ is not None:
        warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = utilities.get_version()
    if opts.id is None:
        # Creating a new resource: gather the input properties into __props__.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = dict()

        __props__['apply_immediately'] = apply_immediately
        __props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade
        __props__['availability_zone'] = availability_zone
        # cluster_identifier and instance_class are the only required inputs.
        if cluster_identifier is None:
            raise TypeError("Missing required property 'cluster_identifier'")
        __props__['cluster_identifier'] = cluster_identifier
        __props__['engine'] = engine
        __props__['engine_version'] = engine_version
        __props__['identifier'] = identifier
        __props__['identifier_prefix'] = identifier_prefix
        if instance_class is None:
            raise TypeError("Missing required property 'instance_class'")
        __props__['instance_class'] = instance_class
        __props__['neptune_parameter_group_name'] = neptune_parameter_group_name
        __props__['neptune_subnet_group_name'] = neptune_subnet_group_name
        __props__['port'] = port
        __props__['preferred_backup_window'] = preferred_backup_window
        __props__['preferred_maintenance_window'] = preferred_maintenance_window
        __props__['promotion_tier'] = promotion_tier
        __props__['publicly_accessible'] = publicly_accessible
        __props__['tags'] = tags
        # Output-only properties start as None; the engine resolves them.
        __props__['address'] = None
        __props__['arn'] = None
        __props__['dbi_resource_id'] = None
        __props__['endpoint'] = None
        __props__['kms_key_arn'] = None
        __props__['storage_encrypted'] = None
        __props__['writer'] = None
    super(ClusterInstance, __self__).__init__(
        'aws:neptune/clusterInstance:ClusterInstance',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name, id, opts=None, address=None, apply_immediately=None, arn=None, auto_minor_version_upgrade=None, availability_zone=None, cluster_identifier=None, dbi_resource_id=None, endpoint=None, engine=None, engine_version=None, identifier=None, identifier_prefix=None, instance_class=None, kms_key_arn=None, neptune_parameter_group_name=None, neptune_subnet_group_name=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, promotion_tier=None, publicly_accessible=None, storage_encrypted=None, tags=None, writer=None):
    """
    Get an existing ClusterInstance resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param str id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] address: The hostname of the instance. See also `endpoint` and `port`.
    :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
           are applied immediately, or during the next maintenance window. Default is `false`.
    :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of neptune instance
    :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
    :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
    :param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
    :param pulumi.Input[str] dbi_resource_id: The region-unique, immutable identifier for the neptune instance.
    :param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format.
    :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
    :param pulumi.Input[str] engine_version: The neptune engine version.
    :param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
    :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
    :param pulumi.Input[str] instance_class: The instance class to use.
    :param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key if one is set to the neptune cluster.
    :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
    :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
    :param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`.
    :param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
    :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
           Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
    :param pulumi.Input[float] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoted to writer.
    :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
    :param pulumi.Input[bool] storage_encrypted: Specifies whether the neptune cluster is encrypted.
    :param pulumi.Input[dict] tags: A mapping of tags to assign to the instance.
    :param pulumi.Input[bool] writer: Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
    """
    # Carry the provider ID through the resource options so the engine
    # performs a read of existing state instead of a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    __props__ = dict()
    __props__["address"] = address
    __props__["apply_immediately"] = apply_immediately
    __props__["arn"] = arn
    __props__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
    __props__["availability_zone"] = availability_zone
    __props__["cluster_identifier"] = cluster_identifier
    __props__["dbi_resource_id"] = dbi_resource_id
    __props__["endpoint"] = endpoint
    __props__["engine"] = engine
    __props__["engine_version"] = engine_version
    __props__["identifier"] = identifier
    __props__["identifier_prefix"] = identifier_prefix
    __props__["instance_class"] = instance_class
    __props__["kms_key_arn"] = kms_key_arn
    __props__["neptune_parameter_group_name"] = neptune_parameter_group_name
    __props__["neptune_subnet_group_name"] = neptune_subnet_group_name
    __props__["port"] = port
    __props__["preferred_backup_window"] = preferred_backup_window
    __props__["preferred_maintenance_window"] = preferred_maintenance_window
    __props__["promotion_tier"] = promotion_tier
    __props__["publicly_accessible"] = publicly_accessible
    __props__["storage_encrypted"] = storage_encrypted
    __props__["tags"] = tags
    __props__["writer"] = writer
    return ClusterInstance(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
    """Map a camelCase provider property name to its snake_case form, falling back to the name itself."""
    snake = tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake or prop
def translate_input_property(self, prop):
    """Map a snake_case property name to its camelCase provider form, falling back to the name itself."""
    camel = tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return camel or prop
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ClusterInstance(pulumi.CustomResource):
    """A single database instance belonging to a Neptune cluster (aws:neptune/clusterInstance)."""
    address: pulumi.Output[str]
    """
    The hostname of the instance. See also `endpoint` and `port`.
    """
    apply_immediately: pulumi.Output[bool]
    """
    Specifies whether any instance modifications
    are applied immediately, or during the next maintenance window. Default is `false`.
    """
    arn: pulumi.Output[str]
    """
    Amazon Resource Name (ARN) of neptune instance
    """
    auto_minor_version_upgrade: pulumi.Output[bool]
    """
    Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
    """
    availability_zone: pulumi.Output[str]
    """
    The EC2 Availability Zone that the neptune instance is created in.
    """
    cluster_identifier: pulumi.Output[str]
    """
    The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
    """
    dbi_resource_id: pulumi.Output[str]
    """
    The region-unique, immutable identifier for the neptune instance.
    """
    endpoint: pulumi.Output[str]
    """
    The connection endpoint in `address:port` format.
    """
    engine: pulumi.Output[str]
    """
    The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
    """
    engine_version: pulumi.Output[str]
    """
    The neptune engine version.
    """
    identifier: pulumi.Output[str]
    """
    The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
    """
    identifier_prefix: pulumi.Output[str]
    """
    Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
    """
    instance_class: pulumi.Output[str]
    """
    The instance class to use.
    """
    kms_key_arn: pulumi.Output[str]
    """
    The ARN for the KMS encryption key if one is set to the neptune cluster.
    """
    neptune_parameter_group_name: pulumi.Output[str]
    """
    The name of the neptune parameter group to associate with this instance.
    """
    neptune_subnet_group_name: pulumi.Output[str]
    """
    A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
    """
    port: pulumi.Output[float]
    """
    The port on which the DB accepts connections. Defaults to `8182`.
    """
    preferred_backup_window: pulumi.Output[str]
    """
    The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
    """
    preferred_maintenance_window: pulumi.Output[str]
    """
    The window to perform maintenance in.
    Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
    """
    promotion_tier: pulumi.Output[float]
    """
    Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoted to writer.
    """
    publicly_accessible: pulumi.Output[bool]
    """
    Bool to control if instance is publicly accessible. Default is `false`.
    """
    storage_encrypted: pulumi.Output[bool]
    """
    Specifies whether the neptune cluster is encrypted.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the instance.
    """
    writer: pulumi.Output[bool]
    """
    Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
    """
    def __init__(__self__, resource_name, opts=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, cluster_identifier=None, engine=None, engine_version=None, identifier=None, identifier_prefix=None, instance_class=None, neptune_parameter_group_name=None, neptune_subnet_group_name=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, promotion_tier=None, publicly_accessible=None, tags=None, __props__=None, __name__=None, __opts__=None):
        """
        A Cluster Instance Resource defines attributes that are specific to a single instance in a Neptune Cluster.
        You can simply add neptune instances and Neptune manages the replication. You can use the [count][1]
        meta-parameter to make multiple instances and join them all to the same Neptune Cluster, or you may specify different Cluster Instance resources with various `instance_class` sizes.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
               are applied immediately, or during the next maintenance window. Default is `false`.
        :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
        :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
        :param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
        :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
        :param pulumi.Input[str] engine_version: The neptune engine version.
        :param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
        :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
        :param pulumi.Input[str] instance_class: The instance class to use.
        :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
        :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
        :param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`.
        :param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
        :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
               Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
        :param pulumi.Input[float] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoted to writer.
        :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the instance.
        """
        # Backwards compatibility: honor the deprecated __name__/__opts__
        # aliases for resource_name/opts before normal option processing.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # Creating a new resource: gather the input properties.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['apply_immediately'] = apply_immediately
            __props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade
            __props__['availability_zone'] = availability_zone
            # cluster_identifier and instance_class are the only required inputs.
            if cluster_identifier is None:
                raise TypeError("Missing required property 'cluster_identifier'")
            __props__['cluster_identifier'] = cluster_identifier
            __props__['engine'] = engine
            __props__['engine_version'] = engine_version
            __props__['identifier'] = identifier
            __props__['identifier_prefix'] = identifier_prefix
            if instance_class is None:
                raise TypeError("Missing required property 'instance_class'")
            __props__['instance_class'] = instance_class
            __props__['neptune_parameter_group_name'] = neptune_parameter_group_name
            __props__['neptune_subnet_group_name'] = neptune_subnet_group_name
            __props__['port'] = port
            __props__['preferred_backup_window'] = preferred_backup_window
            __props__['preferred_maintenance_window'] = preferred_maintenance_window
            __props__['promotion_tier'] = promotion_tier
            __props__['publicly_accessible'] = publicly_accessible
            __props__['tags'] = tags
            # Output-only properties start as None; the engine resolves them.
            __props__['address'] = None
            __props__['arn'] = None
            __props__['dbi_resource_id'] = None
            __props__['endpoint'] = None
            __props__['kms_key_arn'] = None
            __props__['storage_encrypted'] = None
            __props__['writer'] = None
        super(ClusterInstance, __self__).__init__(
            'aws:neptune/clusterInstance:ClusterInstance',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, address=None, apply_immediately=None, arn=None, auto_minor_version_upgrade=None, availability_zone=None, cluster_identifier=None, dbi_resource_id=None, endpoint=None, engine=None, engine_version=None, identifier=None, identifier_prefix=None, instance_class=None, kms_key_arn=None, neptune_parameter_group_name=None, neptune_subnet_group_name=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, promotion_tier=None, publicly_accessible=None, storage_encrypted=None, tags=None, writer=None):
        """
        Get an existing ClusterInstance resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address: The hostname of the instance. See also `endpoint` and `port`.
        :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
               are applied immediately, or during the next maintenance window. Default is `false`.
        :param pulumi.Input[str] arn: Amazon Resource Name (ARN) of neptune instance
        :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
        :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
        :param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
        :param pulumi.Input[str] dbi_resource_id: The region-unique, immutable identifier for the neptune instance.
        :param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format.
        :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
        :param pulumi.Input[str] engine_version: The neptune engine version.
        :param pulumi.Input[str] identifier: The identifier for the neptune instance, if omitted, this provider will assign a random, unique identifier.
        :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
        :param pulumi.Input[str] instance_class: The instance class to use.
        :param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key if one is set to the neptune cluster.
        :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
        :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
        :param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`.
        :param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
        :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
               Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
        :param pulumi.Input[float] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoted to writer.
        :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
        :param pulumi.Input[bool] storage_encrypted: Specifies whether the neptune cluster is encrypted.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the instance.
        :param pulumi.Input[bool] writer: Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
        """
        # Carry the provider ID through the resource options so the engine
        # performs a read of existing state instead of a create.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["address"] = address
        __props__["apply_immediately"] = apply_immediately
        __props__["arn"] = arn
        __props__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
        __props__["availability_zone"] = availability_zone
        __props__["cluster_identifier"] = cluster_identifier
        __props__["dbi_resource_id"] = dbi_resource_id
        __props__["endpoint"] = endpoint
        __props__["engine"] = engine
        __props__["engine_version"] = engine_version
        __props__["identifier"] = identifier
        __props__["identifier_prefix"] = identifier_prefix
        __props__["instance_class"] = instance_class
        __props__["kms_key_arn"] = kms_key_arn
        __props__["neptune_parameter_group_name"] = neptune_parameter_group_name
        __props__["neptune_subnet_group_name"] = neptune_subnet_group_name
        __props__["port"] = port
        __props__["preferred_backup_window"] = preferred_backup_window
        __props__["preferred_maintenance_window"] = preferred_maintenance_window
        __props__["promotion_tier"] = promotion_tier
        __props__["publicly_accessible"] = publicly_accessible
        __props__["storage_encrypted"] = storage_encrypted
        __props__["tags"] = tags
        __props__["writer"] = writer
        return ClusterInstance(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Provider properties are camelCase; translate to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Python inputs are snake_case; translate to the provider's camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
en
| 0.657589
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The hostname of the instance. See also `endpoint` and `port`. Specifies whether any instance modifications are applied immediately, or during the next maintenance window. Default is`false`. Amazon Resource Name (ARN) of neptune instance Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`. The EC2 Availability Zone that the neptune instance is created in. The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance. The region-unique, immutable identifier for the neptune instance. The connection endpoint in `address:port` format. The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`. The neptune engine version. The indentifier for the neptune instance, if omitted, this provider will assign a random, unique identifier. Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`. The instance class to use. The ARN for the KMS encryption key if one is set to the neptune cluster. The name of the neptune parameter group to associate with this instance. A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html). The port on which the DB accepts connections. Defaults to `8182`. The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00" The window to perform maintenance in. Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". Default 0. Failover Priority setting on instance level. 
The reader who has lower tier has higher priority to get promoter to writer. Bool to control if instance is publicly accessible. Default is `false`. Specifies whether the neptune cluster is encrypted. A mapping of tags to assign to the instance. Boolean indicating if this instance is writable. `False` indicates this instance is a read replica. A Cluster Instance Resource defines attributes that are specific to a single instance in a Neptune Cluster. You can simply add neptune instances and Neptune manages the replication. You can use the [count][1] meta-parameter to make multiple instances and join them all to the same Neptune Cluster, or you may specify different Cluster Instance resources with various `instance_class` sizes. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications are applied immediately, or during the next maintenance window. Default is`false`. :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`. :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in. :param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance. :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`. :param pulumi.Input[str] engine_version: The neptune engine version. :param pulumi.Input[str] identifier: The indentifier for the neptune instance, if omitted, this provider will assign a random, unique identifier. :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. 
Conflicts with `identifier`. :param pulumi.Input[str] instance_class: The instance class to use. :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance. :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html). :param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`. :param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00" :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in. Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". :param pulumi.Input[float] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer. :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`. :param pulumi.Input[dict] tags: A mapping of tags to assign to the instance. Get an existing ClusterInstance resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param str id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] address: The hostname of the instance. See also `endpoint` and `port`. :param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications are applied immediately, or during the next maintenance window. Default is`false`. 
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of neptune instance :param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`. :param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in. :param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance. :param pulumi.Input[str] dbi_resource_id: The region-unique, immutable identifier for the neptune instance. :param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format. :param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`. :param pulumi.Input[str] engine_version: The neptune engine version. :param pulumi.Input[str] identifier: The indentifier for the neptune instance, if omitted, this provider will assign a random, unique identifier. :param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`. :param pulumi.Input[str] instance_class: The instance class to use. :param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key if one is set to the neptune cluster. :param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance. :param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html). :param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`. 
:param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00" :param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in. Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". :param pulumi.Input[float] promotion_tier: Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer. :param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`. :param pulumi.Input[bool] storage_encrypted: Specifies whether the neptune cluster is encrypted. :param pulumi.Input[dict] tags: A mapping of tags to assign to the instance. :param pulumi.Input[bool] writer: Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
| 1.815458
| 2
|
telemetry/telemetry/internal/backends/chrome/fuchsia_browser_finder.py
|
Martijnve23/catapult
| 1,894
|
6625988
|
<reponame>Martijnve23/catapult<filename>telemetry/telemetry/internal/backends/chrome/fuchsia_browser_finder.py
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds Fuchsia browsers that can be started and controlled by telemetry."""
from __future__ import absolute_import
import os
import platform
from telemetry.core import fuchsia_interface
from telemetry.core import platform as telemetry_platform
from telemetry.internal.backends.chrome import fuchsia_browser_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import fuchsia_device
from telemetry.internal.backends.chrome import chrome_startup_args
from telemetry.internal.util import local_first_binary_manager
class UnsupportedExtensionException(Exception):
  """Raised when browser options request extensions, which Fuchsia
  browsers do not support (see SupportsOptions below)."""
  pass
class PossibleFuchsiaBrowser(possible_browser.PossibleBrowser):
  """A Fuchsia browser that telemetry can attempt to create and control."""
  def __init__(self, browser_type, finder_options, fuchsia_platform):
    # finder_options is accepted for interface parity but unused here.
    del finder_options
    super(PossibleFuchsiaBrowser, self).__init__(browser_type, 'fuchsia', True)
    self._platform = fuchsia_platform
    self._platform_backend = (
        fuchsia_platform._platform_backend) # pylint: disable=protected-access
    # Like CrOS, there's no way to automatically determine the build directory,
    # so use the manually set output directory if possible.
    self._build_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
  def __repr__(self):
    return 'PossibleFuchsiaBrowser(app_type=%s)' % self.browser_type
  @property
  def browser_directory(self):
    # No host-side browser directory exists for a Fuchsia target.
    return None
  @property
  def profile_directory(self):
    # No host-side profile directory exists for a Fuchsia target.
    return None
  def _InitPlatformIfNeeded(self):
    # Platform is fully initialized in __init__; nothing more to do.
    pass
  def _GetPathsForOsPageCacheFlushing(self):
    # There is no page write-back on Fuchsia, so there is nothing to flush.
    return []
  def Create(self):
    """Start the browser process."""
    # Binaries are fetched for the host ('linux'), not the Fuchsia target.
    if local_first_binary_manager.LocalFirstBinaryManager.NeedsInit():
      local_first_binary_manager.LocalFirstBinaryManager.Init(
          self._build_dir, None, 'linux', platform.machine())
    startup_args = chrome_startup_args.GetFromBrowserOptions(
        self._browser_options)
    browser_backend = fuchsia_browser_backend.FuchsiaBrowserBackend(
        self._platform_backend, self._browser_options,
        self.browser_directory, self.profile_directory)
    try:
      return browser.Browser(
          browser_backend, self._platform_backend, startup_args,
          find_existing=False)
    except Exception:
      # Don't leak a half-started backend if Browser construction fails.
      browser_backend.Close()
      raise
  def CleanUpEnvironment(self):
    if self._browser_options is None:
      return # No environment to clean up.
    try:
      self._TearDownEnvironment()
    finally:
      # Mark the environment as torn down even if teardown raised.
      self._browser_options = None
  def SupportsOptions(self, browser_options):
    """Return True iff |browser_options| are usable; raises on extensions."""
    if len(browser_options.extensions_to_load) > 0:
      raise UnsupportedExtensionException(
          'Fuchsia browsers do not support extensions.')
    return True
  def UpdateExecutableIfNeeded(self):
    # Updating the browser is currently handled in the Chromium repository
    # instead of Catapult.
    pass
  @property
  def last_modification_time(self):
    # Sentinel: modification time is unknown/untracked for Fuchsia browsers.
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Return the first 'web-engine-shell' browser, or None if absent."""
  return next(
      (candidate for candidate in possible_browsers
       if candidate.browser_type == 'web-engine-shell'),
      None)
def FindAllBrowserTypes():
  """Return the browser type names telemetry can drive on Fuchsia."""
  return fuchsia_interface.FUCHSIA_BROWSERS
def FindAllAvailableBrowsers(finder_options, device):
  """Finds all available Fuchsia browsers.

  Returns an empty list when |device| is not a Fuchsia device.
  """
  if not isinstance(device, fuchsia_device.FuchsiaDevice):
    return []
  target_platform = telemetry_platform.GetPlatformForDevice(
      device, finder_options)
  return [
      PossibleFuchsiaBrowser('web-engine-shell', finder_options,
                             target_platform),
  ]
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds Fuchsia browsers that can be started and controlled by telemetry."""
from __future__ import absolute_import
import os
import platform
from telemetry.core import fuchsia_interface
from telemetry.core import platform as telemetry_platform
from telemetry.internal.backends.chrome import fuchsia_browser_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import fuchsia_device
from telemetry.internal.backends.chrome import chrome_startup_args
from telemetry.internal.util import local_first_binary_manager
class UnsupportedExtensionException(Exception):
  """Raised when browser options request extensions, which Fuchsia
  browsers do not support (see SupportsOptions below)."""
  pass
class PossibleFuchsiaBrowser(possible_browser.PossibleBrowser):
  """A Fuchsia browser that telemetry can attempt to create and control."""
  def __init__(self, browser_type, finder_options, fuchsia_platform):
    # finder_options is accepted for interface parity but unused here.
    del finder_options
    super(PossibleFuchsiaBrowser, self).__init__(browser_type, 'fuchsia', True)
    self._platform = fuchsia_platform
    self._platform_backend = (
        fuchsia_platform._platform_backend) # pylint: disable=protected-access
    # Like CrOS, there's no way to automatically determine the build directory,
    # so use the manually set output directory if possible.
    self._build_dir = os.environ.get('CHROMIUM_OUTPUT_DIR')
  def __repr__(self):
    return 'PossibleFuchsiaBrowser(app_type=%s)' % self.browser_type
  @property
  def browser_directory(self):
    # No host-side browser directory exists for a Fuchsia target.
    return None
  @property
  def profile_directory(self):
    # No host-side profile directory exists for a Fuchsia target.
    return None
  def _InitPlatformIfNeeded(self):
    # Platform is fully initialized in __init__; nothing more to do.
    pass
  def _GetPathsForOsPageCacheFlushing(self):
    # There is no page write-back on Fuchsia, so there is nothing to flush.
    return []
  def Create(self):
    """Start the browser process."""
    # Binaries are fetched for the host ('linux'), not the Fuchsia target.
    if local_first_binary_manager.LocalFirstBinaryManager.NeedsInit():
      local_first_binary_manager.LocalFirstBinaryManager.Init(
          self._build_dir, None, 'linux', platform.machine())
    startup_args = chrome_startup_args.GetFromBrowserOptions(
        self._browser_options)
    browser_backend = fuchsia_browser_backend.FuchsiaBrowserBackend(
        self._platform_backend, self._browser_options,
        self.browser_directory, self.profile_directory)
    try:
      return browser.Browser(
          browser_backend, self._platform_backend, startup_args,
          find_existing=False)
    except Exception:
      # Don't leak a half-started backend if Browser construction fails.
      browser_backend.Close()
      raise
  def CleanUpEnvironment(self):
    if self._browser_options is None:
      return # No environment to clean up.
    try:
      self._TearDownEnvironment()
    finally:
      # Mark the environment as torn down even if teardown raised.
      self._browser_options = None
  def SupportsOptions(self, browser_options):
    """Return True iff |browser_options| are usable; raises on extensions."""
    if len(browser_options.extensions_to_load) > 0:
      raise UnsupportedExtensionException(
          'Fuchsia browsers do not support extensions.')
    return True
  def UpdateExecutableIfNeeded(self):
    # Updating the browser is currently handled in the Chromium repository
    # instead of Catapult.
    pass
  @property
  def last_modification_time(self):
    # Sentinel: modification time is unknown/untracked for Fuchsia browsers.
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Return the first 'web-engine-shell' browser, or None if absent."""
  return next(
      (candidate for candidate in possible_browsers
       if candidate.browser_type == 'web-engine-shell'),
      None)
def FindAllBrowserTypes():
  """Return the browser type names telemetry can drive on Fuchsia."""
  return fuchsia_interface.FUCHSIA_BROWSERS
def FindAllAvailableBrowsers(finder_options, device):
  """Finds all available Fuchsia browsers.

  Returns an empty list when |device| is not a Fuchsia device.
  """
  if not isinstance(device, fuchsia_device.FuchsiaDevice):
    return []
  target_platform = telemetry_platform.GetPlatformForDevice(
      device, finder_options)
  return [
      PossibleFuchsiaBrowser('web-engine-shell', finder_options,
                             target_platform),
  ]
|
en
| 0.869057
|
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Finds Fuchsia browsers that can be started and controlled by telemetry. # pylint: disable=protected-access # Like CrOS, there's no way to automatically determine the build directory, # so use the manually set output directory if possible. # There is no page write-back on Fuchsia, so there is nothing to flush. Start the browser process. # No environment to clean up. # Updating the browser is currently handled in the Chromium repository # instead of Catapult. Finds all available Fuchsia browsers.
| 1.948555
| 2
|
Django/comments/models.py
|
xuhaer/FlaskWeb
| 0
|
6625989
|
<reponame>xuhaer/FlaskWeb
from django.db import models
# Create your models here.
class Comment(models.Model):
    """A reader comment attached to a blog Article."""
    # Commenter's display name.
    name = models.CharField(max_length=100)
    # Optional contact email.
    email = models.EmailField(max_length=255, blank=True)
    # Comment body.
    text = models.TextField()
    # Set once, automatically, when the row is first saved.
    created_at = models.DateTimeField(auto_now_add=True)
    # Deleting the article cascades to its comments.
    article = models.ForeignKey('blog.Article', on_delete=models.CASCADE)
    def __str__(self):
        # Short preview: the first 20 characters of the comment text.
        return self.text[:20]
|
from django.db import models
# Create your models here.
class Comment(models.Model):
    """A reader comment attached to a blog Article."""
    # Commenter's display name.
    name = models.CharField(max_length=100)
    # Optional contact email.
    email = models.EmailField(max_length=255, blank=True)
    # Comment body.
    text = models.TextField()
    # Set once, automatically, when the row is first saved.
    created_at = models.DateTimeField(auto_now_add=True)
    # Deleting the article cascades to its comments.
    article = models.ForeignKey('blog.Article', on_delete=models.CASCADE)
    def __str__(self):
        # Short preview: the first 20 characters of the comment text.
        return self.text[:20]
|
en
| 0.963489
|
# Create your models here.
| 2.403186
| 2
|
python/import/obj/district/nagreg.py
|
dudung/cookbook
| 0
|
6625990
|
<filename>python/import/obj/district/nagreg.py
# Data record for the Nagreg region (file path suggests a district).
level = 3  # administrative level; 3 presumably means district -- TODO confirm
name = 'Nagreg'  # region name
capital = 'Ganjarsabar'  # seat of government
area = 49.3  # area; units not stated in source, presumably km^2 -- TODO confirm
|
<filename>python/import/obj/district/nagreg.py
# Data record for the Nagreg region (file path suggests a district).
level = 3  # administrative level; 3 presumably means district -- TODO confirm
name = 'Nagreg'  # region name
capital = 'Ganjarsabar'  # seat of government
area = 49.3  # area; units not stated in source, presumably km^2 -- TODO confirm
|
none
| 1
| 1.266746
| 1
|
|
algorithms/GEM/GEM_main.py
|
YangLiangwei/DGFraud
| 447
|
6625991
|
'''
This code is due to <NAME> (@yutongD), <NAME> (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection)
https://github.com/safe-graph/DGFraud
'''
import tensorflow as tf
import argparse
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '../..')))
from algorithms.GEM.GEM import GEM
import time
from utils.data_loader import *
from utils.utils import *
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
# init the common args, expect the model specific args
def arg_parser():
    """Build and parse the command-line arguments for a GEM run.

    Returns:
        argparse.Namespace with seed, dataset_str, epoch_num, batch_size,
        momentum, learning_rate, hop and k.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    # BUG FIX: momentum is fractional; type=int rejected values like 0.9.
    parser.add_argument('--momentum', type=float, default=0.9)
    # BUG FIX: without a type, a CLI-supplied learning rate stayed a string;
    # also corrected the copy-pasted help text.
    parser.add_argument('--learning_rate', type=float, default=0.001,
                        help='Learning rate used by the optimizer.')
    # GEM
    parser.add_argument('--hop', default=1, help='hop number')
    parser.add_argument('--k', default=16, help='gem layer unit')
    args = parser.parse_args()
    return args
def set_env(args):
    """Reset the TF graph and seed numpy/TF RNGs for reproducibility.

    The graph must be reset before seeding so the seed applies to the
    fresh default graph.
    """
    tf.reset_default_graph()
    np.random.seed(args.seed)
    tf.set_random_seed(args.seed)
# get batch data
def get_data(ix, int_batch, train_size, data=None, labels=None):
    """Return one (data, labels) mini-batch of exactly int_batch items.

    The window [ix, ix + int_batch) is clamped to the end of the training
    set, so the last batch is always full-sized (it may overlap the
    previous batch).

    Args:
        ix: start index of the batch.
        int_batch: batch size.
        train_size: total number of training examples.
        data, labels: sequences to slice. Default to the module-level
            train_data/train_label globals, preserving the original
            call signature for existing callers.
    """
    if data is None:
        data = train_data
    if labels is None:
        labels = train_label
    if ix + int_batch >= train_size:
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    return data[ix:end], labels[ix:end]
def load_data(args):
    """Load the dataset named by args.dataset_str and derive its sizes.

    Returns:
        (adj_list, features, train_data, train_label, test_data,
        test_label, paras) where paras is
        [node_size, node_embedding, class_size, train_size].

    Raises:
        ValueError: if args.dataset_str is not a supported dataset name.
    """
    if args.dataset_str == 'dblp':
        adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp()
    elif args.dataset_str == 'example':
        adj_list, features, train_data, train_label, test_data, test_label = load_example_gem()
    else:
        # Previously an unknown name fell through to a confusing NameError;
        # fail fast with a clear message instead.
        raise ValueError('Unsupported dataset_str: %r' % args.dataset_str)
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return adj_list, features, train_data, train_label, test_data, test_label, paras
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train GEM on the training split, then report test accuracy.

    Args:
        args: parsed CLI options (epoch_num, batch_size, learning_rate,
            momentum, k, hop).
        adj_list: list of adjacency structures, one per meta-graph/device.
        features: node feature matrix of shape (node_size, node_embedding).
        train_data, train_label: training node ids and one-hot labels.
        test_data, test_label: test node ids and one-hot labels.
        paras: [node_size, node_embedding, class_size, train_size].
    """
    with tf.Session() as sess:
        adj_data = adj_list
        meta_size = len(adj_list)  # device num
        net = GEM(session=sess, class_size=paras[2], encoding=args.k,
                  meta=meta_size, nodes=paras[0], embedding=paras[1], hop=args.hop)
        sess.run(tf.global_variables_initializer())
        # net.load(sess)
        # BUG FIX: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement for timing.
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(features, adj_data, batch_label,
                                                  batch_data, args.learning_rate,
                                                  args.momentum)
                print("batch loss: {:.4f}, batch acc: {:.4f}".format(loss, acc))
                # print(prob, pred)
                train_loss += loss
                train_acc += acc
                count += 1
            # Average the per-batch metrics over the epoch.
            train_loss = train_loss / count
            train_acc = train_acc / count
            print("epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}".format(epoch, train_loss, train_acc))
        # net.save(sess)
        t_end = time.perf_counter()
        print("train time=", "{:.5f}".format(t_end - t_start))
        print("Train end!")
        test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label,
                                                                      test_data)
        print("test acc:", test_acc)
if __name__ == "__main__":
    # Entry point: parse args, fix random seeds, load the requested
    # dataset, then train and evaluate GEM.
    args = arg_parser()
    set_env(args)
    adj_list, features, train_data, train_label, test_data, test_label, paras = load_data(args)
    train(args, adj_list, features, train_data, train_label, test_data, test_label, paras)
|
'''
This code is due to <NAME> (@yutongD), <NAME> (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection)
https://github.com/safe-graph/DGFraud
'''
import tensorflow as tf
import argparse
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '../..')))
from algorithms.GEM.GEM import GEM
import time
from utils.data_loader import *
from utils.utils import *
# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
# init the common args, expect the model specific args
def arg_parser():
    """Build and parse the command-line arguments for a GEM run.

    Returns:
        argparse.Namespace with seed, dataset_str, epoch_num, batch_size,
        momentum, learning_rate, hop and k.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=123, help='Random seed.')
    parser.add_argument('--dataset_str', type=str, default='dblp', help="['dblp','example']")
    parser.add_argument('--epoch_num', type=int, default=30, help='Number of epochs to train.')
    parser.add_argument('--batch_size', type=int, default=1000)
    # BUG FIX: momentum is fractional; type=int rejected values like 0.9.
    parser.add_argument('--momentum', type=float, default=0.9)
    # BUG FIX: without a type, a CLI-supplied learning rate stayed a string;
    # also corrected the copy-pasted help text.
    parser.add_argument('--learning_rate', type=float, default=0.001,
                        help='Learning rate used by the optimizer.')
    # GEM
    parser.add_argument('--hop', default=1, help='hop number')
    parser.add_argument('--k', default=16, help='gem layer unit')
    args = parser.parse_args()
    return args
def set_env(args):
    """Reset the TF graph and seed numpy/TF RNGs for reproducibility.

    The graph must be reset before seeding so the seed applies to the
    fresh default graph.
    """
    tf.reset_default_graph()
    np.random.seed(args.seed)
    tf.set_random_seed(args.seed)
# get batch data
def get_data(ix, int_batch, train_size, data=None, labels=None):
    """Return one (data, labels) mini-batch of exactly int_batch items.

    The window [ix, ix + int_batch) is clamped to the end of the training
    set, so the last batch is always full-sized (it may overlap the
    previous batch).

    Args:
        ix: start index of the batch.
        int_batch: batch size.
        train_size: total number of training examples.
        data, labels: sequences to slice. Default to the module-level
            train_data/train_label globals, preserving the original
            call signature for existing callers.
    """
    if data is None:
        data = train_data
    if labels is None:
        labels = train_label
    if ix + int_batch >= train_size:
        ix = train_size - int_batch
        end = train_size
    else:
        end = ix + int_batch
    return data[ix:end], labels[ix:end]
def load_data(args):
    """Load the dataset named by args.dataset_str and derive its sizes.

    Returns:
        (adj_list, features, train_data, train_label, test_data,
        test_label, paras) where paras is
        [node_size, node_embedding, class_size, train_size].

    Raises:
        ValueError: if args.dataset_str is not a supported dataset name.
    """
    if args.dataset_str == 'dblp':
        adj_list, features, train_data, train_label, test_data, test_label = load_data_dblp()
    elif args.dataset_str == 'example':
        adj_list, features, train_data, train_label, test_data, test_label = load_example_gem()
    else:
        # Previously an unknown name fell through to a confusing NameError;
        # fail fast with a clear message instead.
        raise ValueError('Unsupported dataset_str: %r' % args.dataset_str)
    node_size = features.shape[0]
    node_embedding = features.shape[1]
    class_size = train_label.shape[1]
    train_size = len(train_data)
    paras = [node_size, node_embedding, class_size, train_size]
    return adj_list, features, train_data, train_label, test_data, test_label, paras
def train(args, adj_list, features, train_data, train_label, test_data, test_label, paras):
    """Train GEM on the training split, then report test accuracy.

    Args:
        args: parsed CLI options (epoch_num, batch_size, learning_rate,
            momentum, k, hop).
        adj_list: list of adjacency structures, one per meta-graph/device.
        features: node feature matrix of shape (node_size, node_embedding).
        train_data, train_label: training node ids and one-hot labels.
        test_data, test_label: test node ids and one-hot labels.
        paras: [node_size, node_embedding, class_size, train_size].
    """
    with tf.Session() as sess:
        adj_data = adj_list
        meta_size = len(adj_list)  # device num
        net = GEM(session=sess, class_size=paras[2], encoding=args.k,
                  meta=meta_size, nodes=paras[0], embedding=paras[1], hop=args.hop)
        sess.run(tf.global_variables_initializer())
        # net.load(sess)
        # BUG FIX: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement for timing.
        t_start = time.perf_counter()
        for epoch in range(args.epoch_num):
            train_loss = 0
            train_acc = 0
            count = 0
            for index in range(0, paras[3], args.batch_size):
                batch_data, batch_label = get_data(index, args.batch_size, paras[3])
                loss, acc, pred, prob = net.train(features, adj_data, batch_label,
                                                  batch_data, args.learning_rate,
                                                  args.momentum)
                print("batch loss: {:.4f}, batch acc: {:.4f}".format(loss, acc))
                # print(prob, pred)
                train_loss += loss
                train_acc += acc
                count += 1
            # Average the per-batch metrics over the epoch.
            train_loss = train_loss / count
            train_acc = train_acc / count
            print("epoch{:d} : train_loss: {:.4f}, train_acc: {:.4f}".format(epoch, train_loss, train_acc))
        # net.save(sess)
        t_end = time.perf_counter()
        print("train time=", "{:.5f}".format(t_end - t_start))
        print("Train end!")
        test_acc, test_pred, test_probabilities, test_tags = net.test(features, adj_data, test_label,
                                                                      test_data)
        print("test acc:", test_acc)
if __name__ == "__main__":
    # Entry point: parse args, fix random seeds, load the requested
    # dataset, then train and evaluate GEM.
    args = arg_parser()
    set_env(args)
    adj_list, features, train_data, train_label, test_data, test_label, paras = load_data(args)
    train(args, adj_list, features, train_data, train_label, test_data, test_label, paras)
|
en
| 0.708114
|
This code is due to <NAME> (@yutongD), <NAME> (@YingtongDou) and UIC BDSC Lab DGFraud (A Deep Graph-based Toolbox for Fraud Detection) https://github.com/safe-graph/DGFraud # os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' # init the common args, expect the model specific args # GEM # get batch data # device num # net.load(sess) # print(prob, pred) # net.save(sess)
| 2.267005
| 2
|
dataworkspace/dataworkspace/apps/core/migrations/0008_newslettersubscription.py
|
uktrade/analysis-workspace
| 1
|
6625992
|
<filename>dataworkspace/dataworkspace/apps/core/migrations/0008_newslettersubscription.py
# Generated by Django 3.2.13 on 2022-05-26 13:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates the NewsletterSubscription table: one row per user newsletter
    # signup, recording the email address used and whether it is active.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("core", "0007_auto_20220404_1519"),
    ]
    operations = [
        migrations.CreateModel(
            name="NewsletterSubscription",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                # Audit timestamps: set on insert / updated on every save.
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                # Defaults to False; presumably flipped on once subscribed.
                ("is_active", models.BooleanField(default=False)),
                ("email_address", models.CharField(max_length=256)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="newsletter_signups",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
|
<filename>dataworkspace/dataworkspace/apps/core/migrations/0008_newslettersubscription.py
# Generated by Django 3.2.13 on 2022-05-26 13:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Creates the NewsletterSubscription table: one row per user newsletter
    # signup, recording the email address used and whether it is active.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("core", "0007_auto_20220404_1519"),
    ]
    operations = [
        migrations.CreateModel(
            name="NewsletterSubscription",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                # Audit timestamps: set on insert / updated on every save.
                ("created_date", models.DateTimeField(auto_now_add=True)),
                ("modified_date", models.DateTimeField(auto_now=True)),
                # Defaults to False; presumably flipped on once subscribed.
                ("is_active", models.BooleanField(default=False)),
                ("email_address", models.CharField(max_length=256)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="newsletter_signups",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                "abstract": False,
            },
        ),
    ]
|
en
| 0.813077
|
# Generated by Django 3.2.13 on 2022-05-26 13:39
| 1.564085
| 2
|
mail/migrations/0012_auto_20210627_1049.py
|
prabin-acharya/mail-Gmail
| 1
|
6625993
|
# Generated by Django 3.2.4 on 2021-06-27 05:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two optional email fields (recipients_email, sender_email)
    # to the existing Email model.
    dependencies = [
        ('mail', '0011_alter_email_subject'),
    ]
    operations = [
        migrations.AddField(
            model_name='email',
            name='recipients_email',
            field=models.EmailField(blank=True, max_length=254),
        ),
        migrations.AddField(
            model_name='email',
            name='sender_email',
            field=models.EmailField(blank=True, max_length=254),
        ),
    ]
|
# Generated by Django 3.2.4 on 2021-06-27 05:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two optional email fields (recipients_email, sender_email)
    # to the existing Email model.
    dependencies = [
        ('mail', '0011_alter_email_subject'),
    ]
    operations = [
        migrations.AddField(
            model_name='email',
            name='recipients_email',
            field=models.EmailField(blank=True, max_length=254),
        ),
        migrations.AddField(
            model_name='email',
            name='sender_email',
            field=models.EmailField(blank=True, max_length=254),
        ),
    ]
|
en
| 0.875375
|
# Generated by Django 3.2.4 on 2021-06-27 05:04
| 1.568998
| 2
|
app/core/models.py
|
duks500/recipe-app-api
| 0
|
6625994
|
import uuid
import os
from django.db import models
# what we need to extand the user base model
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
    """Generate a collision-free upload path for a new recipe image.

    The original extension is kept, but the base name is replaced with
    a random UUID so uploads can never clash.
    """
    extension = filename.split('.')[-1]
    unique_name = '{}.{}'.format(uuid.uuid4(), extension)
    # Join into a valid path under the recipe uploads directory.
    return os.path.join('uploads/recipe/', unique_name)
# extends the BaseUserManager
# Helo manage user and superuser
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of username."""

    # BUG FIX: the default was the invalid placeholder `<PASSWORD>`
    # (a syntax error); Django's convention is password=None.
    def create_user(self, email, password=None, **extra_fields):
        """Create and save a new user.

        Args:
            email: required; normalized (domain lower-cased) before saving.
            password: optional raw password, hashed via set_password.
            **extra_fields: any additional model field values.

        Raises:
            ValueError: if email is empty.
        """
        if not email:
            raise ValueError('User must have an email address')
        # Normalize the email (lower-cases the domain part) for every new user.
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create and save a new superuser (staff + superuser flags set)."""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username."""
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    # Custom manager that creates users keyed by email.
    objects = UserManager()
    # Authenticate with the email field instead of a separate username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used for a recipe"""
    name = models.CharField(max_length=255)
    user = models.ForeignKey(
        # Owner of the tag: the project's configured user model.
        settings.AUTH_USER_MODEL,
        # Deleting the user also deletes their tags.
        on_delete=models.CASCADE,
    )
    def __str__(self):
        # Human-readable representation (admin, shell, logs).
        return self.name
class Ingredient(models.Model):
    """Ingredient to be used in a recipe"""
    name = models.CharField(max_length=255)
    user = models.ForeignKey(
        # Owner of the ingredient: the project's configured user model.
        settings.AUTH_USER_MODEL,
        # Deleting the user also deletes their ingredients.
        on_delete=models.CASCADE,
    )
    def __str__(self):
        # Human-readable representation (admin, shell, logs).
        return self.name
class Recipe(models.Model):
    """Recipe object"""
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    title = models.CharField(max_length=255)
    # Preparation time; presumably minutes given the field name.
    time_minutes = models.IntegerField()
    price = models.DecimalField(max_digits=5, decimal_places=2)
    # Optional external link to the recipe source.
    link = models.CharField(max_length=255, blank=True)
    # ManyToManyField: a recipe can reference many tags/ingredients and
    # each tag/ingredient can belong to many recipes.
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Stored under uploads/recipe/ with a UUID filename (see
    # recipe_image_file_path above).
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)
    def __str__(self):
        return self.title
|
import uuid
import os
from django.db import models
# what we need to extand the user base model
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
def recipe_image_file_path(instance, filename):
    """Return a unique upload path for a recipe image.

    The uploaded file name is discarded; only the part after its last dot
    is kept and attached to a freshly generated UUID4 so stored files can
    never collide. ``instance`` is required by Django's ``upload_to``
    protocol but is not used.
    """
    # Everything after the last dot; a dotless name is kept whole,
    # matching the original split('.')[-1] behaviour.
    extension = filename.rsplit('.', 1)[-1]
    unique_name = '.'.join([str(uuid.uuid4()), extension])
    # All recipe images live under a common uploads/recipe/ directory.
    return os.path.join('uploads/recipe/', unique_name)
class UserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of username."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a new user.

        Args:
            email: required login identifier; its domain part is
                lower-cased via ``normalize_email``.
            password: optional raw password. ``None`` (the conventional
                default — the original source carried a ``<PASSWORD>``
                redaction artifact here, which is a SyntaxError) results
                in an unusable password via ``set_password``.
            **extra_fields: any additional model fields (e.g. ``name``).

        Raises:
            ValueError: if ``email`` is empty.
        """
        if not email:
            raise ValueError('User must have an email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create and save a new superuser (staff + superuser flags set)."""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        # Single save after both flags are set.
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that supports using email instead of username."""

    # Unique email doubles as the login identifier (see USERNAME_FIELD).
    email = models.EmailField(max_length=255, unique=True)
    # Display name; not used for authentication.
    name = models.CharField(max_length=255)
    # Deactivation flag checked by Django's auth backends.
    is_active = models.BooleanField(default=True)
    # Grants access to the Django admin site.
    is_staff = models.BooleanField(default=False)

    # Custom manager so objects.create_user()/create_superuser() handle
    # email-based accounts.
    objects = UserManager()

    # Authenticate with the email field instead of the default username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used for a recipe."""

    name = models.CharField(max_length=255)
    # Owner of the tag; settings.AUTH_USER_MODEL keeps the FK pointing at
    # the project's custom User model rather than django.contrib.auth.User.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        # Deleting the user also deletes their tags.
        on_delete=models.CASCADE,
    )

    def __str__(self):
        """Return the tag name as the string representation."""
        return self.name
class Ingredient(models.Model):
    """Ingredient to be used in a recipe."""

    name = models.CharField(max_length=255)
    # Owner of the ingredient; settings.AUTH_USER_MODEL resolves to the
    # project's custom User model.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        # Deleting the user also deletes their ingredients (the original
        # "delete the tag" comment here was a copy-paste leftover).
        on_delete=models.CASCADE,
    )

    def __str__(self):
        """Return the ingredient name as the string representation."""
        return self.name
class Recipe(models.Model):
    """Recipe object owned by a single user."""

    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        # Deleting the user also deletes their recipes.
        on_delete=models.CASCADE,
    )
    title = models.CharField(max_length=255)
    # Preparation time in whole minutes.
    time_minutes = models.IntegerField()
    # Price up to 999.99 (5 digits, 2 decimal places).
    price = models.DecimalField(max_digits=5, decimal_places=2)
    # Optional external link to the recipe source.
    link = models.CharField(max_length=255, blank=True)
    # Many-to-many: a recipe can have many ingredients/tags and vice versa.
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Optional image stored under a UUID-based path (recipe_image_file_path).
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)

    def __str__(self):
        """Return the recipe title as the string representation."""
        return self.title
|
en
| 0.872428
|
# what we need to extand the user base model Generate file path for new recipe image # Return the extention of the file name # Create a new name using the uuid # A relable method that allowed us to join 2 strings into a vaild path # extends the BaseUserManager # Helo manage user and superuser Create and save a new user # Rasie an error if the email is empty # Make the email to be lower case for every new user Create and saves a new super user # Create a new user using create_user # Make the user to be a superuser Custom user model that suppors using email instead of username # create new user manager for the objects # make the default username to be email insead of name Tag to be used for a recipe # The model for the foreignKey # on_delete= What to do after deleting the user # In this case, delete the tag # return the string representation Ingredient to be used in a recipe # The model for the foreignKey # on_delete= What to do after deleting the user # In this case, delete the tag Recipe object # ManyToManyField = we could have many tags for example for one recipe
| 2.791622
| 3
|
RecoTracker/IterativeTracking/python/PixelPairStep_cff.py
|
pasmuss/cmssw
| 0
|
6625995
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016 as _tracker_apv_vfp30_2016
import RecoTracker.IterativeTracking.iterativeTkConfig as _cfg
# NEW CLUSTERS (remove previously used clusters)
# Mask clusters already consumed by earlier tracking iterations so this
# step only seeds from the remaining hits; each non-default era swaps in
# its own cluster remover.
pixelPairStepClusters = _cfg.clusterRemoverForIter("PixelPairStep")
for _eraName, _postfix, _era in _cfg.nonDefaultEras():
    _era.toReplaceWith(pixelPairStepClusters, _cfg.clusterRemoverForIter("PixelPairStep", _eraName, _postfix))
# SEEDING LAYERS
# Pixel-pair combinations used for seeding (default, pre-Phase-1 list).
# Barrel (BPix) and forward (FPix) layers share the same hit producer and
# skip clusters flagged by pixelPairStepClusters above.
pixelPairStepSeedLayers = cms.EDProducer("SeedingLayersEDProducer",
    layerList = cms.vstring('BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
                            'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
                            'BPix2+FPix1_pos', 'BPix2+FPix1_neg',
                            'FPix1_pos+FPix2_pos', 'FPix1_neg+FPix2_neg'),
    BPix = cms.PSet(
        TTRHBuilder = cms.string('WithTrackAngle'),
        HitProducer = cms.string('siPixelRecHits'),
        skipClusters = cms.InputTag('pixelPairStepClusters')
    ),
    FPix = cms.PSet(
        TTRHBuilder = cms.string('WithTrackAngle'),
        HitProducer = cms.string('siPixelRecHits'),
        skipClusters = cms.InputTag('pixelPairStepClusters')
    )
)
# layers covering the region not covered by quadruplets (so it is
# just acting as backup of triplets)
_layerListForPhase1 = [
    'BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
    'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
    'BPix2+FPix1_pos', 'BPix2+FPix1_neg',
]
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
from Configuration.Eras.Modifier_trackingPhase1QuadProp_cff import trackingPhase1QuadProp
trackingPhase1.toModify(pixelPairStepSeedLayers, layerList = _layerListForPhase1)
trackingPhase1QuadProp.toModify(pixelPairStepSeedLayers, layerList = _layerListForPhase1)
# only layers covering the region not covered by quadruplets
# (so it is just acting as backup of triplets)
_layerListForPhase2 = [
    'BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
    'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
    'BPix2+FPix1_pos', 'BPix2+FPix1_neg'
]
# Modifying these hit errors seems to make no difference (original
# author's observation, kept for the record).
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(pixelPairStepSeedLayers,
    layerList = _layerListForPhase2,
    BPix = dict(
        useErrorsFromParam = cms.bool(True),
        hitErrorRPhi = cms.double(0.0016),
        hitErrorRZ = cms.double(0.0035),
        TTRHBuilder = cms.string('TTRHBuilderWithoutAngle4PixelPairs'),
    ),
    FPix = dict(
        useErrorsFromParam = cms.bool(True),
        hitErrorRPhi = cms.double(0.0030),
        hitErrorRZ = cms.double(0.0020),
        TTRHBuilder = cms.string('TTRHBuilderWithoutAngle4PixelPairs'),
    )
)
# TrackingRegion
# Vertex-constrained global region: small origin radius with a fixed
# error fallback; multiple scattering is enabled by default and disabled
# again for LowPU / Phase-1 / Phase-2 eras below.
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
pixelPairStepTrackingRegions = _globalTrackingRegionWithVertices.clone(RegionPSet = dict(
    ptMin = 0.6,
    originRadius = 0.015,
    fixedError = 0.03,
    useMultipleScattering = True,
))
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toModify(pixelPairStepTrackingRegions, RegionPSet=dict(useMultipleScattering=False))
# Shared era tweak: no multiple scattering, at most 5 vertices considered.
_region_Phase1 = dict(
    useMultipleScattering = False,
    maxNVertices = 5,
)
trackingPhase1.toModify(pixelPairStepTrackingRegions, RegionPSet=_region_Phase1)
trackingPhase1QuadProp.toModify(pixelPairStepTrackingRegions, RegionPSet=_region_Phase1)
trackingPhase2PU140.toModify(pixelPairStepTrackingRegions, RegionPSet=_region_Phase1)
# SEEDS
# Build hit doublets from the seeding layers inside the tracking regions,
# then turn the doublets into trajectory seeds, filtered by the pixel
# cluster-shape comparitor.
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
pixelPairStepHitDoublets = _hitPairEDProducer.clone(
    seedingLayers = "pixelPairStepSeedLayers",
    trackingRegions = "pixelPairStepTrackingRegions",
    produceSeedingHitSets = True,
)
from RecoTracker.TkSeedGenerator.seedCreatorFromRegionConsecutiveHitsEDProducer_cff import seedCreatorFromRegionConsecutiveHitsEDProducer as _seedCreatorFromRegionConsecutiveHitsEDProducer
pixelPairStepSeeds = _seedCreatorFromRegionConsecutiveHitsEDProducer.clone(
    seedingHitSets = "pixelPairStepHitDoublets",
    SeedComparitorPSet = dict(# FIXME: is this defined in any cfi that could be imported instead of copy-paste?
        ComponentName = 'PixelClusterShapeSeedComparitor',
        FilterAtHelixStage = cms.bool(True),
        FilterPixelHits = cms.bool(True),
        FilterStripHits = cms.bool(False),
        ClusterShapeHitFilterName = cms.string('ClusterShapeHitFilter'),
        ClusterShapeCacheSrc = cms.InputTag('siPixelClusterShapeCache'),
    )
)
# Clone for the phase1 recovery mode
pixelPairStepSeedsA = pixelPairStepSeeds.clone()
# Recovery for L2L3
# Backup seeding via the BPix1+BPix4 pair, restricted to point-seeded
# regions around the first-step primary vertices.
pixelPairStepSeedLayersB = pixelPairStepSeedLayers.clone(
    layerList = [
        'BPix1+BPix4',
    ]
)
from RecoTracker.TkTrackingRegions.pointSeededTrackingRegion_cfi import pointSeededTrackingRegion as _pointSeededTrackingRegion
pixelPairStepTrackingRegionsB = _pointSeededTrackingRegion.clone(
    RegionPSet = dict(
        ptMin = 0.6,
        originRadius = 0.015,
        mode = "VerticesFixed",
        # NOTE(review): 'zErrorVetex' looks misspelled but appears to match
        # the parameter name as declared in pointSeededTrackingRegion_cfi —
        # confirm against that cfi before renaming.
        zErrorVetex = 0.03,
        vertexCollection = "firstStepPrimaryVertices",
        beamSpot = "offlineBeamSpot",
        maxNVertices = 5,
        maxNRegions = 5,
        whereToUseMeasurementTracker = "Never",
        deltaEta = 1.2,
        deltaPhi = 0.5,
        points = dict(
            eta = [0.0],
            phi = [3.0],
        )
    )
)
pixelPairStepHitDoubletsB = pixelPairStepHitDoublets.clone(
    seedingLayers = "pixelPairStepSeedLayersB",
    trackingRegions = "pixelPairStepTrackingRegionsB",
)
pixelPairStepSeedsB = pixelPairStepSeedsA.clone(seedingHitSets = "pixelPairStepHitDoubletsB")
# Merge
# Combine the regular (A) and recovery (B) seed collections; Phase-1 eras
# replace pixelPairStepSeeds with this merged collection.
from RecoTracker.TkSeedGenerator.GlobalCombinedSeeds_cfi import globalCombinedSeeds as _globalCombinedSeeds
_pixelPairStepSeedsMerged = _globalCombinedSeeds.clone(
    seedCollections = ["pixelPairStepSeedsA", "pixelPairStepSeedsB"],
)
trackingPhase1.toReplaceWith(pixelPairStepSeeds, _pixelPairStepSeedsMerged)
trackingPhase1QuadProp.toReplaceWith(pixelPairStepSeeds, _pixelPairStepSeedsMerged)
# QUALITY CUTS DURING TRACK BUILDING
import TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff
_pixelPairStepTrajectoryFilterBase = TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff.CkfBaseTrajectoryFilter_block.clone(
minimumNumberOfHits = 3,
minPt = 0.1,
)
pixelPairStepTrajectoryFilterBase = _pixelPairStepTrajectoryFilterBase.clone(
seedPairPenalty =0,
maxCCCLostHits = 0,
minGoodStripCharge = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutLoose'))
)
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016
_tracker_apv_vfp30_2016.toModify(pixelPairStepTrajectoryFilterBase, maxCCCLostHits = 2)
trackingLowPU.toReplaceWith(pixelPairStepTrajectoryFilterBase, _pixelPairStepTrajectoryFilterBase)
trackingPhase1.toModify(pixelPairStepTrajectoryFilterBase, minimumNumberOfHits = 4)
trackingPhase1QuadProp.toModify(pixelPairStepTrajectoryFilterBase, minimumNumberOfHits = 4)
trackingPhase2PU140.toReplaceWith(pixelPairStepTrajectoryFilterBase, _pixelPairStepTrajectoryFilterBase.clone(
minimumNumberOfHits = 4,
maxLostHitsFraction = 1./10.,
constantValueForLostHitsFractionFilter = 0.701,
))
import RecoPixelVertexing.PixelLowPtUtilities.StripSubClusterShapeTrajectoryFilter_cfi
pixelPairStepTrajectoryFilterShape = RecoPixelVertexing.PixelLowPtUtilities.StripSubClusterShapeTrajectoryFilter_cfi.StripSubClusterShapeTrajectoryFilterTIX12.clone()
pixelPairStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string('CompositeTrajectoryFilter'),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string('pixelPairStepTrajectoryFilterBase')),
# cms.PSet( refToPSet_ = cms.string('pixelPairStepTrajectoryFilterShape'))
),
)
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeTrajectoryFilter_cfi import *
trackingPhase2PU140.toModify(pixelPairStepTrajectoryFilter,
filters = pixelPairStepTrajectoryFilter.filters + [cms.PSet(refToPSet_ = cms.string('ClusterShapeTrajectoryFilter'))]
)
pixelPairStepTrajectoryFilterInOut = pixelPairStepTrajectoryFilterBase.clone(
minimumNumberOfHits = 4,
seedExtension = 1,
strictSeedExtension = False, # allow inactive
pixelSeedExtension = False,
)
import RecoTracker.MeasurementDet.Chi2ChargeMeasurementEstimator_cfi
pixelPairStepChi2Est = RecoTracker.MeasurementDet.Chi2ChargeMeasurementEstimator_cfi.Chi2ChargeMeasurementEstimator.clone(
ComponentName = cms.string('pixelPairStepChi2Est'),
nSigma = cms.double(3.0),
MaxChi2 = cms.double(9.0),
clusterChargeCut = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutLoose')),
pTChargeCutThreshold = cms.double(15.)
)
_tracker_apv_vfp30_2016.toModify(pixelPairStepChi2Est,
clusterChargeCut = dict(refToPSet_ = "SiStripClusterChargeCutTiny")
)
trackingLowPU.toModify(pixelPairStepChi2Est,
clusterChargeCut = dict(refToPSet_ = 'SiStripClusterChargeCutTiny'),
)
# TRACK BUILDING
import RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi
pixelPairStepTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('pixelPairStepTrajectoryFilter')),
maxCand = 3,
estimator = cms.string('pixelPairStepChi2Est'),
maxDPhiForLooperReconstruction = cms.double(2.0),
maxPtForLooperReconstruction = cms.double(0.7)
)
trackingLowPU.toModify(pixelPairStepTrajectoryBuilder, maxCand = 2)
_seedExtension = dict(
inOutTrajectoryFilter = dict(refToPSet_ = "pixelPairStepTrajectoryFilterInOut"),
useSameTrajFilter = False,
)
trackingPhase1.toModify(pixelPairStepTrajectoryBuilder, **_seedExtension)
trackingPhase1QuadProp.toModify(pixelPairStepTrajectoryBuilder, **_seedExtension)
trackingPhase2PU140.toModify(pixelPairStepTrajectoryBuilder, **_seedExtension)
# MAKING OF TRACK CANDIDATES
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
pixelPairStepTrackCandidates = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
src = cms.InputTag('pixelPairStepSeeds'),
clustersToSkip = cms.InputTag('pixelPairStepClusters'),
TrajectoryBuilderPSet = cms.PSet(refToPSet_ = cms.string('pixelPairStepTrajectoryBuilder')),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
numHitsForSeedCleaner = cms.int32(50),
onlyPixelHitsForSeedCleaner = cms.bool(True),
)
trackingPhase2PU140.toModify(pixelPairStepTrackCandidates,
clustersToSkip = None,
phase2clustersToSkip = cms.InputTag("pixelPairStepClusters"),
TrajectoryCleaner = "pixelPairStepTrajectoryCleanerBySharedHits"
)
from TrackingTools.TrajectoryCleaning.TrajectoryCleanerBySharedHits_cfi import trajectoryCleanerBySharedHits as _trajectoryCleanerBySharedHits
pixelPairStepTrajectoryCleanerBySharedHits = _trajectoryCleanerBySharedHits.clone(
ComponentName = 'pixelPairStepTrajectoryCleanerBySharedHits',
fractionShared = 0.095,
allowSharedFirstHit = True
)
trackingPhase2PU140.toModify(pixelPairStepTrajectoryCleanerBySharedHits, fractionShared = 0.09)
# TRACK FITTING
import RecoTracker.TrackProducer.TrackProducer_cfi
pixelPairStepTracks = RecoTracker.TrackProducer.TrackProducer_cfi.TrackProducer.clone(
AlgorithmName = cms.string('pixelPairStep'),
src = 'pixelPairStepTrackCandidates',
Fitter = cms.string('FlexibleKFFittingSmoother')
)
# Final selection
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierPrompt_cfi import *
pixelPairStep = TrackMVAClassifierPrompt.clone()
pixelPairStep.src = 'pixelPairStepTracks'
pixelPairStep.GBRForestLabel = 'MVASelectorIter2_13TeV'
pixelPairStep.qualityCuts = [-0.2,0.0,0.3]
# For LowPU and Phase2PU140
import RecoTracker.IterativeTracking.LowPtTripletStep_cff
import RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi
pixelPairStepSelector = RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.multiTrackSelector.clone(
src='pixelPairStepTracks',
useAnyMVA = cms.bool(True),
GBRForestLabel = cms.string('MVASelectorIter2'),
trackSelectors= cms.VPSet(
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
name = 'pixelPairStepLoose',
), #end of pset
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(
name = 'pixelPairStepTight',
preFilterName = 'pixelPairStepLoose',
),
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(
name = 'QualityMasks',
preFilterName = 'pixelPairStepTight',
),
),
vertices = cms.InputTag("pixelVertices")#end of vpset
) #end of clone
trackingPhase2PU140.toModify(pixelPairStepSelector,
useAnyMVA = None,
GBRForestLabel = None,
trackSelectors= cms.VPSet(
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
name = 'pixelPairStepLoose',
chi2n_par = 0.7,
res_par = ( 0.003, 0.002 ),
minNumberLayers = 3,
maxNumberLostLayers = 2,
minNumber3DLayers = 3,
d0_par1 = ( 0.4, 4.0 ),
dz_par1 = ( 0.4, 4.0 ),
d0_par2 = ( 0.6, 4.0 ),
dz_par2 = ( 0.45, 4.0 )
), #end of pset
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(
name = 'pixelPairStepTight',
preFilterName = 'pixelPairStepLoose',
chi2n_par = 0.6,
res_par = ( 0.003, 0.002 ),
minNumberLayers = 4,
maxNumberLostLayers = 2,
minNumber3DLayers = 3,
d0_par1 = ( 0.35, 4.0 ),
dz_par1 = ( 0.35, 4.0 ),
d0_par2 = ( 0.5, 4.0 ),
dz_par2 = ( 0.4, 4.0 )
),
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(
name = 'pixelPairStep',
preFilterName = 'pixelPairStepTight',
chi2n_par = 0.5,
res_par = ( 0.003, 0.001 ),
minNumberLayers = 5,
maxNumberLostLayers = 2,
minNumber3DLayers = 4,
d0_par1 = ( 0.3, 4.0 ),
dz_par1 = ( 0.3, 4.0 ),
d0_par2 = ( 0.45, 4.0 ),
dz_par2 = ( 0.35, 4.0 )
),
), #end of vpset
vertices = "firstStepPrimaryVertices"
) #end of clone
# Final sequence
# Ordered chain for this iteration: mask used clusters -> seeding layers
# -> regions -> doublets -> seeds -> track candidates -> fitted tracks ->
# final classification.
PixelPairStep = cms.Sequence(pixelPairStepClusters*
                             pixelPairStepSeedLayers*
                             pixelPairStepTrackingRegions*
                             pixelPairStepHitDoublets*
                             pixelPairStepSeeds*
                             pixelPairStepTrackCandidates*
                             pixelPairStepTracks*
                             pixelPairStep)
# LowPU / Phase2PU140: swap the MVA classifier for the cut-based selector.
_PixelPairStep_LowPU_Phase2PU140 = PixelPairStep.copy()
_PixelPairStep_LowPU_Phase2PU140.replace(pixelPairStep, pixelPairStepSelector)
trackingLowPU.toReplaceWith(PixelPairStep, _PixelPairStep_LowPU_Phase2PU140)
trackingPhase2PU140.toReplaceWith(PixelPairStep, _PixelPairStep_LowPU_Phase2PU140)
# Phase-1: insert the A/B recovery seeding chain ahead of the merged seeds.
_PixelPairStep_Phase1 = PixelPairStep.copy()
_PixelPairStep_Phase1.replace(pixelPairStepSeeds,
                              pixelPairStepSeedsA *
                              pixelPairStepSeedLayersB*pixelPairStepTrackingRegionsB*pixelPairStepHitDoubletsB*pixelPairStepSeedsB*
                              pixelPairStepSeeds)
trackingPhase1.toReplaceWith(PixelPairStep, _PixelPairStep_Phase1)
trackingPhase1QuadProp.toReplaceWith(PixelPairStep, _PixelPairStep_Phase1)
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016 as _tracker_apv_vfp30_2016
import RecoTracker.IterativeTracking.iterativeTkConfig as _cfg
# NEW CLUSTERS (remove previously used clusters)
pixelPairStepClusters = _cfg.clusterRemoverForIter("PixelPairStep")
for _eraName, _postfix, _era in _cfg.nonDefaultEras():
_era.toReplaceWith(pixelPairStepClusters, _cfg.clusterRemoverForIter("PixelPairStep", _eraName, _postfix))
# SEEDING LAYERS
pixelPairStepSeedLayers = cms.EDProducer("SeedingLayersEDProducer",
layerList = cms.vstring('BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
'BPix2+FPix1_pos', 'BPix2+FPix1_neg',
'FPix1_pos+FPix2_pos', 'FPix1_neg+FPix2_neg'),
BPix = cms.PSet(
TTRHBuilder = cms.string('WithTrackAngle'),
HitProducer = cms.string('siPixelRecHits'),
skipClusters = cms.InputTag('pixelPairStepClusters')
),
FPix = cms.PSet(
TTRHBuilder = cms.string('WithTrackAngle'),
HitProducer = cms.string('siPixelRecHits'),
skipClusters = cms.InputTag('pixelPairStepClusters')
)
)
# layers covering the region not covered by quadruplets (so it is
# just acting as backup of triplets)
_layerListForPhase1 = [
'BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
'BPix2+FPix1_pos', 'BPix2+FPix1_neg',
]
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
from Configuration.Eras.Modifier_trackingPhase1QuadProp_cff import trackingPhase1QuadProp
trackingPhase1.toModify(pixelPairStepSeedLayers, layerList = _layerListForPhase1)
trackingPhase1QuadProp.toModify(pixelPairStepSeedLayers, layerList = _layerListForPhase1)
# only layers covering the region not covered by quadruplets
# (so it is just acting as backup of triplets)
_layerListForPhase2 = [
'BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
'BPix2+FPix1_pos', 'BPix2+FPix1_neg'
]
# modifing these errors seems to make no difference
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase2PU140.toModify(pixelPairStepSeedLayers,
layerList = _layerListForPhase2,
BPix = dict(
useErrorsFromParam = cms.bool(True),
hitErrorRPhi = cms.double(0.0016),
hitErrorRZ = cms.double(0.0035),
TTRHBuilder = cms.string('TTRHBuilderWithoutAngle4PixelPairs'),
),
FPix = dict(
useErrorsFromParam = cms.bool(True),
hitErrorRPhi = cms.double(0.0030),
hitErrorRZ = cms.double(0.0020),
TTRHBuilder = cms.string('TTRHBuilderWithoutAngle4PixelPairs'),
)
)
# TrackingRegion
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cff import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
pixelPairStepTrackingRegions = _globalTrackingRegionWithVertices.clone(RegionPSet = dict(
ptMin = 0.6,
originRadius = 0.015,
fixedError = 0.03,
useMultipleScattering = True,
))
from Configuration.Eras.Modifier_trackingLowPU_cff import trackingLowPU
trackingLowPU.toModify(pixelPairStepTrackingRegions, RegionPSet=dict(useMultipleScattering=False))
_region_Phase1 = dict(
useMultipleScattering = False,
maxNVertices = 5,
)
trackingPhase1.toModify(pixelPairStepTrackingRegions, RegionPSet=_region_Phase1)
trackingPhase1QuadProp.toModify(pixelPairStepTrackingRegions, RegionPSet=_region_Phase1)
trackingPhase2PU140.toModify(pixelPairStepTrackingRegions, RegionPSet=_region_Phase1)
# SEEDS
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
pixelPairStepHitDoublets = _hitPairEDProducer.clone(
seedingLayers = "pixelPairStepSeedLayers",
trackingRegions = "pixelPairStepTrackingRegions",
produceSeedingHitSets = True,
)
from RecoTracker.TkSeedGenerator.seedCreatorFromRegionConsecutiveHitsEDProducer_cff import seedCreatorFromRegionConsecutiveHitsEDProducer as _seedCreatorFromRegionConsecutiveHitsEDProducer
pixelPairStepSeeds = _seedCreatorFromRegionConsecutiveHitsEDProducer.clone(
seedingHitSets = "pixelPairStepHitDoublets",
SeedComparitorPSet = dict(# FIXME: is this defined in any cfi that could be imported instead of copy-paste?
ComponentName = 'PixelClusterShapeSeedComparitor',
FilterAtHelixStage = cms.bool(True),
FilterPixelHits = cms.bool(True),
FilterStripHits = cms.bool(False),
ClusterShapeHitFilterName = cms.string('ClusterShapeHitFilter'),
ClusterShapeCacheSrc = cms.InputTag('siPixelClusterShapeCache'),
)
)
# Clone for the phase1 recovery mode
pixelPairStepSeedsA = pixelPairStepSeeds.clone()
# Recovery for L2L3
pixelPairStepSeedLayersB = pixelPairStepSeedLayers.clone(
layerList = [
'BPix1+BPix4',
]
)
from RecoTracker.TkTrackingRegions.pointSeededTrackingRegion_cfi import pointSeededTrackingRegion as _pointSeededTrackingRegion
pixelPairStepTrackingRegionsB = _pointSeededTrackingRegion.clone(
RegionPSet = dict(
ptMin = 0.6,
originRadius = 0.015,
mode = "VerticesFixed",
zErrorVetex = 0.03,
vertexCollection = "firstStepPrimaryVertices",
beamSpot = "offlineBeamSpot",
maxNVertices = 5,
maxNRegions = 5,
whereToUseMeasurementTracker = "Never",
deltaEta = 1.2,
deltaPhi = 0.5,
points = dict(
eta = [0.0],
phi = [3.0],
)
)
)
pixelPairStepHitDoubletsB = pixelPairStepHitDoublets.clone(
seedingLayers = "pixelPairStepSeedLayersB",
trackingRegions = "pixelPairStepTrackingRegionsB",
)
pixelPairStepSeedsB = pixelPairStepSeedsA.clone(seedingHitSets = "pixelPairStepHitDoubletsB")
# Merge
from RecoTracker.TkSeedGenerator.GlobalCombinedSeeds_cfi import globalCombinedSeeds as _globalCombinedSeeds
_pixelPairStepSeedsMerged = _globalCombinedSeeds.clone(
seedCollections = ["pixelPairStepSeedsA", "pixelPairStepSeedsB"],
)
trackingPhase1.toReplaceWith(pixelPairStepSeeds, _pixelPairStepSeedsMerged)
trackingPhase1QuadProp.toReplaceWith(pixelPairStepSeeds, _pixelPairStepSeedsMerged)
# QUALITY CUTS DURING TRACK BUILDING
import TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff
_pixelPairStepTrajectoryFilterBase = TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff.CkfBaseTrajectoryFilter_block.clone(
minimumNumberOfHits = 3,
minPt = 0.1,
)
pixelPairStepTrajectoryFilterBase = _pixelPairStepTrajectoryFilterBase.clone(
seedPairPenalty =0,
maxCCCLostHits = 0,
minGoodStripCharge = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutLoose'))
)
from Configuration.Eras.Modifier_tracker_apv_vfp30_2016_cff import tracker_apv_vfp30_2016
_tracker_apv_vfp30_2016.toModify(pixelPairStepTrajectoryFilterBase, maxCCCLostHits = 2)
trackingLowPU.toReplaceWith(pixelPairStepTrajectoryFilterBase, _pixelPairStepTrajectoryFilterBase)
trackingPhase1.toModify(pixelPairStepTrajectoryFilterBase, minimumNumberOfHits = 4)
trackingPhase1QuadProp.toModify(pixelPairStepTrajectoryFilterBase, minimumNumberOfHits = 4)
trackingPhase2PU140.toReplaceWith(pixelPairStepTrajectoryFilterBase, _pixelPairStepTrajectoryFilterBase.clone(
minimumNumberOfHits = 4,
maxLostHitsFraction = 1./10.,
constantValueForLostHitsFractionFilter = 0.701,
))
import RecoPixelVertexing.PixelLowPtUtilities.StripSubClusterShapeTrajectoryFilter_cfi
pixelPairStepTrajectoryFilterShape = RecoPixelVertexing.PixelLowPtUtilities.StripSubClusterShapeTrajectoryFilter_cfi.StripSubClusterShapeTrajectoryFilterTIX12.clone()
pixelPairStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string('CompositeTrajectoryFilter'),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string('pixelPairStepTrajectoryFilterBase')),
# cms.PSet( refToPSet_ = cms.string('pixelPairStepTrajectoryFilterShape'))
),
)
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeTrajectoryFilter_cfi import *
trackingPhase2PU140.toModify(pixelPairStepTrajectoryFilter,
filters = pixelPairStepTrajectoryFilter.filters + [cms.PSet(refToPSet_ = cms.string('ClusterShapeTrajectoryFilter'))]
)
pixelPairStepTrajectoryFilterInOut = pixelPairStepTrajectoryFilterBase.clone(
minimumNumberOfHits = 4,
seedExtension = 1,
strictSeedExtension = False, # allow inactive
pixelSeedExtension = False,
)
import RecoTracker.MeasurementDet.Chi2ChargeMeasurementEstimator_cfi
pixelPairStepChi2Est = RecoTracker.MeasurementDet.Chi2ChargeMeasurementEstimator_cfi.Chi2ChargeMeasurementEstimator.clone(
ComponentName = cms.string('pixelPairStepChi2Est'),
nSigma = cms.double(3.0),
MaxChi2 = cms.double(9.0),
clusterChargeCut = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutLoose')),
pTChargeCutThreshold = cms.double(15.)
)
_tracker_apv_vfp30_2016.toModify(pixelPairStepChi2Est,
clusterChargeCut = dict(refToPSet_ = "SiStripClusterChargeCutTiny")
)
trackingLowPU.toModify(pixelPairStepChi2Est,
clusterChargeCut = dict(refToPSet_ = 'SiStripClusterChargeCutTiny'),
)
# TRACK BUILDING
import RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi
pixelPairStepTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('pixelPairStepTrajectoryFilter')),
maxCand = 3,
estimator = cms.string('pixelPairStepChi2Est'),
maxDPhiForLooperReconstruction = cms.double(2.0),
maxPtForLooperReconstruction = cms.double(0.7)
)
trackingLowPU.toModify(pixelPairStepTrajectoryBuilder, maxCand = 2)
_seedExtension = dict(
inOutTrajectoryFilter = dict(refToPSet_ = "pixelPairStepTrajectoryFilterInOut"),
useSameTrajFilter = False,
)
trackingPhase1.toModify(pixelPairStepTrajectoryBuilder, **_seedExtension)
trackingPhase1QuadProp.toModify(pixelPairStepTrajectoryBuilder, **_seedExtension)
trackingPhase2PU140.toModify(pixelPairStepTrajectoryBuilder, **_seedExtension)
# MAKING OF TRACK CANDIDATES
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
pixelPairStepTrackCandidates = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
src = cms.InputTag('pixelPairStepSeeds'),
clustersToSkip = cms.InputTag('pixelPairStepClusters'),
TrajectoryBuilderPSet = cms.PSet(refToPSet_ = cms.string('pixelPairStepTrajectoryBuilder')),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
numHitsForSeedCleaner = cms.int32(50),
onlyPixelHitsForSeedCleaner = cms.bool(True),
)
trackingPhase2PU140.toModify(pixelPairStepTrackCandidates,
clustersToSkip = None,
phase2clustersToSkip = cms.InputTag("pixelPairStepClusters"),
TrajectoryCleaner = "pixelPairStepTrajectoryCleanerBySharedHits"
)
from TrackingTools.TrajectoryCleaning.TrajectoryCleanerBySharedHits_cfi import trajectoryCleanerBySharedHits as _trajectoryCleanerBySharedHits
pixelPairStepTrajectoryCleanerBySharedHits = _trajectoryCleanerBySharedHits.clone(
ComponentName = 'pixelPairStepTrajectoryCleanerBySharedHits',
fractionShared = 0.095,
allowSharedFirstHit = True
)
trackingPhase2PU140.toModify(pixelPairStepTrajectoryCleanerBySharedHits, fractionShared = 0.09)
# TRACK FITTING
import RecoTracker.TrackProducer.TrackProducer_cfi
pixelPairStepTracks = RecoTracker.TrackProducer.TrackProducer_cfi.TrackProducer.clone(
AlgorithmName = cms.string('pixelPairStep'),
src = 'pixelPairStepTrackCandidates',
Fitter = cms.string('FlexibleKFFittingSmoother')
)
# Final selection
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierPrompt_cfi import *
pixelPairStep = TrackMVAClassifierPrompt.clone()
pixelPairStep.src = 'pixelPairStepTracks'
pixelPairStep.GBRForestLabel = 'MVASelectorIter2_13TeV'
pixelPairStep.qualityCuts = [-0.2,0.0,0.3]
# For LowPU and Phase2PU140
import RecoTracker.IterativeTracking.LowPtTripletStep_cff
import RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi
pixelPairStepSelector = RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.multiTrackSelector.clone(
src='pixelPairStepTracks',
useAnyMVA = cms.bool(True),
GBRForestLabel = cms.string('MVASelectorIter2'),
trackSelectors= cms.VPSet(
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
name = 'pixelPairStepLoose',
), #end of pset
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(
name = 'pixelPairStepTight',
preFilterName = 'pixelPairStepLoose',
),
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(
name = 'QualityMasks',
preFilterName = 'pixelPairStepTight',
),
),
vertices = cms.InputTag("pixelVertices")#end of vpset
) #end of clone
trackingPhase2PU140.toModify(pixelPairStepSelector,
useAnyMVA = None,
GBRForestLabel = None,
trackSelectors= cms.VPSet(
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.looseMTS.clone(
name = 'pixelPairStepLoose',
chi2n_par = 0.7,
res_par = ( 0.003, 0.002 ),
minNumberLayers = 3,
maxNumberLostLayers = 2,
minNumber3DLayers = 3,
d0_par1 = ( 0.4, 4.0 ),
dz_par1 = ( 0.4, 4.0 ),
d0_par2 = ( 0.6, 4.0 ),
dz_par2 = ( 0.45, 4.0 )
), #end of pset
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.tightMTS.clone(
name = 'pixelPairStepTight',
preFilterName = 'pixelPairStepLoose',
chi2n_par = 0.6,
res_par = ( 0.003, 0.002 ),
minNumberLayers = 4,
maxNumberLostLayers = 2,
minNumber3DLayers = 3,
d0_par1 = ( 0.35, 4.0 ),
dz_par1 = ( 0.35, 4.0 ),
d0_par2 = ( 0.5, 4.0 ),
dz_par2 = ( 0.4, 4.0 )
),
RecoTracker.FinalTrackSelectors.multiTrackSelector_cfi.highpurityMTS.clone(
name = 'pixelPairStep',
preFilterName = 'pixelPairStepTight',
chi2n_par = 0.5,
res_par = ( 0.003, 0.001 ),
minNumberLayers = 5,
maxNumberLostLayers = 2,
minNumber3DLayers = 4,
d0_par1 = ( 0.3, 4.0 ),
dz_par1 = ( 0.3, 4.0 ),
d0_par2 = ( 0.45, 4.0 ),
dz_par2 = ( 0.35, 4.0 )
),
), #end of vpset
vertices = "firstStepPrimaryVertices"
) #end of clone
# Final sequence
PixelPairStep = cms.Sequence(pixelPairStepClusters*
pixelPairStepSeedLayers*
pixelPairStepTrackingRegions*
pixelPairStepHitDoublets*
pixelPairStepSeeds*
pixelPairStepTrackCandidates*
pixelPairStepTracks*
pixelPairStep)
_PixelPairStep_LowPU_Phase2PU140 = PixelPairStep.copy()
_PixelPairStep_LowPU_Phase2PU140.replace(pixelPairStep, pixelPairStepSelector)
trackingLowPU.toReplaceWith(PixelPairStep, _PixelPairStep_LowPU_Phase2PU140)
trackingPhase2PU140.toReplaceWith(PixelPairStep, _PixelPairStep_LowPU_Phase2PU140)
_PixelPairStep_Phase1 = PixelPairStep.copy()
_PixelPairStep_Phase1.replace(pixelPairStepSeeds,
pixelPairStepSeedsA *
pixelPairStepSeedLayersB*pixelPairStepTrackingRegionsB*pixelPairStepHitDoubletsB*pixelPairStepSeedsB*
pixelPairStepSeeds)
trackingPhase1.toReplaceWith(PixelPairStep, _PixelPairStep_Phase1)
trackingPhase1QuadProp.toReplaceWith(PixelPairStep, _PixelPairStep_Phase1)
|
en
| 0.838553
|
# NEW CLUSTERS (remove previously used clusters) # SEEDING LAYERS # layers covering the region not covered by quadruplets (so it is # just acting as backup of triplets) # only layers covering the region not covered by quadruplets # (so it is just acting as backup of triplets) # modifing these errors seems to make no difference # TrackingRegion # SEEDS # FIXME: is this defined in any cfi that could be imported instead of copy-paste? # Clone for the phase1 recovery mode # Recovery for L2L3 # Merge # QUALITY CUTS DURING TRACK BUILDING # cms.PSet( refToPSet_ = cms.string('pixelPairStepTrajectoryFilterShape')) # allow inactive # TRACK BUILDING # MAKING OF TRACK CANDIDATES ### these two parameters are relevant only for the CachingSeedCleanerBySharedInput # TRACK FITTING # Final selection # For LowPU and Phase2PU140 #end of pset #end of vpset #end of clone #end of pset #end of vpset #end of clone # Final sequence
| 1.372906
| 1
|
lesson_planner/migrations/0027_auto_20200816_2057.py
|
Hogwarts250/lesson-discussion
| 0
|
6625996
|
<filename>lesson_planner/migrations/0027_auto_20200816_2057.py<gh_stars>0
# Generated by Django 3.1 on 2020-08-17 03:57
from django.db import migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Replace ``Lesson.transactions`` with status-tracking fields.

    Auto-generated by Django (3.1): drops the ``transactions`` field and
    adds a ``status`` field plus a ``confirmed_denied_datetime`` monitor
    field that is stamped whenever the status changes to 'denied' or
    'confirmed'.
    """

    dependencies = [
        ('lesson_planner', '0026_auto_20200816_1943'),
    ]

    operations = [
        # Drop the old per-lesson transactions field.
        migrations.RemoveField(
            model_name='lesson',
            name='transactions',
        ),
        # MonitorField watches 'status' and records the time it changed to
        # one of the values in `when` ('denied' or 'confirmed').
        migrations.AddField(
            model_name='lesson',
            name='confirmed_denied_datetime',
            field=model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status', null=True, when={'denied', 'confirmed'}),
        ),
        # NOTE(review): the (0, 'dummy') choice looks like an autodetector
        # placeholder; with no_check_for_status=True the real choices are
        # presumably taken from the model at runtime -- confirm.
        migrations.AddField(
            model_name='lesson',
            name='status',
            field=model_utils.fields.StatusField(choices=[(0, 'dummy')], default='pending', max_length=10, no_check_for_status=True),
        ),
    ]
|
<filename>lesson_planner/migrations/0027_auto_20200816_2057.py<gh_stars>0
# Generated by Django 3.1 on 2020-08-17 03:57
from django.db import migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Swap ``Lesson.transactions`` for status fields (generated by Django).

    Removes the ``transactions`` field; adds ``status`` and a
    ``confirmed_denied_datetime`` monitor stamped when the status becomes
    'denied' or 'confirmed'.
    """

    dependencies = [
        ('lesson_planner', '0026_auto_20200816_1943'),
    ]

    operations = [
        # Remove the obsolete transactions field.
        migrations.RemoveField(
            model_name='lesson',
            name='transactions',
        ),
        # Timestamp maintained automatically when 'status' flips to a
        # watched value.
        migrations.AddField(
            model_name='lesson',
            name='confirmed_denied_datetime',
            field=model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status', null=True, when={'denied', 'confirmed'}),
        ),
        # NOTE(review): (0, 'dummy') appears to be an autodetector
        # placeholder choice; verify against the model's STATUS definition.
        migrations.AddField(
            model_name='lesson',
            name='status',
            field=model_utils.fields.StatusField(choices=[(0, 'dummy')], default='pending', max_length=10, no_check_for_status=True),
        ),
    ]
|
en
| 0.753023
|
# Generated by Django 3.1 on 2020-08-17 03:57
| 1.538326
| 2
|
teatime/plugins/eth1/network.py
|
dmuhs/toaster
| 87
|
6625997
|
<filename>teatime/plugins/eth1/network.py
"""This module contains a plugin for network-related checks."""
from teatime.plugins import Context, JSONRPCPlugin, NodeType
from teatime.reporting import Issue, Severity
class NetworkListening(JSONRPCPlugin):
    """Check whether the node is listening for peers.

    Severity: High

    Queries the :code:`net_listening` RPC method; if the node reports
    that it is not accepting new peers, a high-severity issue is added
    to the report.
    """

    # Read-only probe; no state-changing RPC calls are made.
    INTRUSIVE = False

    def _check(self, context: Context) -> None:
        listening = self.get_rpc_json(context.target, "net_listening")

        # SCAN[HIGH]: Node not listening to peers
        if listening:
            return

        issue = Issue(
            title="Node not listening to peers",
            description="The node is not listening to new peer requests",
            raw_data=listening,
            severity=Severity.HIGH,
        )
        context.report.add_issue(issue)
class PeerCountStatus(JSONRPCPlugin):
    """Check whether the node has a certain peer count.

    Severity: Medium

    Using the :code:`net_peerCount` RPC method, this plugin compares the
    node's current peer count against the user-supplied minimum and logs
    an issue when the node falls short.
    """

    INTRUSIVE = False

    def __init__(self, minimum_peercount: int):
        self.minimum_peercount = minimum_peercount

    def _check(self, context: Context) -> None:
        # The RPC returns a hex-quantity string, hence the base-16 parse.
        peers_now = int(self.get_rpc_json(context.target, "net_peerCount"), 16)
        if peers_now >= self.minimum_peercount:
            return

        description = (
            f"Too few peers (current < minimum): "
            f"{peers_now} < {self.minimum_peercount}"
        )
        issue = Issue(
            title="Number of peers too low!",
            description=description,
            raw_data=peers_now,
            severity=Severity.MEDIUM,
        )
        context.report.add_issue(issue)
class PeerlistManipulation(JSONRPCPlugin):
    """Try to add a peer to the node's peer list.

    Severity: High

    Attempts to register the configured enode with the target node,
    using the client-appropriate admin RPC method.
    """

    # This check mutates node state (adds a peer) when it succeeds.
    INTRUSIVE = True

    def __init__(self, test_enode: str):
        self.test_enode = test_enode

    def _check(self, context: Context) -> None:
        # Pick the client-specific RPC method and issue description;
        # unsupported node types are skipped entirely.
        if context.node_type == NodeType.GETH:
            method = "admin_addPeer"
            description = (
                "Arbitrary peers can be added using "
                "the admin_addPeer RPC call."
            )
        elif context.node_type == NodeType.PARITY:
            method = "parity_addReservedPeer"
            description = (
                "Reserved peers can be added to the node's "
                "peer list using the parity_addReservedPeer RPC call"
            )
        else:
            return

        payload = self.get_rpc_json(
            context.target, method=method, params=[self.test_enode]
        )
        if payload:
            context.report.add_issue(
                Issue(
                    title="Peer list manipulation",
                    description=description,
                    raw_data=payload,
                    severity=Severity.HIGH,
                )
            )
class ParityDropPeers(JSONRPCPlugin):
    """Try to remove non-reserved peers from the peer list.

    Severity: Critical

    Attempts to drop every non-reserved entry from a Parity node's peer
    table; a successful call is reported as a critical issue.
    """

    # This check mutates node state (drops peers) when it succeeds.
    INTRUSIVE = True

    def _check(self, context: Context) -> None:
        # Only Parity/OpenEthereum exposes this RPC method.
        if context.node_type != NodeType.PARITY:
            return

        payload = self.get_rpc_json(
            context.target, method="parity_dropNonReservedPeers"
        )
        if not payload:
            return

        issue = Issue(
            title="Peer list manipulation",
            description=(
                "Anyone can drop the non-reserved peerlist on the "
                "node using the parity_dropNonReservedPeers RPC call."
            ),
            raw_data=payload,
            severity=Severity.CRITICAL,
        )
        context.report.add_issue(issue)
|
<filename>teatime/plugins/eth1/network.py
"""This module contains a plugin for network-related checks."""
from teatime.plugins import Context, JSONRPCPlugin, NodeType
from teatime.reporting import Issue, Severity
class NetworkListening(JSONRPCPlugin):
    """Check whether the node is listening for peers.

    Severity: High

    This plugin will use the :code:`net_listening` method to check
    whether the node is listening to new peers. If that is not the
    case, an issue will be logged.
    """

    # Read-only check; issues no state-changing RPC calls.
    INTRUSIVE = False

    def _check(self, context: Context) -> None:
        node_listening = self.get_rpc_json(context.target, "net_listening")

        # SCAN[HIGH]: Node not listening to peers
        if not node_listening:
            context.report.add_issue(
                Issue(
                    title="Node not listening to peers",
                    description="The node is not listening to new peer requests",
                    raw_data=node_listening,
                    severity=Severity.HIGH,
                )
            )
class PeerCountStatus(JSONRPCPlugin):
    """Check whether the node has a certain peer count.

    Severity: Medium

    This plugin will use the :code:`net_peerCount` method to check the
    node's peer count. If the value is lower than the user-specified
    value of minimum peers, an issue will be logged.
    """

    # Read-only check; issues no state-changing RPC calls.
    INTRUSIVE = False

    def __init__(self, minimum_peercount: int):
        self.minimum_peercount = minimum_peercount

    def _check(self, context: Context) -> None:
        # net_peerCount returns a hex-quantity string; parse as base 16.
        current_peercount = int(self.get_rpc_json(context.target, "net_peerCount"), 16)
        if self.minimum_peercount > current_peercount:
            context.report.add_issue(
                Issue(
                    title="Number of peers too low!",
                    description=(
                        f"Too few peers (current < minimum): "
                        f"{current_peercount} < {self.minimum_peercount}"
                    ),
                    raw_data=current_peercount,
                    severity=Severity.MEDIUM,
                )
            )
class PeerlistManipulation(JSONRPCPlugin):
    """Try to add a peer to the node's peer list.

    Severity: High

    This plugin will attempt to add a given peer to the node's peer
    list.
    """

    # Mutates node state (adds a peer) when the RPC call succeeds.
    INTRUSIVE = True

    def __init__(self, test_enode: str):
        self.test_enode = test_enode

    def _check(self, context: Context) -> None:
        # Geth exposes peer addition via the admin namespace.
        if context.node_type == NodeType.GETH:
            payload = self.get_rpc_json(
                context.target, method="admin_addPeer", params=[self.test_enode]
            )
            if payload:
                context.report.add_issue(
                    Issue(
                        title="Peer list manipulation",
                        description=(
                            "Arbitrary peers can be added using "
                            "the admin_addPeer RPC call."
                        ),
                        raw_data=payload,
                        severity=Severity.HIGH,
                    )
                )
        # Parity/OpenEthereum uses a "reserved peer" mechanism instead.
        elif context.node_type == NodeType.PARITY:
            payload = self.get_rpc_json(
                context.target,
                method="parity_addReservedPeer",
                params=[self.test_enode],
            )
            if payload:
                context.report.add_issue(
                    Issue(
                        title="Peer list manipulation",
                        description=(
                            "Reserved peers can be added to the node's "
                            "peer list using the parity_addReservedPeer RPC call"
                        ),
                        raw_data=payload,
                        severity=Severity.HIGH,
                    )
                )
        # Other node types are not probed by this plugin.
class ParityDropPeers(JSONRPCPlugin):
    """Try to remove non-reserved peers from the peer list.

    Severity: Critical

    This plugin will attempt to drop all non-reserved peer entries
    from the node's peer table.
    """

    # Mutates node state (drops peers) when the RPC call succeeds.
    INTRUSIVE = True

    def _check(self, context: Context) -> None:
        # Only Parity/OpenEthereum exposes this RPC method.
        if context.node_type != NodeType.PARITY:
            return

        payload = self.get_rpc_json(
            context.target, method="parity_dropNonReservedPeers"
        )
        if payload:
            context.report.add_issue(
                Issue(
                    title="Peer list manipulation",
                    description=(
                        "Anyone can drop the non-reserved peerlist on the "
                        "node using the parity_dropNonReservedPeers RPC call."
                    ),
                    raw_data=payload,
                    severity=Severity.CRITICAL,
                )
            )
|
en
| 0.846335
|
This module contains a plugin for network-related checks. Check whether the node is listening for peers. Severity: High This plugin will use the :code:`net_listening` method to check whether the node is listening to new peers. If that is not the case, an issue will be logged. # SCAN[HIGH]: Node not listening to peers Check whether the node has a certain peer count. Severity: Medium This plugin will use the :code:`net_peerCount` method to check the node's peer count. If the value is lower than the user-specified value of minimum peers, an issue will be logged. Try to add a peer to the node's peer list. Severity: High This plugin will attempt to add a given peer to the node's peer list. Try to remove non-reserved peers from the peer list. Severity: Critical This plugin will attempt to drop all non-reserved peer entries from the node's peer table.
| 2.686689
| 3
|
ehome/libs/yuntongxun/xmltojson.py
|
gavinliu4011/eHome
| 4
|
6625998
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# python xml.etree.ElementTree
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
class xmltojson:
    """Convert an XML document (given as a string) into a nested dict.

    Top-level children without sub-elements map ``tag -> text``; children
    with sub-elements map ``tag -> {subtag: text}``.  ``main`` and
    ``main2`` additionally collect repeated list-style elements
    ('SubAccount' resp. 'TemplateSMS') into a list when a 'totalCount'
    sibling is present in the document.
    """

    # Legacy flags; unused by this class but kept for backward
    # compatibility with any external references.
    SHOW_LOG = True
    XML_PATH = None

    def __init__(self):
        # Result accumulators.  These used to be *class* attributes
        # (`a = {}`, `m = []`), so every instance shared the same dict and
        # list and results from one parse leaked into the next instance's
        # output.  Per-instance attributes fix that defect; repeated calls
        # on the *same* instance still accumulate, as before.
        self.a = {}
        self.m = []

    def get_root(self, path):
        '''Parse the given XML string and return its root element.

        NOTE: despite the parameter name, ``path`` is the XML *content*
        (ET.fromstring parses a string, not a file path).
        '''
        tree = ET.fromstring(path)
        return tree

    def get_element_tag(self, element):
        '''Return the element tag if the element is not None.'''
        if element is not None:
            return element.tag
        else:
            print('the element is None!')

    def get_element_attrib(self, element):
        '''Return the element attrib dict if the element is not None.'''
        if element is not None:
            return element.attrib
        else:
            print('the element is None!')

    def get_element_text(self, element):
        '''Return the text of the element if it is not None.'''
        if element is not None:
            return element.text
        else:
            print('the element is None!')

    def get_element_children(self, element):
        '''Return the list of direct children if the element is not None.'''
        if element is not None:
            return [c for c in element]
        else:
            print('the element is None!')

    def get_elements_tag(self, elements):
        '''Return the list of tags for the given elements.'''
        if elements is not None:
            tags = []
            for e in elements:
                tags.append(e.tag)
            return tags
        else:
            print('the elements is None!')

    def get_elements_attrib(self, elements):
        '''Return the list of attrib dicts for the given elements.'''
        if elements is not None:
            attribs = []
            for a in elements:
                attribs.append(a.attrib)
            return attribs
        else:
            print('the elements is None!')

    def get_elements_text(self, elements):
        '''Return a {tag: text} dict for the given elements.'''
        if elements is not None:
            text = []
            for t in elements:
                text.append(t.text)
            return dict(list(zip(self.get_elements_tag(elements), text)))
        else:
            print('the elements is None!')

    def main(self, xml):
        """Parse ``xml`` and merge its top-level children into ``self.a``.

        'TemplateSMS' elements are stored under the 'templateSMS' key;
        'SubAccount' elements are accumulated into a list when the
        document also carries a 'totalCount' element.
        """
        root = self.get_root(xml)
        children = self.get_element_children(root)
        children_tags = self.get_elements_tag(children)
        children_attribs = self.get_elements_attrib(children)
        i = 0
        # Collect the name and value of every grandchild element.
        for c in children:
            p = 0
            c_children = self.get_element_children(c)
            dict_text = self.get_elements_text(c_children)
            if dict_text:
                if children_tags[i] == 'TemplateSMS':
                    self.a['templateSMS'] = dict_text
                else:
                    if children_tags[i] == 'SubAccount':
                        # A 'totalCount' sibling marks a list response:
                        # accumulate every SubAccount entry into self.m.
                        k = 0
                        for x in children:
                            if children_tags[k] == 'totalCount':
                                self.m.append(dict_text)
                                self.a['SubAccount'] = self.m
                                p = 1
                            k = k + 1
                        if p == 0:
                            self.a[children_tags[i]] = dict_text
                    else:
                        self.a[children_tags[i]] = dict_text
            else:
                # Leaf child: store its text directly.
                self.a[children_tags[i]] = c.text
            i = i + 1
        return self.a

    def main2(self, xml):
        """Like :meth:`main`, but list-accumulates 'TemplateSMS' entries
        (used for template-list responses) instead of 'SubAccount'."""
        root = self.get_root(xml)
        children = self.get_element_children(root)
        children_tags = self.get_elements_tag(children)
        children_attribs = self.get_elements_attrib(children)
        i = 0
        # Collect the name and value of every grandchild element.
        for c in children:
            p = 0
            c_children = self.get_element_children(c)
            dict_text = self.get_elements_text(c_children)
            if dict_text:
                if children_tags[i] == 'TemplateSMS':
                    # A 'totalCount' sibling marks a list response:
                    # accumulate every TemplateSMS entry into self.m.
                    k = 0
                    for x in children:
                        if children_tags[k] == 'totalCount':
                            self.m.append(dict_text)
                            self.a['TemplateSMS'] = self.m
                            p = 1
                        k = k + 1
                    if p == 0:
                        self.a[children_tags[i]] = dict_text
                else:
                    self.a[children_tags[i]] = dict_text
            else:
                # Leaf child: store its text directly.
                self.a[children_tags[i]] = c.text
            i = i + 1
        return self.a
|
# -*- coding: utf-8 -*-
# python xml.etree.ElementTree
import os
import xml.etree.ElementTree as ET
from xml.dom import minidom
class xmltojson:
    """Convert an XML document (given as a string) into a nested dict."""

    # global var
    # show log (unused)
    SHOW_LOG = True
    # XML file (unused placeholder)
    XML_PATH = None
    # NOTE(review): `a` and `m` are mutable *class* attributes -- they are
    # shared across all instances, so results from one parse leak into the
    # next.  Consider moving them into __init__.
    a = {}
    m = []

    def get_root(self, path):
        '''parse the XML file,and get the tree of the XML file
        finally,return the root element of the tree.
        if the XML file dose not exist,then print the information

        NOTE(review): despite the name, ``path`` is XML *content*;
        ET.fromstring parses a string, not a file path.'''
        # if os.path.exists(path):
        # if SHOW_LOG:
        # print('start to parse the file : [{}]'.format(path))
        tree = ET.fromstring(path)
        return tree
        # else:
        # print('the path [{}] dose not exist!'.format(path))

    def get_element_tag(self, element):
        '''return the element tag if the element is not None.'''
        if element is not None:
            return element.tag
        else:
            print('the element is None!')

    def get_element_attrib(self, element):
        '''return the element attrib if the element is not None.'''
        if element is not None:
            return element.attrib
        else:
            print('the element is None!')

    def get_element_text(self, element):
        '''return the text of the element.'''
        if element is not None:
            return element.text
        else:
            print('the element is None!')

    def get_element_children(self, element):
        '''return the element children if the element is not None.'''
        if element is not None:
            return [c for c in element]
        else:
            print('the element is None!')

    def get_elements_tag(self, elements):
        '''return the list of tags of element's tag'''
        if elements is not None:
            tags = []
            for e in elements:
                tags.append(e.tag)
            return tags
        else:
            print('the elements is None!')

    def get_elements_attrib(self, elements):
        '''return the list of attribs of element's attrib'''
        if elements is not None:
            attribs = []
            for a in elements:
                attribs.append(a.attrib)
            return attribs
        else:
            print('the elements is None!')

    def get_elements_text(self, elements):
        '''return the dict of element'''
        if elements is not None:
            text = []
            for t in elements:
                text.append(t.text)
            return dict(list(zip(self.get_elements_tag(elements), text)))
        else:
            print('the elements is None!')

    def main(self, xml):
        """Merge the top-level children of ``xml`` into ``self.a``;
        'SubAccount' entries are list-accumulated when a 'totalCount'
        sibling is present."""
        # root
        root = self.get_root(xml)
        # children
        children = self.get_element_children(root)
        children_tags = self.get_elements_tag(children)
        children_attribs = self.get_elements_attrib(children)
        i = 0
        # Collect the name and value of every child of each second-level
        # element.  (translated from the original Chinese comment)
        for c in children:
            p = 0
            c_children = self.get_element_children(c)
            dict_text = self.get_elements_text(c_children)
            if dict_text:
                # print (children_tags[i])
                if children_tags[i] == 'TemplateSMS':
                    self.a['templateSMS'] = dict_text
                else:
                    if children_tags[i] == 'SubAccount':
                        # A 'totalCount' sibling marks a list response.
                        k = 0
                        for x in children:
                            if children_tags[k] == 'totalCount':
                                self.m.append(dict_text)
                                self.a['SubAccount'] = self.m
                                p = 1
                            k = k + 1
                        if p == 0:
                            self.a[children_tags[i]] = dict_text
                    else:
                        self.a[children_tags[i]] = dict_text
            else:
                # Leaf child: store its text directly.
                self.a[children_tags[i]] = c.text
            i = i + 1
        return self.a

    def main2(self, xml):
        """Like ``main`` but list-accumulates 'TemplateSMS' entries."""
        # root
        root = self.get_root(xml)
        # children
        children = self.get_element_children(root)
        children_tags = self.get_elements_tag(children)
        children_attribs = self.get_elements_attrib(children)
        i = 0
        # Collect the name and value of every child of each second-level
        # element.  (translated from the original Chinese comment)
        for c in children:
            p = 0
            c_children = self.get_element_children(c)
            dict_text = self.get_elements_text(c_children)
            if dict_text:
                if children_tags[i] == 'TemplateSMS':
                    # A 'totalCount' sibling marks a list response.
                    k = 0
                    for x in children:
                        if children_tags[k] == 'totalCount':
                            self.m.append(dict_text)
                            self.a['TemplateSMS'] = self.m
                            p = 1
                        k = k + 1
                    if p == 0:
                        self.a[children_tags[i]] = dict_text
                else:
                    self.a[children_tags[i]] = dict_text
            else:
                # Leaf child: store its text directly.
                self.a[children_tags[i]] = c.text
            i = i + 1
        return self.a
|
en
| 0.510416
|
# -*- coding: utf-8 -*- # python xml.etree.ElementTree # global var # show log # XML file parse the XML file,and get the tree of the XML file finally,return the root element of the tree. if the XML file dose not exist,then print the information # if os.path.exists(path): # if SHOW_LOG: # print('start to parse the file : [{}]'.format(path)) # else: # print('the path [{}] dose not exist!'.format(path)) return the element tag if the element is not None. return the element attrib if the element is not None. return the text of the element. return the element children if the element is not None. return the list of tags of element's tag return the list of attribs of element's attrib return the dict of element # root # children # 获取二级元素的每一个子节点的名称和值 # print (children_tags[i]) # root # children # 获取二级元素的每一个子节点的名称和值
| 3.446829
| 3
|
grr/server/grr_response_server/aff4_objects/standard.py
|
dekoder/grr
| 3
|
6625999
|
#!/usr/bin/env python
"""These are standard aff4 objects."""
from __future__ import division
from __future__ import unicode_literals
import io
from builtins import range # pylint: disable=redefined-builtin
from future.utils import iteritems
from future.utils import itervalues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import flow
class VFSDirectory(aff4.AFF4Volume):
  """This represents a directory from the client."""

  # We contain other objects within the tree.
  _behaviours = frozenset(["Container"])

  def Update(self, attribute=None):
    """Refresh an old attribute.

    Note that refreshing the attribute is asynchronous. It does not change
    anything about the current object - you need to reopen the same URN some
    time later to get fresh data.

    Attributes:
      CONTAINS - Refresh the content of the directory listing.

    Args:
      attribute: An attribute object as listed above.

    Returns:
      The Flow ID that is pending

    Raises:
      IOError: If there has been an error starting the flow.
    """
    # client id is the first path element
    client_id = self.urn.Split()[0]

    if attribute == "CONTAINS":
      # Get the pathspec for this object
      flow_id = flow.StartAFF4Flow(
          client_id=client_id,
          # Dependency loop: aff4_objects/aff4_grr.py depends on
          # aff4_objects/standard.py that depends on flows/general/filesystem.py
          # that eventually depends on aff4_objects/aff4_grr.py
          # flow_name=filesystem.ListDirectory.__name__,
          flow_name="ListDirectory",
          pathspec=self.real_pathspec,
          notify_to_user=False,
          token=self.token)

      return flow_id
    # Any other attribute starts no flow; the method falls through
    # and returns None.

  class SchemaCls(aff4.AFF4Volume.SchemaCls):
    """Attributes specific to VFSDirectory."""

    STAT = aff4.Attribute("aff4:stat", rdf_client_fs.StatEntry,
                          "A StatEntry describing this file.", "stat")
    PATHSPEC = aff4.Attribute(
        "aff4:pathspec", rdf_paths.PathSpec,
        "The pathspec used to retrieve this object from the client.",
        "pathspec")
class HashList(rdfvalue.RDFBytes):
  """A packed sequence of fixed-width hash digests.

  The underlying byte string is a concatenation of HASH_SIZE-byte
  digests; this class exposes it with sequence semantics.
  """

  HASH_SIZE = 32

  def __len__(self):
    # Count whole digests only.
    return len(self._value) // self.HASH_SIZE

  def __iter__(self):
    return (self[index] for index in range(len(self)))

  def __getitem__(self, idx):
    start = idx * self.HASH_SIZE
    end = start + self.HASH_SIZE
    return rdfvalue.HashDigest(self._value[start:end])
class AFF4SparseImage(aff4.AFF4ImageBase):
  """A class to store partial files.

  Only chunks that have actually been fetched are stored.  Each chunk's
  content lives in the blob store; a small per-chunk index object (named
  by CHUNK_ID_TEMPLATE) holds the blob hash for that chunk.
  """

  # Length in bytes of the hex-encoded hash stored in each chunk index.
  _HASH_SIZE = 32
  # Intended read-ahead window in chunks.
  # NOTE(review): _GetChunkForReading hard-codes 10 instead of using this
  # constant -- keep the two in sync.
  _READAHEAD = 10
  chunksize = 512 * 1024

  class SchemaCls(aff4.AFF4ImageBase.SchemaCls):
    """The schema class for AFF4SparseImage."""

    PATHSPEC = VFSDirectory.SchemaCls.PATHSPEC
    STAT = VFSDirectory.SchemaCls.STAT
    _CHUNKSIZE = aff4.Attribute(
        "aff4:chunksize",
        rdfvalue.RDFInteger,
        "Total size of each chunk.",
        default=512 * 1024)
    LAST_CHUNK = aff4.Attribute(
        "aff4:lastchunk",
        rdfvalue.RDFInteger,
        "The highest numbered chunk in this object.",
        default=-1)

  def _ReadChunks(self, chunks):
    """Fetch the given chunk numbers from the blob store into the cache."""
    chunk_hashes = self._ChunkNrsToHashes(chunks)
    # Invert to hash -> [chunk numbers]; several chunks may share a blob.
    chunk_nrs = {}
    for k, v in iteritems(chunk_hashes):
      chunk_nrs.setdefault(v, []).append(k)
    res = data_store.DB.ReadBlobs(
        list(itervalues(chunk_hashes)), token=self.token)
    for blob_hash, content in iteritems(res):
      for chunk_nr in chunk_nrs[blob_hash]:
        # Cache each chunk as an in-memory stream tagged with its number
        # and a dirty flag used by _WriteChunk.
        fd = io.BytesIO(content)
        fd.dirty = False
        fd.chunk = chunk_nr
        self.chunk_cache.Put(chunk_nr, fd)

  def _WriteChunk(self, chunk):
    # Only dirty chunks need persisting; clean ones already exist.
    if chunk.dirty:
      data_store.DB.StoreBlob(chunk.getvalue(), token=self.token)

  def _ChunkNrToHash(self, chunk_nr):
    """Look up the blob hash for a single chunk number."""
    return self._ChunkNrsToHashes([chunk_nr])[chunk_nr]

  def _ChunkNrsToHashes(self, chunk_nrs):
    """Map chunk numbers to blob hashes via the chunk index objects."""
    chunk_names = {
        self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_nr): chunk_nr
        for chunk_nr in chunk_nrs
    }
    res = {}
    for obj in aff4.FACTORY.MultiOpen(chunk_names, mode="r", token=self.token):
      if isinstance(obj, aff4.AFF4Stream):
        hsh = obj.read(self._HASH_SIZE)
        if hsh:
          # NOTE(review): str.encode("hex") is Python 2 only; this line
          # breaks on Python 3 (use binascii.hexlify there).
          res[chunk_names[obj.urn]] = hsh.encode("hex")
    return res

  def _GetChunkForReading(self, chunk):
    """Returns the relevant chunk from the datastore and reads ahead."""
    try:
      return self.chunk_cache.Get(chunk)
    except KeyError:
      pass

    # We don't have this chunk already cached. The most common read
    # access pattern is contiguous reading so since we have to go to
    # the data store already, we read ahead to reduce round trips.
    missing_chunks = []
    for chunk_number in range(chunk, chunk + 10):
      if chunk_number not in self.chunk_cache:
        missing_chunks.append(chunk_number)

    self._ReadChunks(missing_chunks)
    # This should work now - otherwise we just give up.
    try:
      return self.chunk_cache.Get(chunk)
    except KeyError:
      raise aff4.ChunkNotFoundError("Cannot open chunk %s" % chunk)

  def _GetChunkForWriting(self, chunk):
    """Returns the relevant chunk from the datastore."""
    # Fast path: chunk already cached -- mark dirty and reuse it.
    try:
      chunk = self.chunk_cache.Get(chunk)
      chunk.dirty = True
      return chunk
    except KeyError:
      pass

    # NOTE(review): self._ReadChunk (singular) is not defined in this
    # class -- presumably provided by aff4.AFF4ImageBase; confirm.
    try:
      chunk = self._ReadChunk(chunk)
      chunk.dirty = True
      return chunk
    except KeyError:
      pass

    # Chunk does not exist yet: start a fresh in-memory buffer.
    fd = io.BytesIO()
    fd.chunk = chunk
    fd.dirty = True
    self.chunk_cache.Put(chunk, fd)
    # Keep track of the biggest chunk_number we've seen so far.
    if chunk > self.last_chunk:
      self.last_chunk = chunk
      self._dirty = True
    return fd

  def _ReadPartial(self, length):
    """Read as much as possible, but not more than length."""
    chunk = self.offset // self.chunksize
    chunk_offset = self.offset % self.chunksize

    # If we're past the end of the file, we don't have a chunk to read from, so
    # we can't read anymore. We return the empty string here so we can read off
    # the end of a file without raising, and get as much data as is there.
    # NOTE(review): this returns str "" while Read() joins with b"" --
    # mixing str/bytes like this only works on Python 2.
    if chunk > self.last_chunk:
      return ""

    available_to_read = min(length, self.chunksize - chunk_offset)

    fd = self._GetChunkForReading(chunk)
    fd.seek(chunk_offset)
    result = fd.read(available_to_read)
    self.offset += len(result)
    return result

  def Read(self, length):
    """Read up to ``length`` bytes starting at the current offset."""
    result = []
    # Make sure we don't read past the "end" of the file. We say the end is the
    # end of the last chunk. If we do try and read past the end, we should
    # return an empty string.
    # The end of the file is the *end* of the last chunk, so we add one here.
    length = min(length, ((self.last_chunk + 1) * self.chunksize) - self.offset)
    while length > 0:
      data = self._ReadPartial(length)
      if not data:
        break
      length -= len(data)
      result.append(data)
    return b"".join(result)

  def Initialize(self):
    """Load chunk size and bookkeeping attributes when opened readable."""
    super(AFF4SparseImage, self).Initialize()
    if "r" in self.mode:
      # pylint: disable=protected-access
      self.chunksize = int(self.Get(self.Schema._CHUNKSIZE))
      # pylint: enable=protected-access
      self.content_last = self.Get(self.Schema.CONTENT_LAST)
      # The chunk with the highest index we've seen so far. We'll use
      # this to keep track of what the biggest possible size this file
      # could be is.
      self.last_chunk = self.Get(self.Schema.LAST_CHUNK)
    else:
      self.size = 0
      self.content_last = None
      self.last_chunk = -1

  def Truncate(self, offset=0):
    # Only truncation to zero is supported for sparse images.
    if offset != 0:
      raise IOError("Non-zero truncation not supported for AFF4SparseImage")
    super(AFF4SparseImage, self).Truncate(0)

  def AddBlob(self, blob_hash, length, chunk_number):
    """Add another blob to this image using its hash."""
    if len(blob_hash) != self._HASH_SIZE:
      raise ValueError("Hash '%s' doesn't have correct length (%d)." %
                       (blob_hash, self._HASH_SIZE))

    # If we're adding a new blob, we should increase the size. If we're just
    # updating an existing blob, the size should stay the same.
    # That is, if we read the index at the right offset and no hash is there, we
    # must not have seen this blob before, so we say we're adding a new one and
    # increase in size.
    if not self.ChunkExists(chunk_number):
      # We say that we've increased in size by the size of the blob,
      # but really we only store its hash in the AFF4SparseImage.
      self.size += length
      self._dirty = True

    # Keep track of the biggest chunk_number we've seen so far.
    if chunk_number > self.last_chunk:
      self.last_chunk = chunk_number
      self._dirty = True

    index_urn = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number)
    # TODO(amoser): This opens a subobject for each AddBlob call :/
    with aff4.FACTORY.Create(
        index_urn, aff4.AFF4MemoryStream, token=self.token) as fd:
      fd.write(blob_hash)

    # Drop any cached copy of the chunk we just re-pointed.
    if chunk_number in self.chunk_cache:
      self.chunk_cache.Pop(chunk_number)

  def ChunkExists(self, chunk_number):
    """Do we have this single chunk in the index?"""
    return self.ChunksExist([chunk_number])[chunk_number]

  def ChunksExist(self, chunk_numbers):
    """Do we have this chunk in the index?"""
    index_urns = {
        self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number): chunk_number
        for chunk_number in chunk_numbers
    }

    # Default every chunk to missing; Stat only returns existing objects.
    res = {chunk_number: False for chunk_number in chunk_numbers}

    for metadata in aff4.FACTORY.Stat(index_urns):
      res[index_urns[metadata["urn"]]] = True

    return res

  def ChunksMetadata(self, chunk_numbers):
    """Return raw Stat metadata for each existing chunk index object."""
    index_urns = {
        self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number): chunk_number
        for chunk_number in chunk_numbers
    }

    res = {}
    for metadata in aff4.FACTORY.Stat(index_urns):
      res[index_urns[metadata["urn"]]] = metadata

    return res

  def Flush(self):
    """Persist the LAST_CHUNK bookkeeping attribute if it changed."""
    if self._dirty:
      self.Set(self.Schema.LAST_CHUNK, rdfvalue.RDFInteger(self.last_chunk))
    super(AFF4SparseImage, self).Flush()
class LabelSet(aff4.AFF4Object):
  """An aff4 object which manages a set of labels.

  This object has no actual attributes, it simply manages the set.
  """

  # We expect the set to be quite small, so we simply store it as a collection
  # attributes of the form "index:label_<label>" all unversioned (ts = 0).

  # Location of the default set of labels, used to keep tract of active labels
  # for clients.
  CLIENT_LABELS_URN = "aff4:/index/labels/client_set"

  def __init__(self, urn, **kwargs):
    # The supplied urn is intentionally ignored: every LabelSet lives at
    # the shared CLIENT_LABELS_URN location.
    super(LabelSet, self).__init__(urn=self.CLIENT_LABELS_URN, **kwargs)
    self.to_set = set()
    self.to_delete = set()

  def Flush(self):
    """Flush the data to the index."""
    super(LabelSet, self).Flush()

    # A label queued for both addition and removal counts as an add.
    self.to_delete = self.to_delete - self.to_set

    with data_store.DB.GetMutationPool() as pool:
      pool.LabelUpdateLabels(self.urn, self.to_set, to_delete=self.to_delete)

    self.to_set = set()
    self.to_delete = set()

  def Close(self):
    self.Flush()
    super(LabelSet, self).Close()

  def Add(self, label):
    self.to_set.add(label)

  def Remove(self, label):
    self.to_delete.add(label)

  def ListLabels(self):
    # Make queued changes visible before reading the index back.
    if self.to_set or self.to_delete:
      self.Flush()
    return data_store.DB.LabelFetchAll(self.urn)
|
#!/usr/bin/env python
"""These are standard aff4 objects."""
from __future__ import division
from __future__ import unicode_literals
import io
from builtins import range # pylint: disable=redefined-builtin
from future.utils import iteritems
from future.utils import itervalues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import flow
class VFSDirectory(aff4.AFF4Volume):
  """This represents a directory from the client."""

  # We contain other objects within the tree.
  _behaviours = frozenset(["Container"])

  def Update(self, attribute=None):
    """Refresh an old attribute.

    Note that refreshing the attribute is asynchronous. It does not change
    anything about the current object - you need to reopen the same URN some
    time later to get fresh data.

    Attributes:
      CONTAINS - Refresh the content of the directory listing.

    Args:
      attribute: An attribute object as listed above.

    Returns:
      The Flow ID that is pending

    Raises:
      IOError: If there has been an error starting the flow.
    """
    # client id is the first path element
    client_id = self.urn.Split()[0]

    if attribute == "CONTAINS":
      # Get the pathspec for this object
      flow_id = flow.StartAFF4Flow(
          client_id=client_id,
          # Dependency loop: aff4_objects/aff4_grr.py depends on
          # aff4_objects/standard.py that depends on flows/general/filesystem.py
          # that eventually depends on aff4_objects/aff4_grr.py
          # flow_name=filesystem.ListDirectory.__name__,
          flow_name="ListDirectory",
          pathspec=self.real_pathspec,
          notify_to_user=False,
          token=self.token)

      return flow_id
    # No flow is started for other attributes; returns None implicitly.

  class SchemaCls(aff4.AFF4Volume.SchemaCls):
    """Attributes specific to VFSDirectory."""

    STAT = aff4.Attribute("aff4:stat", rdf_client_fs.StatEntry,
                          "A StatEntry describing this file.", "stat")
    PATHSPEC = aff4.Attribute(
        "aff4:pathspec", rdf_paths.PathSpec,
        "The pathspec used to retrieve this object from the client.",
        "pathspec")
class HashList(rdfvalue.RDFBytes):
  """A list of hashes.

  The underlying byte string concatenates fixed-size (HASH_SIZE-byte)
  digests; this class exposes them with sequence semantics.
  """

  HASH_SIZE = 32

  def __len__(self):
    # Counts whole digests; trailing partial bytes are ignored.
    return len(self._value) // self.HASH_SIZE

  def __iter__(self):
    for i in range(len(self)):
      yield self[i]

  def __getitem__(self, idx):
    # Slice out the idx-th digest and wrap it as a HashDigest.
    return rdfvalue.HashDigest(
        self._value[idx * self.HASH_SIZE:(idx + 1) * self.HASH_SIZE])
class AFF4SparseImage(aff4.AFF4ImageBase):
"""A class to store partial files."""
_HASH_SIZE = 32
_READAHEAD = 10
chunksize = 512 * 1024
class SchemaCls(aff4.AFF4ImageBase.SchemaCls):
"""The schema class for AFF4SparseImage."""
PATHSPEC = VFSDirectory.SchemaCls.PATHSPEC
STAT = VFSDirectory.SchemaCls.STAT
_CHUNKSIZE = aff4.Attribute(
"aff4:chunksize",
rdfvalue.RDFInteger,
"Total size of each chunk.",
default=512 * 1024)
LAST_CHUNK = aff4.Attribute(
"aff4:lastchunk",
rdfvalue.RDFInteger,
"The highest numbered chunk in this object.",
default=-1)
def _ReadChunks(self, chunks):
chunk_hashes = self._ChunkNrsToHashes(chunks)
chunk_nrs = {}
for k, v in iteritems(chunk_hashes):
chunk_nrs.setdefault(v, []).append(k)
res = data_store.DB.ReadBlobs(
list(itervalues(chunk_hashes)), token=self.token)
for blob_hash, content in iteritems(res):
for chunk_nr in chunk_nrs[blob_hash]:
fd = io.BytesIO(content)
fd.dirty = False
fd.chunk = chunk_nr
self.chunk_cache.Put(chunk_nr, fd)
def _WriteChunk(self, chunk):
if chunk.dirty:
data_store.DB.StoreBlob(chunk.getvalue(), token=self.token)
def _ChunkNrToHash(self, chunk_nr):
return self._ChunkNrsToHashes([chunk_nr])[chunk_nr]
def _ChunkNrsToHashes(self, chunk_nrs):
chunk_names = {
self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_nr): chunk_nr
for chunk_nr in chunk_nrs
}
res = {}
for obj in aff4.FACTORY.MultiOpen(chunk_names, mode="r", token=self.token):
if isinstance(obj, aff4.AFF4Stream):
hsh = obj.read(self._HASH_SIZE)
if hsh:
res[chunk_names[obj.urn]] = hsh.encode("hex")
return res
def _GetChunkForReading(self, chunk):
"""Returns the relevant chunk from the datastore and reads ahead."""
try:
return self.chunk_cache.Get(chunk)
except KeyError:
pass
# We don't have this chunk already cached. The most common read
# access pattern is contiguous reading so since we have to go to
# the data store already, we read ahead to reduce round trips.
missing_chunks = []
for chunk_number in range(chunk, chunk + 10):
if chunk_number not in self.chunk_cache:
missing_chunks.append(chunk_number)
self._ReadChunks(missing_chunks)
# This should work now - otherwise we just give up.
try:
return self.chunk_cache.Get(chunk)
except KeyError:
raise aff4.ChunkNotFoundError("Cannot open chunk %s" % chunk)
def _GetChunkForWriting(self, chunk):
"""Returns the relevant chunk from the datastore."""
# Fast path: chunk is already cached - mark it dirty and hand it back.
try:
chunk = self.chunk_cache.Get(chunk)
chunk.dirty = True
return chunk
except KeyError:
pass
# NOTE(review): self._ReadChunk (singular) is not defined anywhere in this
# excerpt - presumably it exists earlier in the class; confirm it raises
# KeyError for a missing chunk as this handler expects.
try:
chunk = self._ReadChunk(chunk)
chunk.dirty = True
return chunk
except KeyError:
pass
# Neither cached nor stored: start a fresh, empty, dirty chunk.
fd = io.BytesIO()
fd.chunk = chunk
fd.dirty = True
self.chunk_cache.Put(chunk, fd)
# Keep track of the biggest chunk_number we've seen so far.
if chunk > self.last_chunk:
self.last_chunk = chunk
self._dirty = True
return fd
def _ReadPartial(self, length):
"""Read as much as possible, but not more than length."""
chunk = self.offset // self.chunksize
chunk_offset = self.offset % self.chunksize
# If we're past the end of the file, we don't have a chunk to read from, so
# we can't read anymore. We return the empty string here so we can read off
# the end of a file without raising, and get as much data as is there.
if chunk > self.last_chunk:
return ""
available_to_read = min(length, self.chunksize - chunk_offset)
fd = self._GetChunkForReading(chunk)
fd.seek(chunk_offset)
result = fd.read(available_to_read)
self.offset += len(result)
return result
def Read(self, length):
  """Read up to `length` bytes from the current offset as a byte string."""
  # The logical end of the file is the *end* of the last chunk, hence the
  # "+ 1"; never try to read past it.
  end_of_file = (self.last_chunk + 1) * self.chunksize
  remaining = min(length, end_of_file - self.offset)
  pieces = []
  while remaining > 0:
    data = self._ReadPartial(remaining)
    if not data:
      break
    remaining -= len(data)
    pieces.append(data)
  return b"".join(pieces)
def Initialize(self):
"""Load chunk size and bookkeeping attributes when opened for reading."""
super(AFF4SparseImage, self).Initialize()
if "r" in self.mode:
# pylint: disable=protected-access
self.chunksize = int(self.Get(self.Schema._CHUNKSIZE))
# pylint: enable=protected-access
self.content_last = self.Get(self.Schema.CONTENT_LAST)
# The chunk with the highest index we've seen so far. We'll use
# this to keep track of what the biggest possible size this file
# could be is.
self.last_chunk = self.Get(self.Schema.LAST_CHUNK)
else:
# Write-only mode: start from an empty image.
self.size = 0
self.content_last = None
self.last_chunk = -1
def Truncate(self, offset=0):
  """Truncate the image to zero length; any other offset is unsupported."""
  if offset != 0:
    raise IOError("Non-zero truncation not supported for AFF4SparseImage")
  super(AFF4SparseImage, self).Truncate(0)
def AddBlob(self, blob_hash, length, chunk_number):
  """Add another blob to this image using its hash."""
  if len(blob_hash) != self._HASH_SIZE:
    raise ValueError("Hash '%s' doesn't have correct length (%d)." %
                     (blob_hash, self._HASH_SIZE))
  # A chunk that was never indexed before grows the logical size of the
  # image; overwriting an existing chunk leaves the size unchanged. Only
  # the hash is stored here - the bytes themselves live in the blob store.
  if not self.ChunkExists(chunk_number):
    self.size += length
    self._dirty = True
  # Track the highest chunk number seen so far.
  if chunk_number > self.last_chunk:
    self.last_chunk = chunk_number
    self._dirty = True
  index_urn = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number)
  # TODO(amoser): This opens a subobject for each AddBlob call :/
  with aff4.FACTORY.Create(
      index_urn, aff4.AFF4MemoryStream, token=self.token) as fd:
    fd.write(blob_hash)
  # Drop any stale cached copy of this chunk.
  if chunk_number in self.chunk_cache:
    self.chunk_cache.Pop(chunk_number)
def ChunkExists(self, chunk_number):
  """Whether a single chunk number has an entry in the index."""
  exists_map = self.ChunksExist([chunk_number])
  return exists_map[chunk_number]
def ChunksExist(self, chunk_numbers):
  """Do we have these chunks in the index?"""
  urn_to_chunk = {
      self.urn.Add(self.CHUNK_ID_TEMPLATE % nr): nr for nr in chunk_numbers
  }
  found = dict.fromkeys(chunk_numbers, False)
  for metadata in aff4.FACTORY.Stat(urn_to_chunk):
    found[urn_to_chunk[metadata["urn"]]] = True
  return found
def ChunksMetadata(self, chunk_numbers):
  """Return the Stat() metadata for each chunk that has an index entry."""
  urn_to_chunk = {
      self.urn.Add(self.CHUNK_ID_TEMPLATE % nr): nr for nr in chunk_numbers
  }
  metadata_by_chunk = {}
  for metadata in aff4.FACTORY.Stat(urn_to_chunk):
    metadata_by_chunk[urn_to_chunk[metadata["urn"]]] = metadata
  return metadata_by_chunk
def Flush(self):
  """Persist the highest chunk number, then flush the base object."""
  if self._dirty:
    self.Set(self.Schema.LAST_CHUNK, rdfvalue.RDFInteger(self.last_chunk))
  super(AFF4SparseImage, self).Flush()
class LabelSet(aff4.AFF4Object):
  """An aff4 object which manages a set of labels.

  This object has no actual attributes, it simply manages the set.
  """

  # The set is expected to stay small, so it is stored as a collection of
  # attributes of the form "index:label_<label>", all unversioned (ts = 0).

  # Default location of the set of labels that are active on clients.
  CLIENT_LABELS_URN = "aff4:/index/labels/client_set"

  def __init__(self, urn, **kwargs):
    # NOTE(review): the passed urn is ignored - the set always lives at
    # CLIENT_LABELS_URN (preserved from the original implementation).
    super(LabelSet, self).__init__(urn=self.CLIENT_LABELS_URN, **kwargs)
    self.to_set = set()
    self.to_delete = set()

  def Flush(self):
    """Flush the data to the index."""
    super(LabelSet, self).Flush()
    # Pending additions win over pending deletions of the same label.
    self.to_delete = self.to_delete - self.to_set
    with data_store.DB.GetMutationPool() as mutation_pool:
      mutation_pool.LabelUpdateLabels(
          self.urn, self.to_set, to_delete=self.to_delete)
    self.to_set = set()
    self.to_delete = set()

  def Close(self):
    self.Flush()
    super(LabelSet, self).Close()

  def Add(self, label):
    self.to_set.add(label)

  def Remove(self, label):
    self.to_delete.add(label)

  def ListLabels(self):
    # Push any pending changes first so the fetch reflects them.
    if self.to_set or self.to_delete:
      self.Flush()
    return data_store.DB.LabelFetchAll(self.urn)
|
en
| 0.89946
|
#!/usr/bin/env python These are standard aff4 objects. # pylint: disable=redefined-builtin This represents a directory from the client. # We contain other objects within the tree. Refresh an old attribute. Note that refreshing the attribute is asynchronous. It does not change anything about the current object - you need to reopen the same URN some time later to get fresh data. Attributes: CONTAINS - Refresh the content of the directory listing. Args: attribute: An attribute object as listed above. Returns: The Flow ID that is pending Raises: IOError: If there has been an error starting the flow. # client id is the first path element # Get the pathspec for this object # Dependency loop: aff4_objects/aff4_grr.py depends on # aff4_objects/standard.py that depends on flows/general/filesystem.py # that eventually depends on aff4_objects/aff4_grr.py # flow_name=filesystem.ListDirectory.__name__, Attributes specific to VFSDirectory. A list of hashes. A class to store partial files. The schema class for AFF4SparseImage. Returns the relevant chunk from the datastore and reads ahead. # We don't have this chunk already cached. The most common read # access pattern is contiguous reading so since we have to go to # the data store already, we read ahead to reduce round trips. # This should work now - otherwise we just give up. Returns the relevant chunk from the datastore. # Keep track of the biggest chunk_number we've seen so far. Read as much as possible, but not more than length. # If we're past the end of the file, we don't have a chunk to read from, so # we can't read anymore. We return the empty string here so we can read off # the end of a file without raising, and get as much data as is there. # Make sure we don't read past the "end" of the file. We say the end is the # end of the last chunk. If we do try and read past the end, we should # return an empty string. # The end of the file is the *end* of the last chunk, so we add one here. 
# pylint: disable=protected-access # pylint: enable=protected-access # The chunk with the highest index we've seen so far. We'll use # this to keep track of what the biggest possible size this file # could be is. Add another blob to this image using its hash. # If we're adding a new blob, we should increase the size. If we're just # updating an existing blob, the size should stay the same. # That is, if we read the index at the right offset and no hash is there, we # must not have seen this blob before, so we say we're adding a new one and # increase in size. # We say that we've increased in size by the size of the blob, # but really we only store its hash in the AFF4SparseImage. # Keep track of the biggest chunk_number we've seen so far. # TODO(amoser): This opens a subobject for each AddBlob call :/ Do we have this chunk in the index? An aff4 object which manages a set of labels. This object has no actual attributes, it simply manages the set. # We expect the set to be quite small, so we simply store it as a collection # attributes of the form "index:label_<label>" all unversioned (ts = 0). # Location of the default set of labels, used to keep tract of active labels # for clients. Flush the data to the index. # Flush, so that any pending changes are visible.
| 1.915063
| 2
|
KM.py
|
ziranl16/UROP_KMTE
| 1
|
6626000
|
<reponame>ziranl16/UROP_KMTE
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter
# Sentinel years substituted when a date field is fully masked ("####").
YEARMIN = -50
YEARMAX = 3000
# Useful for printing plots in Jupyter
# calculate survival function for worksAt
def survival_find_0(filename, relation="3", horizon=3, show_plot=True):
    """Fit a Kaplan-Meier survival curve for one relation of a temporal KG file.

    Each input line is a whitespace-separated record of the form
    ``subject relation object start-date end-date`` where dates start with a
    4-digit year. Fully masked years ("####") are clamped to YEARMIN/YEARMAX;
    partially masked or malformed years cause the line to be skipped, and an
    inverted interval (start > end) is treated as still ongoing.

    Args:
        filename: Path to the triples file.
        relation: Relation id to filter on. Defaults to "3", the value that
            was previously hard-coded.
        horizon: Number of years at which to evaluate the survival function.
        show_plot: Whether to draw the Kaplan-Meier plot.

    Returns:
        The survival probability at ``horizon`` years (also printed).
    """
    # NOTE(review): for every matching triple the original code also recorded
    # a censored observation at t=0 (plus one seed row). That is preserved
    # here so the fitted curve is unchanged - confirm whether intentional.
    observations = [[0, 0]]
    with open(filename, 'r') as filein:
        for line in filein:
            fields = line.split()
            rel = fields[1].strip()
            start = fields[3].split('-')[0]
            end = fields[4].split('-')[0]
            if start == '####':
                start = YEARMIN
            elif start.find('#') != -1 or len(start) != 4:
                continue
            if end == '####':
                end = YEARMAX
            elif end.find('#') != -1 or len(end) != 4:
                continue
            start = int(start)
            end = int(end)
            if start > end:
                # Inconsistent interval: treat the fact as still ongoing.
                end = YEARMAX
            if end >= start and rel == relation:
                observations.append([0, 0])
                observations.append([end - start, 1])
    # Build the duration/event arrays in one shot instead of the original
    # quadratic np.append-per-row loop.
    triple_time = np.array(observations)
    df = pd.DataFrame({'T': triple_time[:, 0], 'E': triple_time[:, 1]})
    kmf = KaplanMeierFitter()
    kmf.fit(df['T'], df['E'])
    if show_plot:
        kmf.plot()
        plt.title("Kaplan Meier estimates relation <isMarriedTo>")
        plt.xlabel("Years after relation <isMarriedTo>")
        plt.ylabel("Survival Rate")
        plt.xlim(0, 30)
        plt.show()
    p = kmf.survival_function_at_times(horizon).values[0]
    print(p)
    return p


if __name__ == '__main__':
    survival_find_0("data/yago/large/train.txt")
|
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter
# Duplicate copy of the KM.py module content (dataset "content_cleaned"
# field); kept verbatim.
YEARMIN = -50
YEARMAX = 3000
# Useful for printing plots in Jupyter
# calculate survival function for worksAt
def survival_find_0(filename):
# Fits a Kaplan-Meier curve over (end - start) durations for relation id "3".
r = "3"
triple_time = np.array([[0, 0]])
with open(filename, 'r') as filein:
for line in filein:
relation = line.split()[1].strip()
start = line.split()[3].split('-')[0]
end = line.split()[4].split('-')[0]
if start == '####':
start = YEARMIN
elif start.find('#') != -1 or len(start) != 4:
continue
if end == '####':
end = YEARMAX
elif end.find('#') != -1 or len(end) != 4:
continue
start = int(start)
end = int(end)
if start > end:
end = YEARMAX
if end >= start:
if relation == r:
triple_time = np.append(triple_time, np.array([[0, 0]]), axis=0)
triple_time = np.append(triple_time, np.array([[end - start, 1]]), axis=0)
df = pd.DataFrame({'T': triple_time[:, 0], 'E': triple_time[:, 1]})
T = df['T']
E = df['E']
kmf = KaplanMeierFitter()
kmf.fit(T, E)
kmf.plot()
plt.title("Kaplan Meier estimates relation <isMarriedTo>")
plt.xlabel("Years after relation <isMarriedTo>")
plt.ylabel("Survival Rate")
plt.xlim(0, 30)
plt.show()
p = kmf.survival_function_at_times(3).values[0]
print(p)
if __name__ == '__main__':
survival_find_0("data/yago/large/train.txt")
|
en
| 0.652783
|
# Useful for printing plots in Jupyter # calculate survival function for worksAt ###': ###':
| 3.406482
| 3
|
nest_box_helper/admin.py
|
natalieehaas/nestboxhelper
| 0
|
6626001
|
from django.contrib import admin
from nest_box_helper.models import Account, Sheet, Park, Box, Attempt, UserParks
# Expose every nest-box model in the Django admin site (registration order
# matches the original one-call-per-model form).
for _model in (Account, Sheet, Park, Box, Attempt, UserParks):
    admin.site.register(_model)
|
from django.contrib import admin
from nest_box_helper.models import Account, Sheet, Park, Box, Attempt, UserParks
# Duplicate copy of the admin registrations (dataset "content_cleaned"
# field); kept verbatim.
admin.site.register(Account)
admin.site.register(Sheet)
admin.site.register(Park)
admin.site.register(Box)
admin.site.register(Attempt)
admin.site.register(UserParks)
|
none
| 1
| 1.426636
| 1
|
|
archive/scripts/classify_sleep.py
|
marta18a/sleep_classifiers
| 3
|
6626002
|
import numpy as np
import warnings
import time
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.font_manager as font_manager
import matplotlib.cbook
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import class_weight
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_curve, auc, confusion_matrix
import utilities
import multiprocessing
import get_parameters
# Silence known-noisy deprecation/LAPACK warnings from matplotlib and scipy.
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
# Which task to run: sleep/wake (RUN_SW) vs wake/NREM/REM (RUN_REM).
run_flag = utilities.RUN_SW
font_name = 'Arial'
verbose = False
# Number of train/test split repetitions averaged into each curve.
NUM_REPS_TRAIN_TEST = 10
LOAD_PARAMS = False # Load params saved from file
PRINT_TABLE = True # Print LaTeX table for paper
# REM Binary search parameters
FALSE_POSITIVE_BUFFER = 0.001 # How close we have to be to the desired goal FP before it can be added to the average
MAX_ATTEMPTS_WAKE_BINARY_SEARCH = 50 # Number of times to try before quitting the binary search
NUM_FALSE_POSITIVE_POINTS_REM = 20
REM_NREM_ACCURACY_DIFFERENCE = 1e-2 # How close we want NREM and REM accuracies to be
MAX_ATTEMPTS_NREM_REM_BINARY_SEARCH = 15
# Constants for plotting and tables
NUM_FALSE_POSITIVE_POINTS_PLOT = 100
FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES = 0.6
# Classifiers under comparison, keyed by display name.
METHOD_DICT = {'Random Forest': RandomForestClassifier(n_estimators=500, max_features=1.0, max_depth=10,
min_samples_split=10, min_samples_leaf=1),
'Logistic Regression': LogisticRegression(penalty='l1', solver='liblinear', verbose=0),
'KNeighbors': KNeighborsClassifier(),
'MLP': MLPClassifier(activation='relu', hidden_layer_sizes=(30, 30, 30), max_iter=1000, alpha=0.01)}
# Feature subsets evaluated for each classifier; `cases` holds their labels
# and `colors` the matching plot colors (parallel lists).
feature_sets = [{'Motion': True, 'HR': False, 'Clock': False, 'Time': False, 'CircModel': False},
{'Motion': False, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False},
{'Motion': True, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False},
{'Motion': True, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': True}]
cases = ['Motion only', 'HR only', 'Motion and HR', 'Motion, HR, Clock']
colors = [sns.xkcd_rgb["denim blue"],
sns.xkcd_rgb["yellow orange"],
sns.xkcd_rgb["medium green"],
sns.xkcd_rgb["pale red"]]
# NOTE(review): `global` at module scope is a no-op; these only document that
# the names are set elsewhere before use - confirm callers assign them.
global train_test_dict
global description
def train_and_test_model(training_subjects, testing_subjects, method_key, classifier, feature_set, data_dict,
save_to_file=False):
"""
Trains and tests model for given feature set and classifier.
Args:
training_subjects ([int]): Subject IDs in training set
testing_subjects ([int]): Subject IDs in testing set
method_key (str): Key for classifier
classifier : Classifier object
feature_set (dict): Feature set to test
data_dict (dict): Dictionary to look up subject training and testing data
save_to_file (bool) : Flag if want to save probabilities to file
Returns:
[int]: ground truth labels
np.array : predicted labels
np.array : class prediction probabilities
"""
# Side effects: writes to parameters/, trained_models/ and (optionally)
# sleep_modeling/ relative to the working directory.
classifier_abbrev = str(classifier)[0:4]
save_name = 'parameters/' + classifier_abbrev + utilities.string_from_features(feature_set) + '_params.npy'
if LOAD_PARAMS or method_key == 'MLP': # TODO: Faster parameter searching with MLP
# NOTE(review): a dict saved via np.save needs allow_pickle=True to load
# on numpy >= 1.16.3 - confirm the pinned numpy version.
params = np.load(save_name).item()
else:
params = get_parameters.find_best(method_key, feature_set, training_subjects)
np.save(save_name, params)
classifier.set_params(**params)
training_set_features = np.array([])
training_set_true_labels = np.array([])
testing_set_features = np.array([])
testing_set_true_labels = np.array([])
# Get labels and features for training and testing sets
for subject in training_subjects:
scores_by_epoch, features_by_epoch = utilities.get_features(subject, data_dict)
if np.shape(training_set_features)[0] == 0:
training_set_features = features_by_epoch
training_set_true_labels = scores_by_epoch
else:
training_set_features = np.vstack((training_set_features, features_by_epoch))
training_set_true_labels = np.vstack((training_set_true_labels, scores_by_epoch))
for subject in testing_subjects:
scores_by_epoch, features_by_epoch = utilities.get_features(subject, data_dict)
if np.shape(testing_set_features)[0] == 0:
testing_set_features = features_by_epoch
testing_set_true_labels = scores_by_epoch
else:
testing_set_features = np.vstack((testing_set_features, features_by_epoch))
testing_set_true_labels = np.vstack((testing_set_true_labels, scores_by_epoch))
# Convert raw labels to 0/1 or 0-2
training_set_true_labels = utilities.process_raw_scores(training_set_true_labels, run_flag)
testing_set_true_labels = utilities.process_raw_scores(testing_set_true_labels, run_flag)
# Set class weights for those methods that allow them
class_weights = class_weight.compute_class_weight('balanced',
np.unique(training_set_true_labels),
training_set_true_labels)
class_weight_dict = {0: class_weights[0], 1: class_weights[1]}
if len(class_weights) > 2: # Handles wake/NREM/REM case
class_weight_dict = {0: class_weights[0], 1: class_weights[1], 2: class_weights[2]}
classifier.class_weight = class_weight_dict
# # Debug-only: Uncomment to reverse the training/testing order, and test Apple Watch data on MESA-trained models
# classifier = np.load('trained_models/' + classifier_abbrev +
# utilities.string_from_features(feature_set) + '_trained_modelMESA.npy').item()
# Fit model to training data, get class predictions and class probabilities
classifier.fit(training_set_features, training_set_true_labels)
predicted_labels = classifier.predict(testing_set_features)
class_probabilities = classifier.predict_proba(testing_set_features)
# Save trained model to use for testing MESA cohort
save_name = 'trained_models/' + classifier_abbrev + \
utilities.string_from_features(feature_set) + '_trained_model.npy'
np.save(save_name, classifier)
# Optional; save to file for Kalman filter and print performance metrics
if save_to_file:
np.savetxt('sleep_modeling/' + str(testing_subjects[0]) + '.csv',
classifier.predict_proba(testing_set_features), delimiter=',')
np.savetxt('sleep_modeling/' + str(testing_subjects[0]) + '_classes.csv',
testing_set_true_labels, delimiter=',')
np.savetxt('sleep_modeling/' + str(testing_subjects[0]) + '_predicted_classes.csv',
predicted_labels, delimiter=',')
true_positive_rate_for_interpolation = 0.85
false_positive_rates, true_positive_rates, thresholds = roc_curve(testing_set_true_labels,
class_probabilities[:, 1],
pos_label=1, drop_intermediate=False)
print('Subject ID: ' + str(testing_subjects[0]))
print('False positive rate: ' + str(
np.interp(true_positive_rate_for_interpolation, true_positive_rates, false_positive_rates)))
print('True positive rate: ' + str(true_positive_rate_for_interpolation))
print('\n\n')
return testing_set_true_labels, predicted_labels, class_probabilities
def parallel_roc(trial_dictionary):
"""
Calls training and testing model for ROC; allows parallelization
Args:
trial_dictionary (dict): All information needed to train and test the model for a classifier/feature set
Returns:
Performance metrics for the training/testing iteration
"""
# Unpack one trial's configuration (built in run_roc).
method = trial_dictionary['method']
feature_set = trial_dictionary['feature_set']
data_dict = trial_dictionary['data_dict']
train_set = trial_dictionary['train_set']
test_set = trial_dictionary['test_set']
method_key = trial_dictionary['method_key']
# Get ground truth, predictions, and class probabilities
testing_set_true_labels, predicted_labels, class_probabilities = train_and_test_model(train_set, test_set,
method_key, method,
feature_set, data_dict)
if run_flag == utilities.RUN_SW: # If sleep/wake classification
false_positive_rates, true_positive_rates, thresholds = roc_curve(testing_set_true_labels,
class_probabilities[:, 1], pos_label=1,
drop_intermediate=False)
performance = utilities.thresh_interpolation(false_positive_rates, true_positive_rates, thresholds,
class_probabilities, testing_set_true_labels)
return [false_positive_rates, true_positive_rates, thresholds, performance]
else: # If wake/NREM/REM classification
false_positive_rates, true_positive_rate_average, nrem_accuracies, rem_accuracies, best_accuracies, \
kappas_at_best_accuracies = roc_curve_rem(testing_set_true_labels, class_probabilities)
return [false_positive_rates, true_positive_rate_average, nrem_accuracies, rem_accuracies, best_accuracies,
kappas_at_best_accuracies]
def roc_curve_rem(true_labels, class_probabilities):
"""
Make an "ROC curve for NREM/REM/wake classification" by looping over desired false positive rates
and performing two binary searches: one for a wake threshold, and one to balance the accuracies of the REM
and NREM classes
Args:
true_labels (np.array): Ground truth labels for tested epochs
class_probabilities (np.array): Class probabilities for tested epochs
Returns:
false positive rates, average NREM/REM accuracies, individual REM/NREM accuracies, best accuracies
found during the search, and kappas at best accuracies
"""
goal_false_positive_spread = [] # Spread of targeted goal false positive rates
for i in range(0, NUM_FALSE_POSITIVE_POINTS_REM):
goal_false_positive_spread.append(i / (NUM_FALSE_POSITIVE_POINTS_REM * 1.0))
goal_false_positive_spread = np.array(goal_false_positive_spread)
# Holders for performance metrics
false_positive_rate_spread = []
true_positive_rate_spread = []
accuracies = []
kappas = []
nrem_class_accuracies = []
rem_class_accuracies = []
start = time.time()
true_wake_indices = np.where(true_labels == 0)[0] # Indices where ground truth is wake
true_nrem_indices = np.where(true_labels == 1)[0] # Indices of ground truth NREM
true_rem_indices = np.where(true_labels == 2)[0] # Indices of ground truth REM
# Get coverage over entire x-axis of ROC curve by repeating binary searches over a spread
for goal_false_positive_rate in goal_false_positive_spread:
false_positive_rate = -1
binary_search_counter = 0
# Search while we haven't found the target false positive rate
while (false_positive_rate < goal_false_positive_rate - FALSE_POSITIVE_BUFFER
or false_positive_rate >= goal_false_positive_rate + FALSE_POSITIVE_BUFFER) and binary_search_counter < MAX_ATTEMPTS_WAKE_BINARY_SEARCH:
if binary_search_counter == 0: # Start binary search conditions
threshold_for_sleep = 0.5
threshold_delta = 0.25
else: # Update threshold based on difference between goal and actual false positive rate
if false_positive_rate < goal_false_positive_rate - FALSE_POSITIVE_BUFFER:
threshold_for_sleep = threshold_for_sleep - threshold_delta
threshold_delta = threshold_delta / 2
if false_positive_rate >= goal_false_positive_rate + FALSE_POSITIVE_BUFFER:
threshold_for_sleep = threshold_for_sleep + threshold_delta
threshold_delta = threshold_delta / 2
if goal_false_positive_rate == 1: # Edge cases
threshold_for_sleep = 0.0
if goal_false_positive_rate == 0:
threshold_for_sleep = 1.0
predicted_sleep_indices = np.where(1 - np.array(class_probabilities[:, 0]) >= threshold_for_sleep)[0]
predicted_labels = np.zeros(np.shape(true_labels))
predicted_labels[predicted_sleep_indices] = 1 # Set locations of predicted sleep to 1
predicted_labels_at_true_wake_indices = predicted_labels[true_wake_indices]
# FPR: 1 - Wake scored as wake, a.k.a 1 - (Total true wake - true wake scored as sleep)/(Total true wake)
number_wake_correct = len(true_wake_indices) - np.count_nonzero(predicted_labels_at_true_wake_indices)
fraction_wake_correct = number_wake_correct / (len(true_wake_indices) * 1.0)
false_positive_rate = 1.0 - fraction_wake_correct
binary_search_counter = binary_search_counter + 1
# # Uncomment for debugging:
# print('Goal FP = ' + str(goal_false_positive_rate) + ' Thresh: ' + str(threshold_for_sleep) + ',
# Delta: ' + str(threshold_delta) + ', False positive rate: ' + str(false_positive_rate) + ',
# Count: ' + str(binary_search_counter))
if binary_search_counter < MAX_ATTEMPTS_WAKE_BINARY_SEARCH: # Checks we found our target false positive rate
# Second binary search: split the epochs already predicted as sleep
# (predicted_labels/predicted_sleep_indices carry over from the wake
# search above) into NREM vs REM so both class accuracies match.
# Initial values for binary search
smallest_accuracy_difference = 2 # Difference between NREM and REM accuracies
true_positive_rate = 0
rem_accuracy = 0
nrem_accuracy = 0
best_accuracy = -1
kappa_at_best_accuracy = -1
# Initial values for second threshold binary search
count_thresh = 0
threshold_for_rem = 0.5
threshold_delta_rem = 0.5
while count_thresh < MAX_ATTEMPTS_NREM_REM_BINARY_SEARCH and \
smallest_accuracy_difference > REM_NREM_ACCURACY_DIFFERENCE:
count_thresh = count_thresh + 1
for predicted_sleep_index in range(len(predicted_sleep_indices)):
predicted_sleep_epoch = predicted_sleep_indices[predicted_sleep_index]
if class_probabilities[predicted_sleep_epoch, 2] > threshold_for_rem:
predicted_labels[predicted_sleep_epoch] = 2 # Set to REM sleep
else:
predicted_labels[predicted_sleep_epoch] = 1 # Set to NREM sleep
# Compute accuracy and kappa at this threshold during the search
accuracy = accuracy_score(predicted_labels, true_labels)
kappa = cohen_kappa_score(predicted_labels, true_labels)
if accuracy > best_accuracy: # Save if we've exceeded best accuracy
best_accuracy = accuracy
kappa_at_best_accuracy = kappa
predicted_nrem_indices = np.where(predicted_labels == 1)[0]
predicted_rem_indices = np.where(predicted_labels == 2)[0]
# Compute NREM/REM accuracies -- number of true class epochs scored as class, divided by number in class
correct_nrem_indices = np.intersect1d(predicted_nrem_indices, true_nrem_indices)
correct_rem_indices = np.intersect1d(predicted_rem_indices, true_rem_indices)
nrem_accuracy = len(correct_nrem_indices) / (1.0 * len(true_nrem_indices))
rem_accuracy = len(correct_rem_indices) / (1.0 * len(true_rem_indices))
true_positive_rate = (rem_accuracy + nrem_accuracy) / 2.0
smallest_accuracy_difference = np.abs(nrem_accuracy - rem_accuracy)
if rem_accuracy < nrem_accuracy:
threshold_for_rem = threshold_for_rem - threshold_delta_rem / 2.0
else:
threshold_for_rem = threshold_for_rem + threshold_delta_rem / 2.0
threshold_delta_rem = threshold_delta_rem / 2.0
# Add found values to holders
false_positive_rate_spread.append(false_positive_rate)
true_positive_rate_spread.append(true_positive_rate)
nrem_class_accuracies.append(nrem_accuracy)
rem_class_accuracies.append(rem_accuracy)
accuracies.append(best_accuracy)
kappas.append(kappa_at_best_accuracy)
end = time.time()
if not PRINT_TABLE:
print('Elapsed time for all goal FPs search: ' + str(end - start))
false_positive_rate_spread = np.array(false_positive_rate_spread)
true_positive_rate_spread = np.array(true_positive_rate_spread)
nrem_class_accuracies = np.array(nrem_class_accuracies)
rem_class_accuracies = np.array(rem_class_accuracies)
accuracies = np.array(accuracies)
kappas = np.array(kappas)
return false_positive_rate_spread, true_positive_rate_spread, nrem_class_accuracies, rem_class_accuracies, accuracies, kappas
def run_roc(method_key, feature_set, data_dict, train_test_dict, legend_text, plot_color):
"""
Plots ROC curve for specified feature set and classifier
Args:
method_key (str): Key for classifier getting used
feature_set (dict): Features to pass to classifier
data_dict (dict): Contains all the subject data for classifiaction
train_test_dict (dict): Contains training/testing subject splits for all trials
legend_text (str): Label for legend
plot_color (RGBA): color to plot
"""
method = METHOD_DICT[method_key] # Classifier to test
params = []
if verbose:
print('Running trials...')
output = []
for run in range(0, NUM_REPS_TRAIN_TEST): # Pre-builds dictionary to pass for training/testing
train_set, test_set = train_test_dict[run]
trial_dictionary = dict()
trial_dictionary['run'] = run
trial_dictionary['method'] = method
trial_dictionary['method_key'] = method_key
trial_dictionary['feature_set'] = feature_set
trial_dictionary['data_dict'] = data_dict
trial_dictionary['train_set'] = train_set
trial_dictionary['test_set'] = test_set
params.append(trial_dictionary)
if run_flag == utilities.RUN_REM or run_flag == utilities.RUN_SW:
output.append(parallel_roc(trial_dictionary))
# TODO: Figure out why parallelization is causing problems
# if run_flag == utilities.RUN_SW:
# output = pool.map(parallel_roc,params)
if verbose:
print('Looping over trials...')
# Create false positive rate range to interpolate results over
false_positive_spread = []
for i in range(0, NUM_FALSE_POSITIVE_POINTS_PLOT):
false_positive_spread.append((i + 1) / (NUM_FALSE_POSITIVE_POINTS_PLOT * 1.0))
false_positive_spread = np.array(false_positive_spread)
true_positive_spread = np.zeros(np.shape(false_positive_spread))
# Average the results of all trials
if run_flag == utilities.RUN_SW:
# Sleep/wake branch: average per-trial ROC curves and table metrics.
avg_performance_at_interpolated_points = []
for run in range(0, NUM_REPS_TRAIN_TEST):
false_positive_rate = output[run][0]
true_positive_rate = output[run][1]
performance_at_interpolated_points = output[run][3] # Interpolation points for tables in paper
# Adds up performance across all true positive thresholds, to average over trials
for interpolated_point_index in range(0, len(performance_at_interpolated_points)):
if len(avg_performance_at_interpolated_points) <= interpolated_point_index:
performance_for_run = np.array(performance_at_interpolated_points[interpolated_point_index])
avg_performance_at_interpolated_points.append(performance_for_run)
else:
performance_for_run = np.array(performance_at_interpolated_points[interpolated_point_index])
avg_performance_at_interpolated_points[interpolated_point_index] = \
avg_performance_at_interpolated_points[interpolated_point_index] + performance_for_run
true_positive_rate_interpolated = np.interp(false_positive_spread, false_positive_rate, true_positive_rate)
true_positive_spread = true_positive_spread + true_positive_rate_interpolated
true_positive_spread = true_positive_spread / NUM_REPS_TRAIN_TEST
# Insert (0,0) point for plotting curves
false_positive_spread = np.insert(false_positive_spread, 0, 0)
true_positive_spread = np.insert(true_positive_spread, 0, 0)
false_positive_spread = np.array(false_positive_spread)
true_positive_spread = np.array(true_positive_spread)
plt.plot(false_positive_spread, true_positive_spread, label=legend_text, color=plot_color) # Plot line for ROC
if PRINT_TABLE:
# Emit one LaTeX table row per interpolated threshold for the paper.
print('\hline ' + utilities.string_from_features(feature_set) + ' & ')
for interpolated_point_index in range(0, len(performance_at_interpolated_points)):
performance_metrics = avg_performance_at_interpolated_points[
interpolated_point_index] / NUM_REPS_TRAIN_TEST
line = ''
if interpolated_point_index > 0:
line = ' & '
for performance_item in performance_metrics[:-1]:
line = line + str(round(performance_item, 3)) + ' & '
if interpolated_point_index == 0:
line = line + str(round(performance_metrics[-1], 3)) + ' \\\\'
else:
line = line + ' \\\\'
print(line)
if run_flag == utilities.RUN_REM:
# Wake/NREM/REM branch: average curves plus per-class accuracies.
nrem_class_accuracy_spread = np.zeros(np.shape(false_positive_spread))
rem_class_accuracy_spread = np.zeros(np.shape(false_positive_spread))
accuracy_spread = np.zeros(np.shape(false_positive_spread))
kappa_spread = np.zeros(np.shape(false_positive_spread))
for run in range(0, NUM_REPS_TRAIN_TEST):
# Get performance for trial
false_positive_rate = output[run][0]
true_positive_rate = output[run][1]
nrem_class_accuracy = output[run][2]
rem_class_accuracy = output[run][3]
accuracies = output[run][4]
kappas = output[run][5]
# Interpolate to match the desired spread
true_positive_rate_interpolated = np.interp(false_positive_spread, false_positive_rate, true_positive_rate)
nrem_accuracy_interpolated = np.interp(false_positive_spread, false_positive_rate, nrem_class_accuracy)
rem_accuracy_interpolated = np.interp(false_positive_spread, false_positive_rate, rem_class_accuracy)
accuracy_interpolated = np.interp(false_positive_spread, false_positive_rate, accuracies)
kappa_interpolated = np.interp(false_positive_spread, false_positive_rate, kappas)
# Add to cumulative totals for each value
true_positive_spread = true_positive_spread + true_positive_rate_interpolated
nrem_class_accuracy_spread = nrem_class_accuracy_spread + nrem_accuracy_interpolated
rem_class_accuracy_spread = rem_class_accuracy_spread + rem_accuracy_interpolated
accuracy_spread = accuracy_spread + accuracy_interpolated
kappa_spread = kappa_spread + kappa_interpolated
# Divide by number of trials to get average
true_positive_spread = true_positive_spread / NUM_REPS_TRAIN_TEST
nrem_class_accuracy_spread = nrem_class_accuracy_spread / NUM_REPS_TRAIN_TEST
rem_class_accuracy_spread = rem_class_accuracy_spread / NUM_REPS_TRAIN_TEST
accuracy_spread = accuracy_spread / NUM_REPS_TRAIN_TEST
kappa_spread = kappa_spread / NUM_REPS_TRAIN_TEST
# For tables, interpolate to find threshold where desired false positive rate is met
nrem_accuracy_at_interpolated_point = np.interp(FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES,
false_positive_spread, nrem_class_accuracy_spread)
rem_accuracy_at_interpolated_point = np.interp(FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES,
false_positive_spread, rem_class_accuracy_spread)
index_of_best_accuracy = np.argmax(accuracy_spread)
if PRINT_TABLE:
print('\hline ' + utilities.string_from_features(feature_set) + ' & ')
line = str(round(FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES, 3)) + ' & ' \
+ str(round(nrem_accuracy_at_interpolated_point, 3)) + ' & ' \
+ str(round(rem_accuracy_at_interpolated_point, 3))
line = line + ' & ' + str(round(accuracy_spread[index_of_best_accuracy], 3)) + ' & ' + \
str(round(kappa_spread[index_of_best_accuracy], 3))
line = line + ' \\\\'
print(line)
# Insert(0,0) point for ROC curve
false_positive_spread = np.insert(false_positive_spread, 0, 0)
true_positive_spread = np.insert(true_positive_spread, 0, 0)
nrem_class_accuracy_spread = np.insert(nrem_class_accuracy_spread, 0, 0)
rem_class_accuracy_spread = np.insert(rem_class_accuracy_spread, 0, 0)
false_positive_spread = np.array(false_positive_spread)
true_positive_spread = np.array(true_positive_spread)
tps_nrem = np.array(nrem_class_accuracy_spread)
tps_rem = np.array(rem_class_accuracy_spread)
# Plot line for ROC
plt.plot(false_positive_spread, true_positive_spread, label=legend_text, color=plot_color)
plt.plot(false_positive_spread, tps_nrem, color=plot_color, linestyle=':')
plt.plot(false_positive_spread, tps_rem, color=plot_color, linestyle='--')
def make_method_roc(method_key):
    """
    Plot ROC curves for every configured feature set using one classifier.

    Args:
        method_key (str): Key into METHOD_DICT naming the classifier to plot
    """
    timer_start = time.time()
    if verbose:
        print("Starting method ROC...")
    # Optional LaTeX table header for the paper
    if PRINT_TABLE:
        if run_flag == utilities.RUN_SW:
            print('\\begin{table} \caption{' + method_key +
                  ' Summary Statistics} \\begin{tabular}{l*{5}{c}} & Accuracy & Specificity & Sensitivity & $\kappa$ & AUC \\\\ ')
        if run_flag == utilities.RUN_REM:
            print('\\begin{table} \caption{' + method_key +
                  ' REM Summary Statistics} \\begin{tabular}{l*{5}{c}} & Wake Correct & NREM Correct & REM Correct & Best accuracy & $\kappa$ \\\\ ')
    # One ROC curve per feature set, all drawn on the same axes
    for current_features, case_label, case_color in zip(feature_sets, cases, colors):
        data_dict = utilities.build_data_dictionary(current_features)  # Loads all data to dict
        run_roc(method_key, current_features, data_dict, train_test_dict, case_label, case_color)
    timer_end = time.time()
    if not PRINT_TABLE:
        print('Elapsed time: ' + str(timer_end - timer_start))
    # Optional LaTeX table footer
    if PRINT_TABLE:
        if run_flag == utilities.RUN_SW:
            print('\end{tabular} \label{tab:' + method_key[0:4] + 'params} \end{table}')
        if run_flag == utilities.RUN_REM:
            print('\end{tabular} \label{tab:' + method_key[0:4] + '_rem_params} \end{table}')
    utilities.tidy_plot()
    font = font_manager.FontProperties(family='Arial', style='normal', size=14)
    if method_key == 'MLP':  # Legend is only drawn on the MLP figure
        plt.legend(bbox_to_anchor=(1.0, 0.4), borderaxespad=0., prop=font)
    plt.xlabel('False positive rate', fontsize=16, fontname=font_name)
    plt.ylabel('True positive rate', fontsize=16, fontname=font_name)
    plt.title(method_key, fontsize=18, fontname=font_name, fontweight='bold')
    if run_flag == utilities.RUN_REM:
        type_string = '_rem_'
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 0.8])
    else:
        type_string = '_sw_'
    plt.savefig(method_key + '_' + str(NUM_REPS_TRAIN_TEST) + description + type_string + '_roc.png')
    plt.close()
def run_all(flag, trial_count):
    """
    Run every classifier in METHOD_DICT for either sleep/wake or wake/NREM/REM.

    Args:
        flag (int): Type of classification to run (utilities.RUN_SW or utilities.RUN_REM)
        trial_count (int): How many times to repeat training and testing
    """
    global train_test_dict
    global run_flag
    global NUM_REPS_TRAIN_TEST
    global description
    run_flag = flag
    NUM_REPS_TRAIN_TEST = trial_count
    plt.ioff()  # Non-interactive plotting; figures are saved to disk, not shown
    description = 'output'
    # NOTE(review): removed the unused multiprocessing.Pool(processes=8) the original
    # created here -- no work was ever submitted to it (the pool.map path is commented
    # out in run_roc), so it only spawned 8 idle worker processes per call.
    # Use a consistent train/test set across classifiers
    train_test_dict = utilities.make_train_test_dict(NUM_REPS_TRAIN_TEST)
    for method_key in METHOD_DICT.keys():
        if not PRINT_TABLE:
            print(method_key)
        make_method_roc(method_key)
    print('\a')  # Terminal bell to signal completion
def run_one(method_key, flag, trial_count):
    """
    Run a single classifier for either sleep/wake or wake/NREM/REM classification.

    Args:
        method_key (str): Key into METHOD_DICT naming the classifier to use
        flag (int): Type of classification to run (utilities.RUN_SW or utilities.RUN_REM)
        trial_count (int): How many times to repeat training and testing
    """
    global train_test_dict
    global run_flag
    global NUM_REPS_TRAIN_TEST
    global description
    run_flag = flag
    NUM_REPS_TRAIN_TEST = trial_count
    plt.ioff()  # Non-interactive plotting; figures are saved to disk, not shown
    description = 'output'
    # NOTE(review): removed the unused multiprocessing.Pool(processes=8) the original
    # created here -- no work was ever submitted to it (the pool.map path is commented
    # out in run_roc), so it only spawned 8 idle worker processes per call.
    # Use a consistent train/test set across classifiers
    train_test_dict = utilities.make_train_test_dict(NUM_REPS_TRAIN_TEST, 0.1)
    make_method_roc(method_key)
# Debugging: Prints subject performance
def check_subjects():
    """Debug helper: print per-subject sleep/wake performance for the HR-only MLP."""
    global run_flag
    run_flag = utilities.RUN_SW
    chosen_features = {'Motion': False, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False}
    export_all_subjects(chosen_features, 'MLP')
# For sleep model/Kalman filter, saves classifier probabilities to file
def sleep_model_export():
    """Save MLP wake/NREM/REM class probabilities to file for the sleep model / Kalman filter."""
    global run_flag
    run_flag = utilities.RUN_REM
    chosen_features = {'Motion': True, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False}
    export_all_subjects(chosen_features, 'MLP')
# For Kalman filter and debugging, train on all subjects but one; save probabilities for tested class:
def export_all_subjects(feature_set, method_key):
    """
    Leave-one-out export for the Kalman filter / debugging: for each subject,
    train on all other subjects and save the held-out subject's class
    probabilities to file (via train_and_test_model's save_to_file flag).

    Args:
        feature_set (dict): Feature flags to use for training/testing
        method_key (str): Key into METHOD_DICT naming the classifier
    """
    data_dict = utilities.build_data_dictionary(feature_set)
    train_set = utilities.FULL_SET
    for ind, subject_id in enumerate(train_set):
        # Train on everyone except the held-out subject. Slicing handles
        # ind == 0 naturally (empty prefix), so no special case is needed.
        train_set_temp = train_set[0:ind] + train_set[ind + 1:]
        train_and_test_model(train_set_temp, [subject_id], method_key,
                             METHOD_DICT[method_key], feature_set, data_dict, True)
if __name__ == '__main__':
    # Script entry point: export per-subject wake/NREM/REM class probabilities
    # for the downstream sleep model / Kalman filter.
    # check_subjects()
    sleep_model_export()
|
import numpy as np
import warnings
import time
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.font_manager as font_manager
import matplotlib.cbook
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils import class_weight
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_curve, auc, confusion_matrix
import utilities
import multiprocessing
import get_parameters
warnings.filterwarnings("ignore", category=matplotlib.cbook.mplDeprecation)
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
run_flag = utilities.RUN_SW  # Active classification task; overwritten by run_all/run_one/check_subjects
font_name = 'Arial'
verbose = False
NUM_REPS_TRAIN_TEST = 10  # Default train/test repetitions; overwritten by run_all/run_one
LOAD_PARAMS = False  # Load params saved from file
PRINT_TABLE = True  # Print LaTeX table for paper
# REM Binary search parameters
FALSE_POSITIVE_BUFFER = 0.001  # How close we have to be to the desired goal FP before it can be added to the average
MAX_ATTEMPTS_WAKE_BINARY_SEARCH = 50  # Number of times to try before quitting the binary search
NUM_FALSE_POSITIVE_POINTS_REM = 20  # Number of goal false-positive rates swept in roc_curve_rem
REM_NREM_ACCURACY_DIFFERENCE = 1e-2  # How close we want NREM and REM accuracies to be
MAX_ATTEMPTS_NREM_REM_BINARY_SEARCH = 15
# Constants for plotting and tables
NUM_FALSE_POSITIVE_POINTS_PLOT = 100
FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES = 0.6
# Classifiers under comparison, keyed by display name
METHOD_DICT = {'Random Forest': RandomForestClassifier(n_estimators=500, max_features=1.0, max_depth=10,
                                                       min_samples_split=10, min_samples_leaf=1),
               'Logistic Regression': LogisticRegression(penalty='l1', solver='liblinear', verbose=0),
               'KNeighbors': KNeighborsClassifier(),
               'MLP': MLPClassifier(activation='relu', hidden_layer_sizes=(30, 30, 30), max_iter=1000, alpha=0.01)}
# Feature combinations evaluated for each classifier; parallel to `cases` and `colors`
feature_sets = [{'Motion': True, 'HR': False, 'Clock': False, 'Time': False, 'CircModel': False},
                {'Motion': False, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False},
                {'Motion': True, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False},
                {'Motion': True, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': True}]
cases = ['Motion only', 'HR only', 'Motion and HR', 'Motion, HR, Clock']  # Legend labels for the feature sets
colors = [sns.xkcd_rgb["denim blue"],
          sns.xkcd_rgb["yellow orange"],
          sns.xkcd_rgb["medium green"],
          sns.xkcd_rgb["pale red"]]
# NOTE(review): `global` at module level is a no-op -- these two statements neither
# declare nor create the names. train_test_dict and description only come into
# existence when run_all/run_one assign them.
global train_test_dict
global description
def train_and_test_model(training_subjects, testing_subjects, method_key, classifier, feature_set, data_dict,
                         save_to_file=False):
    """
    Trains and tests model for given feature set and classifier.

    Loads (or searches for) hyperparameters, stacks per-subject feature
    matrices, fits the classifier with balanced class weights, and saves the
    trained model to disk. Optionally writes the first test subject's class
    probabilities to CSV for the downstream sleep model / Kalman filter.

    Args:
        training_subjects ([int]): Subject IDs in training set
        testing_subjects ([int]): Subject IDs in testing set
        method_key (str): Key for classifier
        classifier : Classifier object
        feature_set (dict): Feature set to test
        data_dict (dict): Dictionary to look up subject training and testing data
        save_to_file (bool) : Flag if want to save probabilities to file
    Returns:
        [int]: ground truth labels
        np.array : predicted labels
        np.array : class prediction probabilities
    """
    # First four characters of the classifier repr are used as a filename prefix
    classifier_abbrev = str(classifier)[0:4]
    save_name = 'parameters/' + classifier_abbrev + utilities.string_from_features(feature_set) + '_params.npy'
    if LOAD_PARAMS or method_key == 'MLP':  # TODO: Faster parameter searching with MLP
        # MLP always loads cached params from disk; others only when LOAD_PARAMS is set
        params = np.load(save_name).item()
    else:
        params = get_parameters.find_best(method_key, feature_set, training_subjects)
        np.save(save_name, params)
    classifier.set_params(**params)
    training_set_features = np.array([])
    training_set_true_labels = np.array([])
    testing_set_features = np.array([])
    testing_set_true_labels = np.array([])
    # Get labels and features for training and testing sets
    for subject in training_subjects:
        scores_by_epoch, features_by_epoch = utilities.get_features(subject, data_dict)
        if np.shape(training_set_features)[0] == 0:
            # First subject initializes the arrays; later subjects are stacked on
            training_set_features = features_by_epoch
            training_set_true_labels = scores_by_epoch
        else:
            training_set_features = np.vstack((training_set_features, features_by_epoch))
            training_set_true_labels = np.vstack((training_set_true_labels, scores_by_epoch))
    for subject in testing_subjects:
        scores_by_epoch, features_by_epoch = utilities.get_features(subject, data_dict)
        if np.shape(testing_set_features)[0] == 0:
            testing_set_features = features_by_epoch
            testing_set_true_labels = scores_by_epoch
        else:
            testing_set_features = np.vstack((testing_set_features, features_by_epoch))
            testing_set_true_labels = np.vstack((testing_set_true_labels, scores_by_epoch))
    # Convert raw labels to 0/1 or 0-2
    training_set_true_labels = utilities.process_raw_scores(training_set_true_labels, run_flag)
    testing_set_true_labels = utilities.process_raw_scores(testing_set_true_labels, run_flag)
    # Set class weights for those methods that allow them
    class_weights = class_weight.compute_class_weight('balanced',
                                                      np.unique(training_set_true_labels),
                                                      training_set_true_labels)
    class_weight_dict = {0: class_weights[0], 1: class_weights[1]}
    if len(class_weights) > 2:  # Handles wake/NREM/REM case
        class_weight_dict = {0: class_weights[0], 1: class_weights[1], 2: class_weights[2]}
    classifier.class_weight = class_weight_dict
    # # Debug-only: Uncomment to reverse the training/testing order, and test Apple Watch data on MESA-trained models
    # classifier = np.load('trained_models/' + classifier_abbrev +
    #                      utilities.string_from_features(feature_set) + '_trained_modelMESA.npy').item()
    # Fit model to training data, get class predictions and class probabilities
    classifier.fit(training_set_features, training_set_true_labels)
    predicted_labels = classifier.predict(testing_set_features)
    class_probabilities = classifier.predict_proba(testing_set_features)
    # Save trained model to use for testing MESA cohort
    save_name = 'trained_models/' + classifier_abbrev + \
                utilities.string_from_features(feature_set) + '_trained_model.npy'
    np.save(save_name, classifier)
    # Optional; save to file for Kalman filter and print performance metrics
    if save_to_file:
        # Only the FIRST test subject's ID names the files -- assumes callers pass
        # a single-subject test set here (export_all_subjects does)
        np.savetxt('sleep_modeling/' + str(testing_subjects[0]) + '.csv',
                   classifier.predict_proba(testing_set_features), delimiter=',')
        np.savetxt('sleep_modeling/' + str(testing_subjects[0]) + '_classes.csv',
                   testing_set_true_labels, delimiter=',')
        np.savetxt('sleep_modeling/' + str(testing_subjects[0]) + '_predicted_classes.csv',
                   predicted_labels, delimiter=',')
        # Report the false positive rate at a fixed 0.85 true positive rate
        true_positive_rate_for_interpolation = 0.85
        false_positive_rates, true_positive_rates, thresholds = roc_curve(testing_set_true_labels,
                                                                          class_probabilities[:, 1],
                                                                          pos_label=1, drop_intermediate=False)
        print('Subject ID: ' + str(testing_subjects[0]))
        print('False positive rate: ' + str(
            np.interp(true_positive_rate_for_interpolation, true_positive_rates, false_positive_rates)))
        print('True positive rate: ' + str(true_positive_rate_for_interpolation))
        print('\n\n')
    return testing_set_true_labels, predicted_labels, class_probabilities
def parallel_roc(trial_dictionary):
    """
    Train and test one model for a single ROC trial; packaged as a single-dict
    function so it could be handed to a multiprocessing pool.

    Args:
        trial_dictionary (dict): Everything needed to train and test the model
            for one classifier/feature-set trial

    Returns:
        list: Performance metrics for the training/testing iteration
    """
    # Ground truth, hard predictions, and per-class probabilities for this trial
    testing_set_true_labels, predicted_labels, class_probabilities = train_and_test_model(
        trial_dictionary['train_set'],
        trial_dictionary['test_set'],
        trial_dictionary['method_key'],
        trial_dictionary['method'],
        trial_dictionary['feature_set'],
        trial_dictionary['data_dict'])
    if run_flag == utilities.RUN_SW:
        # Binary sleep/wake: standard ROC plus interpolated threshold metrics
        false_positive_rates, true_positive_rates, thresholds = roc_curve(
            testing_set_true_labels, class_probabilities[:, 1], pos_label=1,
            drop_intermediate=False)
        performance = utilities.thresh_interpolation(false_positive_rates, true_positive_rates, thresholds,
                                                     class_probabilities, testing_set_true_labels)
        return [false_positive_rates, true_positive_rates, thresholds, performance]
    # Three-class wake/NREM/REM: custom ROC built from paired binary searches
    return list(roc_curve_rem(testing_set_true_labels, class_probabilities))
def roc_curve_rem(true_labels, class_probabilities):
    """
    Make an "ROC curve for NREM/REM/wake classification" by looping over desired false positive rates
    and performing two binary searches: one for a wake threshold, and one to balance the accuracies of the REM
    and NREM classes

    NOTE(review): assumes true_labels contains at least one epoch of each class
    (wake=0, NREM=1, REM=2); the accuracy denominators below would otherwise be
    zero -- confirm with callers.

    Args:
        true_labels (np.array): Ground truth labels for tested epochs
        class_probabilities (np.array): Class probabilities for tested epochs
    Returns:
        false positive rates, average NREM/REM accuracies, individual REM/NREM accuracies, best accuracies
        found during the search, and kappas at best accuracies
    """
    goal_false_positive_spread = []  # Spread of targeted goal false positive rates
    for i in range(0, NUM_FALSE_POSITIVE_POINTS_REM):
        goal_false_positive_spread.append(i / (NUM_FALSE_POSITIVE_POINTS_REM * 1.0))
    goal_false_positive_spread = np.array(goal_false_positive_spread)
    # Holders for performance metrics
    false_positive_rate_spread = []
    true_positive_rate_spread = []
    accuracies = []
    kappas = []
    nrem_class_accuracies = []
    rem_class_accuracies = []
    start = time.time()
    true_wake_indices = np.where(true_labels == 0)[0]  # Indices where ground truth is wake
    true_nrem_indices = np.where(true_labels == 1)[0]  # Indices of ground truth NREM
    true_rem_indices = np.where(true_labels == 2)[0]  # Indices of ground truth REM
    # Get coverage over entire x-axis of ROC curve by repeating binary searches over a spread
    for goal_false_positive_rate in goal_false_positive_spread:
        false_positive_rate = -1
        binary_search_counter = 0
        # Search while we haven't found the target false positive rate
        while (false_positive_rate < goal_false_positive_rate - FALSE_POSITIVE_BUFFER
               or false_positive_rate >= goal_false_positive_rate + FALSE_POSITIVE_BUFFER) and binary_search_counter < MAX_ATTEMPTS_WAKE_BINARY_SEARCH:
            if binary_search_counter == 0:  # Start binary search conditions
                threshold_for_sleep = 0.5
                threshold_delta = 0.25
            else:  # Update threshold based on difference between goal and actual false positive rate
                if false_positive_rate < goal_false_positive_rate - FALSE_POSITIVE_BUFFER:
                    threshold_for_sleep = threshold_for_sleep - threshold_delta
                    threshold_delta = threshold_delta / 2
                if false_positive_rate >= goal_false_positive_rate + FALSE_POSITIVE_BUFFER:
                    threshold_for_sleep = threshold_for_sleep + threshold_delta
                    threshold_delta = threshold_delta / 2
            if goal_false_positive_rate == 1:  # Edge cases
                threshold_for_sleep = 0.0
            if goal_false_positive_rate == 0:
                threshold_for_sleep = 1.0
            # An epoch is called "sleep" when P(not wake) clears the current threshold
            predicted_sleep_indices = np.where(1 - np.array(class_probabilities[:, 0]) >= threshold_for_sleep)[0]
            predicted_labels = np.zeros(np.shape(true_labels))
            predicted_labels[predicted_sleep_indices] = 1  # Set locations of predicted sleep to 1
            predicted_labels_at_true_wake_indices = predicted_labels[true_wake_indices]
            # FPR: 1 - Wake scored as wake, a.k.a 1 - (Total true wake - true wake scored as sleep)/(Total true wake)
            number_wake_correct = len(true_wake_indices) - np.count_nonzero(predicted_labels_at_true_wake_indices)
            fraction_wake_correct = number_wake_correct / (len(true_wake_indices) * 1.0)
            false_positive_rate = 1.0 - fraction_wake_correct
            binary_search_counter = binary_search_counter + 1
            # # Uncomment for debugging:
            # print('Goal FP = ' + str(goal_false_positive_rate) + ' Thresh: ' + str(threshold_for_sleep) + ',
            # Delta: ' + str(threshold_delta) + ', False positive rate: ' + str(false_positive_rate) + ',
            # Count: ' + str(binary_search_counter))
        if binary_search_counter < MAX_ATTEMPTS_WAKE_BINARY_SEARCH:  # Checks we found our target false positive rate
            # Initial values for binary search
            smallest_accuracy_difference = 2  # Difference between NREM and REM accuracies
            true_positive_rate = 0
            rem_accuracy = 0
            nrem_accuracy = 0
            best_accuracy = -1
            kappa_at_best_accuracy = -1
            # Initial values for second threshold binary search
            count_thresh = 0
            threshold_for_rem = 0.5
            threshold_delta_rem = 0.5
            # Second binary search: split the predicted-sleep epochs into NREM vs REM
            # so the two class accuracies come out (approximately) equal
            while count_thresh < MAX_ATTEMPTS_NREM_REM_BINARY_SEARCH and \
                    smallest_accuracy_difference > REM_NREM_ACCURACY_DIFFERENCE:
                count_thresh = count_thresh + 1
                for predicted_sleep_index in range(len(predicted_sleep_indices)):
                    predicted_sleep_epoch = predicted_sleep_indices[predicted_sleep_index]
                    if class_probabilities[predicted_sleep_epoch, 2] > threshold_for_rem:
                        predicted_labels[predicted_sleep_epoch] = 2  # Set to REM sleep
                    else:
                        predicted_labels[predicted_sleep_epoch] = 1  # Set to NREM sleep
                # Compute accuracy and kappa at this threshold during the search
                accuracy = accuracy_score(predicted_labels, true_labels)
                kappa = cohen_kappa_score(predicted_labels, true_labels)
                if accuracy > best_accuracy:  # Save if we've exceeded best accuracy
                    best_accuracy = accuracy
                    kappa_at_best_accuracy = kappa
                predicted_nrem_indices = np.where(predicted_labels == 1)[0]
                predicted_rem_indices = np.where(predicted_labels == 2)[0]
                # Compute NREM/REM accuracies -- number of true class epochs scored as class, divided by number in class
                correct_nrem_indices = np.intersect1d(predicted_nrem_indices, true_nrem_indices)
                correct_rem_indices = np.intersect1d(predicted_rem_indices, true_rem_indices)
                nrem_accuracy = len(correct_nrem_indices) / (1.0 * len(true_nrem_indices))
                rem_accuracy = len(correct_rem_indices) / (1.0 * len(true_rem_indices))
                true_positive_rate = (rem_accuracy + nrem_accuracy) / 2.0
                smallest_accuracy_difference = np.abs(nrem_accuracy - rem_accuracy)
                if rem_accuracy < nrem_accuracy:
                    threshold_for_rem = threshold_for_rem - threshold_delta_rem / 2.0
                else:
                    threshold_for_rem = threshold_for_rem + threshold_delta_rem / 2.0
                threshold_delta_rem = threshold_delta_rem / 2.0
            # Add found values to holders (goal FPs that never converged are skipped)
            false_positive_rate_spread.append(false_positive_rate)
            true_positive_rate_spread.append(true_positive_rate)
            nrem_class_accuracies.append(nrem_accuracy)
            rem_class_accuracies.append(rem_accuracy)
            accuracies.append(best_accuracy)
            kappas.append(kappa_at_best_accuracy)
    end = time.time()
    if not PRINT_TABLE:
        print('Elapsed time for all goal FPs search: ' + str(end - start))
    false_positive_rate_spread = np.array(false_positive_rate_spread)
    true_positive_rate_spread = np.array(true_positive_rate_spread)
    nrem_class_accuracies = np.array(nrem_class_accuracies)
    rem_class_accuracies = np.array(rem_class_accuracies)
    accuracies = np.array(accuracies)
    kappas = np.array(kappas)
    return false_positive_rate_spread, true_positive_rate_spread, nrem_class_accuracies, rem_class_accuracies, accuracies, kappas
def run_roc(method_key, feature_set, data_dict, train_test_dict, legend_text, plot_color):
    """
    Plots ROC curve for specified feature set and classifier

    Runs NUM_REPS_TRAIN_TEST train/test trials, averages the per-trial ROC
    curves on a common false-positive-rate grid, plots the averaged curve on
    the current matplotlib figure, and optionally prints LaTeX table rows.

    Args:
        method_key (str): Key for classifier getting used
        feature_set (dict): Features to pass to classifier
        data_dict (dict): Contains all the subject data for classifiaction
        train_test_dict (dict): Contains training/testing subject splits for all trials
            (NOTE: this parameter shadows the module-level global of the same name)
        legend_text (str): Label for legend
        plot_color (RGBA): color to plot
    """
    method = METHOD_DICT[method_key]  # Classifier to test
    # NOTE(review): `params` is only consumed by the disabled pool.map path below;
    # with parallelization commented out it is accumulated but unused.
    params = []
    if verbose:
        print('Running trials...')
    output = []
    for run in range(0, NUM_REPS_TRAIN_TEST):  # Pre-builds dictionary to pass for training/testing
        train_set, test_set = train_test_dict[run]
        trial_dictionary = dict()
        trial_dictionary['run'] = run
        trial_dictionary['method'] = method
        trial_dictionary['method_key'] = method_key
        trial_dictionary['feature_set'] = feature_set
        trial_dictionary['data_dict'] = data_dict
        trial_dictionary['train_set'] = train_set
        trial_dictionary['test_set'] = test_set
        params.append(trial_dictionary)
        if run_flag == utilities.RUN_REM or run_flag == utilities.RUN_SW:
            output.append(parallel_roc(trial_dictionary))
    # TODO: Figure out why parallelization is causing problems
    # if run_flag == utilities.RUN_SW:
    #     output = pool.map(parallel_roc,params)
    if verbose:
        print('Looping over trials...')
    # Create false positive rate range to interpolate results over
    false_positive_spread = []
    for i in range(0, NUM_FALSE_POSITIVE_POINTS_PLOT):
        false_positive_spread.append((i + 1) / (NUM_FALSE_POSITIVE_POINTS_PLOT * 1.0))
    false_positive_spread = np.array(false_positive_spread)
    true_positive_spread = np.zeros(np.shape(false_positive_spread))
    # Average the results of all trials
    if run_flag == utilities.RUN_SW:
        avg_performance_at_interpolated_points = []
        for run in range(0, NUM_REPS_TRAIN_TEST):
            false_positive_rate = output[run][0]
            true_positive_rate = output[run][1]
            performance_at_interpolated_points = output[run][3]  # Interpolation points for tables in paper
            # Adds up performance across all true positive thresholds, to average over trials
            for interpolated_point_index in range(0, len(performance_at_interpolated_points)):
                if len(avg_performance_at_interpolated_points) <= interpolated_point_index:
                    # First trial seeds the running totals
                    performance_for_run = np.array(performance_at_interpolated_points[interpolated_point_index])
                    avg_performance_at_interpolated_points.append(performance_for_run)
                else:
                    performance_for_run = np.array(performance_at_interpolated_points[interpolated_point_index])
                    avg_performance_at_interpolated_points[interpolated_point_index] = \
                        avg_performance_at_interpolated_points[interpolated_point_index] + performance_for_run
            true_positive_rate_interpolated = np.interp(false_positive_spread, false_positive_rate, true_positive_rate)
            true_positive_spread = true_positive_spread + true_positive_rate_interpolated
        true_positive_spread = true_positive_spread / NUM_REPS_TRAIN_TEST
        # Insert (0,0) point for plotting curves
        false_positive_spread = np.insert(false_positive_spread, 0, 0)
        true_positive_spread = np.insert(true_positive_spread, 0, 0)
        false_positive_spread = np.array(false_positive_spread)
        true_positive_spread = np.array(true_positive_spread)
        plt.plot(false_positive_spread, true_positive_spread, label=legend_text, color=plot_color)  # Plot line for ROC
        if PRINT_TABLE:
            print('\hline ' + utilities.string_from_features(feature_set) + ' & ')
            # NOTE(review): the loop bound reuses `performance_at_interpolated_points`
            # left over from the LAST trial above -- works only because every trial
            # yields the same number of interpolation points.
            for interpolated_point_index in range(0, len(performance_at_interpolated_points)):
                performance_metrics = avg_performance_at_interpolated_points[
                                          interpolated_point_index] / NUM_REPS_TRAIN_TEST
                line = ''
                if interpolated_point_index > 0:
                    line = ' & '
                for performance_item in performance_metrics[:-1]:
                    line = line + str(round(performance_item, 3)) + ' & '
                if interpolated_point_index == 0:
                    # AUC is only printed on the first row
                    line = line + str(round(performance_metrics[-1], 3)) + ' \\\\'
                else:
                    line = line + ' \\\\'
                print(line)
    if run_flag == utilities.RUN_REM:
        nrem_class_accuracy_spread = np.zeros(np.shape(false_positive_spread))
        rem_class_accuracy_spread = np.zeros(np.shape(false_positive_spread))
        accuracy_spread = np.zeros(np.shape(false_positive_spread))
        kappa_spread = np.zeros(np.shape(false_positive_spread))
        for run in range(0, NUM_REPS_TRAIN_TEST):
            # Get performance for trial
            false_positive_rate = output[run][0]
            true_positive_rate = output[run][1]
            nrem_class_accuracy = output[run][2]
            rem_class_accuracy = output[run][3]
            accuracies = output[run][4]
            kappas = output[run][5]
            # Interpolate to match the desired spread
            true_positive_rate_interpolated = np.interp(false_positive_spread, false_positive_rate, true_positive_rate)
            nrem_accuracy_interpolated = np.interp(false_positive_spread, false_positive_rate, nrem_class_accuracy)
            rem_accuracy_interpolated = np.interp(false_positive_spread, false_positive_rate, rem_class_accuracy)
            accuracy_interpolated = np.interp(false_positive_spread, false_positive_rate, accuracies)
            kappa_interpolated = np.interp(false_positive_spread, false_positive_rate, kappas)
            # Add to cumulative totals for each value
            true_positive_spread = true_positive_spread + true_positive_rate_interpolated
            nrem_class_accuracy_spread = nrem_class_accuracy_spread + nrem_accuracy_interpolated
            rem_class_accuracy_spread = rem_class_accuracy_spread + rem_accuracy_interpolated
            accuracy_spread = accuracy_spread + accuracy_interpolated
            kappa_spread = kappa_spread + kappa_interpolated
        # Divide by number of trials to get average
        true_positive_spread = true_positive_spread / NUM_REPS_TRAIN_TEST
        nrem_class_accuracy_spread = nrem_class_accuracy_spread / NUM_REPS_TRAIN_TEST
        rem_class_accuracy_spread = rem_class_accuracy_spread / NUM_REPS_TRAIN_TEST
        accuracy_spread = accuracy_spread / NUM_REPS_TRAIN_TEST
        kappa_spread = kappa_spread / NUM_REPS_TRAIN_TEST
        # For tables, interpolate to find threshold where desired false positive rate is met
        nrem_accuracy_at_interpolated_point = np.interp(FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES,
                                                        false_positive_spread, nrem_class_accuracy_spread)
        rem_accuracy_at_interpolated_point = np.interp(FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES,
                                                       false_positive_spread, rem_class_accuracy_spread)
        index_of_best_accuracy = np.argmax(accuracy_spread)
        if PRINT_TABLE:
            print('\hline ' + utilities.string_from_features(feature_set) + ' & ')
            line = str(round(FALSE_POSITIVE_INTERPOLATION_POINT_REM_NREM_TABLES, 3)) + ' & ' \
                   + str(round(nrem_accuracy_at_interpolated_point, 3)) + ' & ' \
                   + str(round(rem_accuracy_at_interpolated_point, 3))
            line = line + ' & ' + str(round(accuracy_spread[index_of_best_accuracy], 3)) + ' & ' + \
                   str(round(kappa_spread[index_of_best_accuracy], 3))
            line = line + ' \\\\'
            print(line)
        # Insert(0,0) point for ROC curve
        false_positive_spread = np.insert(false_positive_spread, 0, 0)
        true_positive_spread = np.insert(true_positive_spread, 0, 0)
        nrem_class_accuracy_spread = np.insert(nrem_class_accuracy_spread, 0, 0)
        rem_class_accuracy_spread = np.insert(rem_class_accuracy_spread, 0, 0)
        false_positive_spread = np.array(false_positive_spread)
        true_positive_spread = np.array(true_positive_spread)
        tps_nrem = np.array(nrem_class_accuracy_spread)
        tps_rem = np.array(rem_class_accuracy_spread)
        # Plot line for ROC (solid = averaged TPR, dotted = NREM accuracy, dashed = REM accuracy)
        plt.plot(false_positive_spread, true_positive_spread, label=legend_text, color=plot_color)
        plt.plot(false_positive_spread, tps_nrem, color=plot_color, linestyle=':')
        plt.plot(false_positive_spread, tps_rem, color=plot_color, linestyle='--')
def make_method_roc(method_key):
    """
    Plot ROC curves for every configured feature set using one classifier.

    Args:
        method_key (str): Key into METHOD_DICT naming the classifier to plot
    """
    timer_start = time.time()
    if verbose:
        print("Starting method ROC...")
    # Optional LaTeX table header for the paper
    if PRINT_TABLE:
        if run_flag == utilities.RUN_SW:
            print('\\begin{table} \caption{' + method_key +
                  ' Summary Statistics} \\begin{tabular}{l*{5}{c}} & Accuracy & Specificity & Sensitivity & $\kappa$ & AUC \\\\ ')
        if run_flag == utilities.RUN_REM:
            print('\\begin{table} \caption{' + method_key +
                  ' REM Summary Statistics} \\begin{tabular}{l*{5}{c}} & Wake Correct & NREM Correct & REM Correct & Best accuracy & $\kappa$ \\\\ ')
    # One ROC curve per feature set, all drawn on the same axes
    for current_features, case_label, case_color in zip(feature_sets, cases, colors):
        data_dict = utilities.build_data_dictionary(current_features)  # Loads all data to dict
        run_roc(method_key, current_features, data_dict, train_test_dict, case_label, case_color)
    timer_end = time.time()
    if not PRINT_TABLE:
        print('Elapsed time: ' + str(timer_end - timer_start))
    # Optional LaTeX table footer
    if PRINT_TABLE:
        if run_flag == utilities.RUN_SW:
            print('\end{tabular} \label{tab:' + method_key[0:4] + 'params} \end{table}')
        if run_flag == utilities.RUN_REM:
            print('\end{tabular} \label{tab:' + method_key[0:4] + '_rem_params} \end{table}')
    utilities.tidy_plot()
    font = font_manager.FontProperties(family='Arial', style='normal', size=14)
    if method_key == 'MLP':  # Legend is only drawn on the MLP figure
        plt.legend(bbox_to_anchor=(1.0, 0.4), borderaxespad=0., prop=font)
    plt.xlabel('False positive rate', fontsize=16, fontname=font_name)
    plt.ylabel('True positive rate', fontsize=16, fontname=font_name)
    plt.title(method_key, fontsize=18, fontname=font_name, fontweight='bold')
    if run_flag == utilities.RUN_REM:
        type_string = '_rem_'
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 0.8])
    else:
        type_string = '_sw_'
    plt.savefig(method_key + '_' + str(NUM_REPS_TRAIN_TEST) + description + type_string + '_roc.png')
    plt.close()
def run_all(flag, trial_count):
    """
    Run every classifier in METHOD_DICT for either sleep/wake or wake/NREM/REM.

    Args:
        flag (int): Type of classification to run (utilities.RUN_SW or utilities.RUN_REM)
        trial_count (int): How many times to repeat training and testing
    """
    global train_test_dict
    global run_flag
    global NUM_REPS_TRAIN_TEST
    global description
    run_flag = flag
    NUM_REPS_TRAIN_TEST = trial_count
    plt.ioff()  # Non-interactive plotting; figures are saved to disk, not shown
    description = 'output'
    # NOTE(review): removed the unused multiprocessing.Pool(processes=8) the original
    # created here -- no work was ever submitted to it (the pool.map path is commented
    # out in run_roc), so it only spawned 8 idle worker processes per call.
    # Use a consistent train/test set across classifiers
    train_test_dict = utilities.make_train_test_dict(NUM_REPS_TRAIN_TEST)
    for method_key in METHOD_DICT.keys():
        if not PRINT_TABLE:
            print(method_key)
        make_method_roc(method_key)
    print('\a')  # Terminal bell to signal completion
def run_one(method_key, flag, trial_count):
    """
    Run a single classifier for either sleep/wake or wake/NREM/REM classification.

    Args:
        method_key (str): Key into METHOD_DICT naming the classifier to use
        flag (int): Type of classification to run (utilities.RUN_SW or utilities.RUN_REM)
        trial_count (int): How many times to repeat training and testing
    """
    global train_test_dict
    global run_flag
    global NUM_REPS_TRAIN_TEST
    global description
    run_flag = flag
    NUM_REPS_TRAIN_TEST = trial_count
    plt.ioff()  # Non-interactive plotting; figures are saved to disk, not shown
    description = 'output'
    # NOTE(review): removed the unused multiprocessing.Pool(processes=8) the original
    # created here -- no work was ever submitted to it (the pool.map path is commented
    # out in run_roc), so it only spawned 8 idle worker processes per call.
    # Use a consistent train/test set across classifiers
    train_test_dict = utilities.make_train_test_dict(NUM_REPS_TRAIN_TEST, 0.1)
    make_method_roc(method_key)
# Debugging: Prints subject performance
def check_subjects():
    """Debug helper: print per-subject sleep/wake performance for the HR-only MLP."""
    global run_flag
    run_flag = utilities.RUN_SW
    chosen_features = {'Motion': False, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False}
    export_all_subjects(chosen_features, 'MLP')
# For sleep model/Kalman filter, saves classifier probabilities to file
def sleep_model_export():
    """Save MLP wake/NREM/REM class probabilities to file for the sleep model / Kalman filter."""
    global run_flag
    run_flag = utilities.RUN_REM
    chosen_features = {'Motion': True, 'HR': True, 'Clock': False, 'Time': False, 'CircModel': False}
    export_all_subjects(chosen_features, 'MLP')
# For Kalman filter and debugging, train on all subjects but one; save probabilities for tested class:
def export_all_subjects(feature_set, method_key):
    """
    Leave-one-out export for the Kalman filter / debugging: for each subject,
    train on all other subjects and save the held-out subject's class
    probabilities to file (via train_and_test_model's save_to_file flag).

    Args:
        feature_set (dict): Feature flags to use for training/testing
        method_key (str): Key into METHOD_DICT naming the classifier
    """
    data_dict = utilities.build_data_dictionary(feature_set)
    train_set = utilities.FULL_SET
    for ind, subject_id in enumerate(train_set):
        # Train on everyone except the held-out subject. Slicing handles
        # ind == 0 naturally (empty prefix), so no special case is needed.
        train_set_temp = train_set[0:ind] + train_set[ind + 1:]
        train_and_test_model(train_set_temp, [subject_id], method_key,
                             METHOD_DICT[method_key], feature_set, data_dict, True)
if __name__ == '__main__':
    # Script entry point: export per-subject wake/NREM/REM class probabilities
    # for the downstream sleep model / Kalman filter.
    # check_subjects()
    sleep_model_export()
|
en
| 0.778967
|
# Load params saved from file # Print LaTeX table for paper # REM Binary search parameters # How close we have to be to the desired goal FP before it can be added to the average # Number of times to try before quitting the binary search # How close we want NREM and REM accuracies to be # Constants for plotting and tables Trains and tests model for given feature set and classifier. Args: training_subjects ([int]): Subject IDs in training set testing_subjects ([int]): Subject IDs in testing set method_key (str): Key for classifier classifier : Classifier object feature_set (dict): Feature set to test data_dict (dict): Dictionary to look up subject training and testing data save_to_file (bool) : Flag if want to save probabilities to file Returns: [int]: ground truth labels np.array : predicted labels np.array : class prediction probabilities # TODO: Faster parameter searching with MLP # Get labels and features for training and testing sets # Convert raw labels to 0/1 or 0-2 # Set class weights for those methods that allow them # Handles wake/NREM/REM case # # Debug-only: Uncomment to reverse the training/testing order, and test Apple Watch data on MESA-trained models # classifier = np.load('trained_models/' + classifier_abbrev + # utilities.string_from_features(feature_set) + '_trained_modelMESA.npy').item() # Fit model to training data, get class predictions and class probabilities # Save trained model to use for testing MESA cohort # Optional; save to file for Kalman filter and print performance metrics Calls training and testing model for ROC; allows parallelization Args: trial_dictionary (dict): All information needed to train and test the model for a classifier/feature set Returns: Performance metrics for the training/testing iteration # Get ground truth, predictions, and class probabilities # If sleep/wake classification # If wake/NREM/REM classification Make an "ROC curve for NREM/REM/wake classification" by looping over desired false positive rates and 
performing two binary searches: one for a wake threshold, and one to balance the accuracies of the REM and NREM classes Args: true_labels (np.array): Ground truth labels for tested epochs class_probabilities (np.array): Class probabilities for tested epochs Returns: false positive rates, average NREM/REM accuracies, individual REM/NREM accuracies, best accuracies found during the search, and kappas at best accuracies # Spread of targeted goal false positive rates # Holders for performance metrics # Indices where ground truth is wake # Indices of ground truth NREM # Indices of ground truth REM # Get coverage over entire x-axis of ROC curve by repeating binary searches over a spread # Search while we haven't found the target false positive rate # Start binary search conditions # Update threshold based on difference between goal and actual false positive rate # Edge cases # Set locations of predicted sleep to 1 # FPR: 1 - Wake scored as wake, a.k.a 1 - (Total true wake - true wake scored as sleep)/(Total true wake) # # Uncomment for debugging: # print('Goal FP = ' + str(goal_false_positive_rate) + ' Thresh: ' + str(threshold_for_sleep) + ', # Delta: ' + str(threshold_delta) + ', False positive rate: ' + str(false_positive_rate) + ', # Count: ' + str(binary_search_counter)) # Checks we found our target false positive rate # Initial values for binary search # Difference between NREM and REM accuracies # Initial values for second threshold binary search # Set to REM sleep # Set to NREM sleep # Compute accuracy and kappa at this threshold during the search # Save if we've exceeded best accuracy # Compute NREM/REM accuracies -- number of true class epochs scored as class, divided by number in class # Add found values to holders Plots ROC curve for specified feature set and classifier Args: method_key (str): Key for classifier getting used feature_set (dict): Features to pass to classifier data_dict (dict): Contains all the subject data for classifiaction train_test_dict 
(dict): Contains training/testing subject splits for all trials legend_text (str): Label for legend plot_color (RGBA): color to plot # Classifier to test # Pre-builds dictionary to pass for training/testing # TODO: Figure out why parallelization is causing problems # if run_flag == utilities.RUN_SW: # output = pool.map(parallel_roc,params) # Create false positive rate range to interpolate results over # Average the results of all trials # Interpolation points for tables in paper # Adds up performance across all true positive thresholds, to average over trials # Insert (0,0) point for plotting curves # Plot line for ROC # Get performance for trial # Interpolate to match the desired spread # Add to cumulative totals for each value # Divide by number of trials to get average # For tables, interpolate to find threshold where desired false positive rate is met # Insert(0,0) point for ROC curve # Plot line for ROC Plots ROC curve for all feature sets given classifier Args: method_key (str): Key for classifier to plot # Loop over all feature sets # Loads all data to dict # Plots ROC curve for feature set # Add legend Call to run all classifiers for either sleep/wake or wake/NREM/REM Args: flag (int): Type of classification to run (wake/sleep, or wake/NREM/REM) trial_count(int): How many times to repeat training and testing # Use a consistent train/test set across classifiers Call to run a single classifier for either sleep/wake or wake/NREM/REM Args: method_key (str): Key for classifier to use flag (int): Type of classification to run (wake/sleep, or wake/NREM/REM) trial_count(int): How many times to repeat training and testing # Use a consistent train/test set across classifiers # Debugging: Prints subject performance # For sleep model/Kalman filter, saves classifier probabilities to file # For Kalman filter and debugging, train on all subjects but one; save probabilities for tested class: # check_subjects()
| 2.252253
| 2
|
eggshell/nc/nc_fetch.py
|
Zeitsperre/eggshell
| 0
|
6626003
|
from eggshell import utils
from eggshell import config
from datetime import datetime as dt
from datetime import timedelta
# import logging
# logger = logging.getLogger(__name__)
import logging
LOGGER = logging.getLogger("PYWPS")
_PRESSUREDATA_ = [
'NCEP_slp', 'NCEP_z1000', 'NCEP_z925', 'NCEP_z850', 'NCEP_z700', 'NCEP_z600', 'NCEP_z500', 'NCEP_z400', 'NCEP_z300',
'NCEP_z250', 'NCEP_z200', 'NCEP_z150', 'NCEP_z100', 'NCEP_z70', 'NCEP_z50', 'NCEP_z30', 'NCEP_z20', 'NCEP_z10',
'20CRV2_prmsl',
'20CRV2_z1000', '20CRV2_z950', '20CRV2_z900', '20CRV2_z850', '20CRV2_z800', '20CRV2_z750', '20CRV2_z700',
'20CRV2_z650', '20CRV2_z600', '20CRV2_z550', '20CRV2_z500', '20CRV2_z450', '20CRV2_z400', '20CRV2_z350',
'20CRV2_z300', '20CRV2_z250', '20CRV2_z200', '20CRV2_z150', '20CRV2_z100', '20CRV2_z70', '20CRV2_z50',
'20CRV2_z30', '20CRV2_z20', '20CRV2_z10',
'20CRV2c_prmsl',
'20CRV2c_z1000', '20CRV2c_z950', '20CRV2c_z900', '20CRV2c_z850', '20CRV2c_z800', '20CRV2c_z750', '20CRV2c_z700',
'20CRV2c_z650', '20CRV2c_z600', '20CRV2c_z550', '20CRV2c_z500', '20CRV2c_z450', '20CRV2c_z400', '20CRV2c_z350',
'20CRV2c_z300', '20CRV2c_z250', '20CRV2c_z200', '20CRV2c_z150', '20CRV2c_z100', '20CRV2c_z70', '20CRV2c_z50',
'20CRV2c_z30', '20CRV2c_z20', '20CRV2c_z10',
]
_EOBSVARIABLES_ = ['tg', 'tx', 'tn', 'rr']
def reanalyses(start=1948, end=None, variable='slp', dataset='NCEP', timres='day', getlevel=True):
    """
    Fetches reanalysis data (NCEP, 20CRV2 or 20CRV2c) to the local file system.

    :param start: int start year to fetch source data (dataset-specific default if None)
    :param end: int end year to fetch source data (if None, the current year is used)
    :param variable: variable name (default='slp'); geopotential height is given as e.g. z700
    :param dataset: 'NCEP' (default), '20CRV2' or '20CRV2c'
    :param timres: time resolution, 'day' (default) or '6h' (20CR datasets only)
    :param getlevel: if True (default) the pressure level encoded in *variable* is extracted
    :return list: list of path/files.nc
    """
    # imports kept function-local: netCDF4 / ogc_utils are only needed here
    from netCDF4 import Dataset
    from os import path, system, remove
    from eggshell.nc.ogc_utils import call
    from shutil import move

    try:
        if end is None:
            end = dt.now().year
        obs_data = []
        if start is None:
            if dataset == 'NCEP':
                start = 1948
            if dataset == '20CR':
                start = 1851
            # NOTE(review): dataset '20CRV2'/'20CRV2c' with start=None leaves start
            # unset and fails in the loop below -- confirm intended defaults
        LOGGER.info('start / end date set')
    except Exception as ex:
        msg = "get_OBS module failed to get start end dates: {}".format(ex)
        LOGGER.exception(msg)
        raise Exception(msg)

    # geopotential height variables are encoded as 'z<level>' (e.g. z700)
    if 'z' in variable:
        level = variable.strip('z')
    else:
        level = None
    LOGGER.info('level: %s' % level)

    cur_year = dt.now().year
    cur_month = dt.now().month
    cur_day = dt.now().day

    try:
        for year in range(start, end + 1):
            LOGGER.debug('fetching single file for %s year %s ' % (dataset, year))
            url = None  # stays None for unknown dataset/variable combinations
            try:
                if dataset == 'NCEP':
                    if variable == 'slp':
                        url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/%s.%s.nc' % (variable, year)  # noqa
                    if variable == 'pr_wtr':
                        url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/pr_wtr.eatm.%s.nc' % (year)  # noqa
                    if 'z' in variable:
                        url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/pressure/hgt.%s.nc' % (year)  # noqa
                elif dataset == '20CRV2':
                    if variable == 'prmsl':
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/monolevel/prmsl.%s.nc' % year  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/Dailies/monolevel/prmsl.%s.nc' % year  # noqa
                    if 'z' in variable:
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/pressure/hgt.%s.nc' % (year)  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/Dailies/pressure/hgt.%s.nc' % (year)  # noqa
                elif dataset == '20CRV2c':
                    if variable == 'prmsl':
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/monolevel/prmsl.%s.nc' % year  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/Dailies/monolevel/prmsl.%s.nc' % year  # noqa
                    if 'z' in variable:
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/pressure/hgt.%s.nc' % (year)  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/Dailies/pressure/hgt.%s.nc' % (year)  # noqa
                else:
                    LOGGER.debug('Dataset %s not known' % dataset)
                LOGGER.debug('url: %s' % url)
            except Exception as ex:
                msg = "could not set url: {}".format(ex)
                LOGGER.exception(msg)
            try:
                # force updating of the current year dataset
                if year == cur_year:
                    # py3 fix: 'import urlparse' is Python-2-only
                    from urllib.parse import urlparse
                    # NOTE(review): previously 'from blackswan import config' shadowed
                    # the module-level eggshell config -- assuming the same cache
                    # layout; confirm cache_path() is equivalent
                    parsed_url = urlparse(url)
                    cur_filename = path.join(config.cache_path(), parsed_url.netloc, parsed_url.path.strip('/'))
                    if path.exists(cur_filename):
                        fn_time = dt.fromtimestamp(path.getmtime(cur_filename))
                        LOGGER.debug('Rean data for %s year creation time: %s' % (year, fn_time))
                        if (fn_time.year == cur_year) and (fn_time.month == cur_month) and (fn_time.day == cur_day):
                            LOGGER.debug('Rean data for %s year is up-to-date' % year)
                        else:
                            LOGGER.debug('Rean data for %s year forced to update' % year)
                            remove(cur_filename)
                # ###########################################
                # NOTE(review): 'download' was an unresolved name (NameError at
                # runtime); assuming utils.download from the module-level
                # 'from eggshell import utils' -- confirm
                df = utils.download(url, cache=True)
                LOGGER.debug('single file fetched %s ' % year)
                # convert to NETCDF4_CLASSIC
                try:
                    ds = Dataset(df)
                    df_time = ds.variables['time']
                    # Files whose time variable lacks a 'calendar' attribute are
                    # taken as not-yet NETCDF4_CLASSIC and converted.
                    # TODO: check the actual data model instead of the calendar attr
                    if not hasattr(df_time, 'calendar'):
                        p, f = path.split(path.abspath(df))
                        LOGGER.debug("path = %s , file %s " % (p, f))
                        # May be an issue if several users are working at the same time
                        move(df, f)
                        conv = call(resource=f,
                                    output_format_options={'data_model': 'NETCDF4_CLASSIC'},
                                    dir_output=p,
                                    prefix=f.replace('.nc', ''))
                        obs_data.append(conv)
                        LOGGER.debug('file %s to NETCDF4_CLASSIC converted' % conv)
                        # Cleaning, could be 50gb... for each (!) user
                        # TODO Check how links work
                        cmdrm = 'rm -f %s' % (f)
                        system(cmdrm)
                    else:
                        obs_data.append(df)
                    ds.close()
                except Exception as ex:
                    LOGGER.exception('failed to convert into NETCDF4_CLASSIC: {}'.format(ex))
            except Exception as ex:
                msg = "download failed on {}: {}".format(url, ex)
                LOGGER.exception(msg)
        LOGGER.info('Reanalyses data fetched for %s files' % len(obs_data))
    except Exception as ex:
        msg = "get reanalyses module failed to fetch data: {}".format(ex)
        LOGGER.exception(msg)
        raise Exception(msg)

    if (level is None) or (getlevel is False):
        data = obs_data
    else:
        LOGGER.info('get level: %s' % level)
        data = get_level(obs_data, level=level)
    return data
def get_level(resource, level):
    """
    Extract one pressure level from a 4D (time, level, lat, lon) dataset and
    write it back as a 3D variable named 'z<level>'.

    :param resource: netCDF file path or list of file paths with a level dimension
    :param level: pressure level to extract (e.g. 500)
    :return: path to the netCDF file containing the single-level variable
    :raises Exception: re-raises any failure after logging (previously the
        function returned an unbound name, masking the error as UnboundLocalError)
    """
    # NOTE(review): these helpers come from flyingpigeon while the rest of the
    # module uses eggshell equivalents -- confirm this mix is intentional
    from flyingpigeon.ocgis_module import call
    from netCDF4 import Dataset
    from flyingpigeon.utils import get_variable
    from numpy import squeeze
    from os import path

    try:
        if type(resource) == list:
            # sort input files by basename so time steps are concatenated in order
            resource = sorted(resource, key=lambda i: path.splitext(path.basename(i))[0])
        # subset to the single requested level
        level_data = call(resource, level_range=[int(level), int(level)])
        variable = get_variable(level_data)
        LOGGER.info('found %s in file' % variable)
        ds = Dataset(level_data, mode='a')
        try:
            var = ds.variables.pop(variable)
            dims = var.dimensions
            # drop the level axis: (time, level, lat, lon) -> (time, lat, lon)
            new_var = ds.createVariable('z%s' % level, var.dtype, dimensions=(dims[0], dims[2], dims[3]))
            new_var[:, :, :] = squeeze(var[:, 0, :, :])
            try:
                new_var.setncatts({k: var.getncattr(k) for k in var.ncattrs()})
            except Exception:
                # some datasets (e.g. 20CRV2) carry attributes that cannot be copied
                LOGGER.info('Could not set attributes for z%s' % level)
        finally:
            # always close the dataset (previously it leaked on exception)
            ds.close()
        LOGGER.info('level %s extracted' % level)
        data = call(level_data, variable='z%s' % level)
    except Exception:
        LOGGER.exception('failed to extract level')
        raise
    return data
def write_fileinfo(resource, filepath=False):
    """
    Write the file names (and optionally full paths) of a resource list
    to a text file.

    :param resource: list of files to be documented
    :param filepath: if True the absolute filepath is written out as well (default=False)
    :return str: path to the text file with the appropriate information
    """
    from os.path import basename, realpath
    from tempfile import mkstemp

    # create the output file in the current working directory
    _, text_src = mkstemp(dir='.', suffix='.txt')
    try:
        with open(text_src, 'w') as fp:
            fp.write('###############################################\n')
            fp.write('####### birdhouse process ######\n')
            fp.write('###############################################\n')
            if filepath is False:
                fp.write('Following is a list of resource files: \n')
                fp.write('\n')
                for f in resource:
                    fp.write('%s \n' % basename(f))
            else:
                fp.write('Following files are stored to your local discs: \n')
                fp.write('\n')
                for f in resource:
                    fp.write('%s \n' % realpath(f))
        LOGGER.info('resources filenames written to textfile')
    except Exception:
        # best-effort: log and still return the (possibly partial) file
        # (previously a bare 'except:' also swallowed KeyboardInterrupt/SystemExit)
        LOGGER.exception('failed to write file names to file')
    return text_src
# _EODATA_ = ["PSScene3Band__visual",
# "PSScene4Band__analytic",
# "PSScene4Band__analytic_xml",
# "Sentinel2L1C__metadata_aux",
# "Sentinel2L1C__analytic_b1",
# "Sentinel2L1C__analytic_b2", # blue
# "Sentinel2L1C__analytic_b3", # green
# "Sentinel2L1C__analytic_b4", # red
# "Sentinel2L1C__analytic_b8", # nivr
# ]
# PSScene3Band PlanetScope Scenes
# PSScene4Band PlanetScope Scenes
# PSOrthoTile PlanetScope OrthoTiles
# REOrthoTile RapidEye OrthoTiles
# REScene RapidEye Scenes (unorthorectified strips)
# SkySatScene SkySat Scenes
# Landsat8L1G Landsat8 Scenes
# Sentinel2L1C Copernicus Sentinel-2 Scenes
# "_permissions": [
# "assets.analytic_b1:download",
# "assets.analytic_b3:download",
# "assets.analytic_b2:download",
# "assets.analytic_b5:download",
# "assets.analytic_b4:download",
# "assets.analytic_b7:download",
# "assets.analytic_b6:download",
# "assets.analytic_b9:download",
# "assets.analytic_b8:download",
# "assets.analytic_b8a:download",
# "assets.visual:download",
# "assets.metadata_aux:download",
# "assets.analytic_b10:download",
# "assets.analytic_b11:download",
# "assets.analytic_b12:download"
# ],
#
# def fetch_eodata(item_type, asset, token, bbox, period=[dt.today()-timedelta(days=30), dt.today()], cloud_cover=0.5, cache=True):
# """
# search for given EO data product provided by planet.
# The search and appropriate download is limited by bbox and search period
#
# :param item_type: product provided by planet
# :param asset: product asset, (visible, analytic, bands)
# :param token: Authentification token generated by planet Earth Obersavation Explorer
# :param bbox: latitude longitude coordinates defining a bounding box
# :param period: [start , end] datetime objects (default last 30 days)
# :param cloud_cover: threshold for cloud_cover tolerance. 0 = 0percent cloud_cover 1=100percent cloud_cover
# :param cache: if True file (default) is stored in local cache
#
# return list: list of pathes for fetched products
# """
#
# import os
# import requests
# from requests.auth import HTTPBasicAuth
# import shutil
# import time
# from os.path import join
# from os import makedirs
# from flyingpigeon.config import cache_path
# # Enter a bbox: min_lon, max_lon, min_lat, max_lat.
# # xmin ymin xmax ymax
# geojson_geometry = {"type": "Polygon",
# "coordinates": [[
# [bbox[0], bbox[1]], # [14.600830078125, 8.677421123289992],
# [bbox[2], bbox[1]], # [14.797210693359375, 8.677421123289992],
# [bbox[2], bbox[3]], # [14.797210693359375, 8.90678000752024],
# [bbox[0], bbox[3]], # [14.600830078125, 8.90678000752024],
# [bbox[0], bbox[1]], # [14.600830078125, 8.677421123289992]
# ]]}
#
# LOGGER.debug("geojson_geometry: %s" % geojson_geometry)
# # get images that overlap with our AOI
# geometry_filter = {
# "type": "GeometryFilter",
# "field_name": "geometry",
# "config": geojson_geometry
# }
#
# start = period[0]
# end = period[1]
#
#
# LOGGER.debug("Period %s to %s " % (start, end))
#
# # get images acquired within a date range
# date_range_filter = {
# "type": "DateRangeFilter",
# "field_name": "acquired",
# "config": {
# "gte": "%s000Z" % (start.strftime('%Y-%m-%dT%H:%M:%S.')),
# "lte": "%s000Z" % (end.strftime('%Y-%m-%dT%H:%M:%S.')),
# }
# }
#
# # only get images which have <50% cloud coverage
# cloud_cover_filter = {
# "type": "RangeFilter",
# "field_name": "cloud_cover",
# "config": {
# "lte": cloud_cover
# }
# }
#
# # combine our geo, date, cloud filters
# combined_filter = {"type": "AndFilter",
# "config": [geometry_filter, date_range_filter, cloud_cover_filter]}
#
# # API Key
# PLANET_API_KEY = token # os.getenv('PL_API_KEY')
#
# # item_type = item_type, assetproducts[0] # "PSScene4Band"
# # API request object
#
# search_request = {
# "interval": "day",
# "item_types": [item_type],
# "filter": combined_filter
# }
#
# if cache:
# DIR_archiv = cache_path()
# else:
# DIR_archiv = '.'
# DIR = join(DIR_archiv, "EO_data", item_type, asset)
#
# if not os.path.exists(DIR):
# makedirs(DIR)
#
# # fire off the POST request
# search_result = requests.post(
# 'https://api.planet.com/data/v1/quick-search',
# auth=HTTPBasicAuth(PLANET_API_KEY, ''),
# json=search_request)
#
# # LOGGER.info('Search result: %s ' % json.dumps(search_result.json(), indent=1))
#
# # extract image IDs only
# image_ids = [feature['id'] for feature in search_result.json()['features']]
# LOGGER.info("image IDs: %s " % image_ids)
#
# resources = []
# resources_sleeping = []
#
# for image_id in image_ids:
#
# id0 = image_id
# if "xml" in asset:
# filename = "%s.xml" % id0
# else:
# filename = "%s.tif" % id0
#
# local_file = join(DIR, filename) # mkstemp(dir="/home/nils/data/planet/", prefix=id0, suffix='.tif')
#
# if os.path.exists(local_file):
# LOGGER.info('File %s in cache' % filename)
# resources.extend([local_file])
# else:
# id0_url = 'https://api.planet.com/data/v1/item-types/{}/items/{}/assets'.format(item_type, id0)
#
# # Returns JSON metadata for assets in this ID. Learn more: planet.com/docs/reference/data-api/items-assets/#asset
# result = requests.get(id0_url, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
# # List of asset types available for this particular satellite image
# keys = result.json().keys()
# LOGGER.debug("assets in file %s : %s " % (filename, keys))
# # This is "inactive" if the "visual" asset has not yet been activated; otherwise 'active'
# # if 'analytic' in result.json().keys():
# if asset in keys:
# LOGGER.debug("downloading file %s" % filename)
# # LOGGER.debug(result.json()[asset]['status'])
# # Parse out useful links
# links = result.json()[asset]["_links"] # u"analytic"
# self_link = links["_self"]
# activation_link = links["activate"]
# # Request activation of the 'visual' asset:
# activate_result = requests.get(activation_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
# # Parse out useful links
# links = result.json()[asset]["_links"] # u"analytic"
# self_link = links["_self"]
# activation_link = links["activate"]
# # Request activation of the 'visual' asset:
# activate_result = requests.get(activation_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
# activation_status_result = requests.get(self_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
#
# try:
# timeout = time.time() + 30 # 30 seconds from now
# while activation_status_result.json()["status"] != 'active':
# if time.time() > timeout and activation_status_result.json()["status"] == 'inactive':
# LOGGER.debug("File %s is still inactive after 30sec. Giving up" % filename)
# resources_sleeping.extend([filename])
# break
# else:
# LOGGER.debug('File %s is sleeping. gently waking up' % filename)
# LOGGER.debug(activation_status_result.json()["status"])
# time.sleep(30)
# activation_status_result = requests.get(self_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
#
# if time.time() < timeout or activation_status_result.json()["status"] == 'active':
# LOGGER.debug('File ready to download: %s' % (activation_status_result.json()["status"]))
# # Image can be downloaded by making a GET with your Planet API key, from here:
# download_link = activation_status_result.json()["location"]
# r = requests.get(download_link, stream=True, verify=False)
# with open(local_file, 'wb') as fp:
# shutil.copyfileobj(r.raw, fp)
# resources.extend([local_file])
# except:
# LOGGER.exception("failed to download file %s " % filename)
# else:
# LOGGER.debug('Asset not found in keys, most likely no permissions for this data set %s ' % filename)
#
# return resources_sleeping, resources
|
from eggshell import utils
from eggshell import config
from datetime import datetime as dt
from datetime import timedelta
# import logging
# logger = logging.getLogger(__name__)
import logging
LOGGER = logging.getLogger("PYWPS")
_PRESSUREDATA_ = [
'NCEP_slp', 'NCEP_z1000', 'NCEP_z925', 'NCEP_z850', 'NCEP_z700', 'NCEP_z600', 'NCEP_z500', 'NCEP_z400', 'NCEP_z300',
'NCEP_z250', 'NCEP_z200', 'NCEP_z150', 'NCEP_z100', 'NCEP_z70', 'NCEP_z50', 'NCEP_z30', 'NCEP_z20', 'NCEP_z10',
'20CRV2_prmsl',
'20CRV2_z1000', '20CRV2_z950', '20CRV2_z900', '20CRV2_z850', '20CRV2_z800', '20CRV2_z750', '20CRV2_z700',
'20CRV2_z650', '20CRV2_z600', '20CRV2_z550', '20CRV2_z500', '20CRV2_z450', '20CRV2_z400', '20CRV2_z350',
'20CRV2_z300', '20CRV2_z250', '20CRV2_z200', '20CRV2_z150', '20CRV2_z100', '20CRV2_z70', '20CRV2_z50',
'20CRV2_z30', '20CRV2_z20', '20CRV2_z10',
'20CRV2c_prmsl',
'20CRV2c_z1000', '20CRV2c_z950', '20CRV2c_z900', '20CRV2c_z850', '20CRV2c_z800', '20CRV2c_z750', '20CRV2c_z700',
'20CRV2c_z650', '20CRV2c_z600', '20CRV2c_z550', '20CRV2c_z500', '20CRV2c_z450', '20CRV2c_z400', '20CRV2c_z350',
'20CRV2c_z300', '20CRV2c_z250', '20CRV2c_z200', '20CRV2c_z150', '20CRV2c_z100', '20CRV2c_z70', '20CRV2c_z50',
'20CRV2c_z30', '20CRV2c_z20', '20CRV2c_z10',
]
_EOBSVARIABLES_ = ['tg', 'tx', 'tn', 'rr']
def reanalyses(start=1948, end=None, variable='slp', dataset='NCEP', timres='day', getlevel=True):
    """
    Fetches reanalysis data (NCEP, 20CRV2 or 20CRV2c) to the local file system.

    :param start: int start year to fetch source data (dataset-specific default if None)
    :param end: int end year to fetch source data (if None, the current year is used)
    :param variable: variable name (default='slp'); geopotential height is given as e.g. z700
    :param dataset: 'NCEP' (default), '20CRV2' or '20CRV2c'
    :param timres: time resolution, 'day' (default) or '6h' (20CR datasets only)
    :param getlevel: if True (default) the pressure level encoded in *variable* is extracted
    :return list: list of path/files.nc
    """
    # imports kept function-local: netCDF4 / ogc_utils are only needed here
    from netCDF4 import Dataset
    from os import path, system, remove
    from eggshell.nc.ogc_utils import call
    from shutil import move

    try:
        if end is None:
            end = dt.now().year
        obs_data = []
        if start is None:
            if dataset == 'NCEP':
                start = 1948
            if dataset == '20CR':
                start = 1851
            # NOTE(review): dataset '20CRV2'/'20CRV2c' with start=None leaves start
            # unset and fails in the loop below -- confirm intended defaults
        LOGGER.info('start / end date set')
    except Exception as ex:
        msg = "get_OBS module failed to get start end dates: {}".format(ex)
        LOGGER.exception(msg)
        raise Exception(msg)

    # geopotential height variables are encoded as 'z<level>' (e.g. z700)
    if 'z' in variable:
        level = variable.strip('z')
    else:
        level = None
    LOGGER.info('level: %s' % level)

    cur_year = dt.now().year
    cur_month = dt.now().month
    cur_day = dt.now().day

    try:
        for year in range(start, end + 1):
            LOGGER.debug('fetching single file for %s year %s ' % (dataset, year))
            url = None  # stays None for unknown dataset/variable combinations
            try:
                if dataset == 'NCEP':
                    if variable == 'slp':
                        url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/%s.%s.nc' % (variable, year)  # noqa
                    if variable == 'pr_wtr':
                        url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/pr_wtr.eatm.%s.nc' % (year)  # noqa
                    if 'z' in variable:
                        url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/pressure/hgt.%s.nc' % (year)  # noqa
                elif dataset == '20CRV2':
                    if variable == 'prmsl':
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/monolevel/prmsl.%s.nc' % year  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/Dailies/monolevel/prmsl.%s.nc' % year  # noqa
                    if 'z' in variable:
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/pressure/hgt.%s.nc' % (year)  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2/Dailies/pressure/hgt.%s.nc' % (year)  # noqa
                elif dataset == '20CRV2c':
                    if variable == 'prmsl':
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/monolevel/prmsl.%s.nc' % year  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/Dailies/monolevel/prmsl.%s.nc' % year  # noqa
                    if 'z' in variable:
                        if timres == '6h':
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/pressure/hgt.%s.nc' % (year)  # noqa
                        else:
                            url = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/20thC_ReanV2c/Dailies/pressure/hgt.%s.nc' % (year)  # noqa
                else:
                    LOGGER.debug('Dataset %s not known' % dataset)
                LOGGER.debug('url: %s' % url)
            except Exception as ex:
                msg = "could not set url: {}".format(ex)
                LOGGER.exception(msg)
            try:
                # force updating of the current year dataset
                if year == cur_year:
                    # py3 fix: 'import urlparse' is Python-2-only
                    from urllib.parse import urlparse
                    # NOTE(review): previously 'from blackswan import config' shadowed
                    # the module-level eggshell config -- assuming the same cache
                    # layout; confirm cache_path() is equivalent
                    parsed_url = urlparse(url)
                    cur_filename = path.join(config.cache_path(), parsed_url.netloc, parsed_url.path.strip('/'))
                    if path.exists(cur_filename):
                        fn_time = dt.fromtimestamp(path.getmtime(cur_filename))
                        LOGGER.debug('Rean data for %s year creation time: %s' % (year, fn_time))
                        if (fn_time.year == cur_year) and (fn_time.month == cur_month) and (fn_time.day == cur_day):
                            LOGGER.debug('Rean data for %s year is up-to-date' % year)
                        else:
                            LOGGER.debug('Rean data for %s year forced to update' % year)
                            remove(cur_filename)
                # ###########################################
                # NOTE(review): 'download' was an unresolved name (NameError at
                # runtime); assuming utils.download from the module-level
                # 'from eggshell import utils' -- confirm
                df = utils.download(url, cache=True)
                LOGGER.debug('single file fetched %s ' % year)
                # convert to NETCDF4_CLASSIC
                try:
                    ds = Dataset(df)
                    df_time = ds.variables['time']
                    # Files whose time variable lacks a 'calendar' attribute are
                    # taken as not-yet NETCDF4_CLASSIC and converted.
                    # TODO: check the actual data model instead of the calendar attr
                    if not hasattr(df_time, 'calendar'):
                        p, f = path.split(path.abspath(df))
                        LOGGER.debug("path = %s , file %s " % (p, f))
                        # May be an issue if several users are working at the same time
                        move(df, f)
                        conv = call(resource=f,
                                    output_format_options={'data_model': 'NETCDF4_CLASSIC'},
                                    dir_output=p,
                                    prefix=f.replace('.nc', ''))
                        obs_data.append(conv)
                        LOGGER.debug('file %s to NETCDF4_CLASSIC converted' % conv)
                        # Cleaning, could be 50gb... for each (!) user
                        # TODO Check how links work
                        cmdrm = 'rm -f %s' % (f)
                        system(cmdrm)
                    else:
                        obs_data.append(df)
                    ds.close()
                except Exception as ex:
                    LOGGER.exception('failed to convert into NETCDF4_CLASSIC: {}'.format(ex))
            except Exception as ex:
                msg = "download failed on {}: {}".format(url, ex)
                LOGGER.exception(msg)
        LOGGER.info('Reanalyses data fetched for %s files' % len(obs_data))
    except Exception as ex:
        msg = "get reanalyses module failed to fetch data: {}".format(ex)
        LOGGER.exception(msg)
        raise Exception(msg)

    if (level is None) or (getlevel is False):
        data = obs_data
    else:
        LOGGER.info('get level: %s' % level)
        data = get_level(obs_data, level=level)
    return data
def get_level(resource, level):
    """
    Extract one pressure level from a 4D (time, level, lat, lon) dataset and
    write it back as a 3D variable named 'z<level>'.

    :param resource: netCDF file path or list of file paths with a level dimension
    :param level: pressure level to extract (e.g. 500)
    :return: path to the netCDF file containing the single-level variable
    :raises Exception: re-raises any failure after logging (previously the
        function returned an unbound name, masking the error as UnboundLocalError)
    """
    # NOTE(review): these helpers come from flyingpigeon while the rest of the
    # module uses eggshell equivalents -- confirm this mix is intentional
    from flyingpigeon.ocgis_module import call
    from netCDF4 import Dataset
    from flyingpigeon.utils import get_variable
    from numpy import squeeze
    from os import path

    try:
        if type(resource) == list:
            # sort input files by basename so time steps are concatenated in order
            resource = sorted(resource, key=lambda i: path.splitext(path.basename(i))[0])
        # subset to the single requested level
        level_data = call(resource, level_range=[int(level), int(level)])
        variable = get_variable(level_data)
        LOGGER.info('found %s in file' % variable)
        ds = Dataset(level_data, mode='a')
        try:
            var = ds.variables.pop(variable)
            dims = var.dimensions
            # drop the level axis: (time, level, lat, lon) -> (time, lat, lon)
            new_var = ds.createVariable('z%s' % level, var.dtype, dimensions=(dims[0], dims[2], dims[3]))
            new_var[:, :, :] = squeeze(var[:, 0, :, :])
            try:
                new_var.setncatts({k: var.getncattr(k) for k in var.ncattrs()})
            except Exception:
                # some datasets (e.g. 20CRV2) carry attributes that cannot be copied
                LOGGER.info('Could not set attributes for z%s' % level)
        finally:
            # always close the dataset (previously it leaked on exception)
            ds.close()
        LOGGER.info('level %s extracted' % level)
        data = call(level_data, variable='z%s' % level)
    except Exception:
        LOGGER.exception('failed to extract level')
        raise
    return data
def write_fileinfo(resource, filepath=False):
    """
    Write the file names (and optionally full paths) of a resource list
    to a text file.

    :param resource: list of files to be documented
    :param filepath: if True the absolute filepath is written out as well (default=False)
    :return str: path to the text file with the appropriate information
    """
    from os.path import basename, realpath
    from tempfile import mkstemp

    # create the output file in the current working directory
    _, text_src = mkstemp(dir='.', suffix='.txt')
    try:
        with open(text_src, 'w') as fp:
            fp.write('###############################################\n')
            fp.write('####### birdhouse process ######\n')
            fp.write('###############################################\n')
            if filepath is False:
                fp.write('Following is a list of resource files: \n')
                fp.write('\n')
                for f in resource:
                    fp.write('%s \n' % basename(f))
            else:
                fp.write('Following files are stored to your local discs: \n')
                fp.write('\n')
                for f in resource:
                    fp.write('%s \n' % realpath(f))
        LOGGER.info('resources filenames written to textfile')
    except Exception:
        # best-effort: log and still return the (possibly partial) file
        # (previously a bare 'except:' also swallowed KeyboardInterrupt/SystemExit)
        LOGGER.exception('failed to write file names to file')
    return text_src
# _EODATA_ = ["PSScene3Band__visual",
# "PSScene4Band__analytic",
# "PSScene4Band__analytic_xml",
# "Sentinel2L1C__metadata_aux",
# "Sentinel2L1C__analytic_b1",
# "Sentinel2L1C__analytic_b2", # blue
# "Sentinel2L1C__analytic_b3", # green
# "Sentinel2L1C__analytic_b4", # red
# "Sentinel2L1C__analytic_b8", # nivr
# ]
# PSScene3Band PlanetScope Scenes
# PSScene4Band PlanetScope Scenes
# PSOrthoTile PlanetScope OrthoTiles
# REOrthoTile RapidEye OrthoTiles
# REScene RapidEye Scenes (unorthorectified strips)
# SkySatScene SkySat Scenes
# Landsat8L1G Landsat8 Scenes
# Sentinel2L1C Copernicus Sentinel-2 Scenes
# "_permissions": [
# "assets.analytic_b1:download",
# "assets.analytic_b3:download",
# "assets.analytic_b2:download",
# "assets.analytic_b5:download",
# "assets.analytic_b4:download",
# "assets.analytic_b7:download",
# "assets.analytic_b6:download",
# "assets.analytic_b9:download",
# "assets.analytic_b8:download",
# "assets.analytic_b8a:download",
# "assets.visual:download",
# "assets.metadata_aux:download",
# "assets.analytic_b10:download",
# "assets.analytic_b11:download",
# "assets.analytic_b12:download"
# ],
#
# def fetch_eodata(item_type, asset, token, bbox, period=[dt.today()-timedelta(days=30), dt.today()], cloud_cover=0.5, cache=True):
# """
# search for given EO data product provided by planet.
# The search and appropriate download is limited by bbox and search period
#
# :param item_type: product provided by planet
# :param asset: product asset, (visible, analytic, bands)
# :param token: Authentification token generated by planet Earth Obersavation Explorer
# :param bbox: latitude longitude coordinates defining a bounding box
# :param period: [start , end] datetime objects (default last 30 days)
# :param cloud_cover: threshold for cloud_cover tolerance. 0 = 0percent cloud_cover 1=100percent cloud_cover
# :param cache: if True file (default) is stored in local cache
#
# return list: list of pathes for fetched products
# """
#
# import os
# import requests
# from requests.auth import HTTPBasicAuth
# import shutil
# import time
# from os.path import join
# from os import makedirs
# from flyingpigeon.config import cache_path
# # Enter a bbox: min_lon, max_lon, min_lat, max_lat.
# # xmin ymin xmax ymax
# geojson_geometry = {"type": "Polygon",
# "coordinates": [[
# [bbox[0], bbox[1]], # [14.600830078125, 8.677421123289992],
# [bbox[2], bbox[1]], # [14.797210693359375, 8.677421123289992],
# [bbox[2], bbox[3]], # [14.797210693359375, 8.90678000752024],
# [bbox[0], bbox[3]], # [14.600830078125, 8.90678000752024],
# [bbox[0], bbox[1]], # [14.600830078125, 8.677421123289992]
# ]]}
#
# LOGGER.debug("geojson_geometry: %s" % geojson_geometry)
# # get images that overlap with our AOI
# geometry_filter = {
# "type": "GeometryFilter",
# "field_name": "geometry",
# "config": geojson_geometry
# }
#
# start = period[0]
# end = period[1]
#
#
# LOGGER.debug("Period %s to %s " % (start, end))
#
# # get images acquired within a date range
# date_range_filter = {
# "type": "DateRangeFilter",
# "field_name": "acquired",
# "config": {
# "gte": "%s000Z" % (start.strftime('%Y-%m-%dT%H:%M:%S.')),
# "lte": "%s000Z" % (end.strftime('%Y-%m-%dT%H:%M:%S.')),
# }
# }
#
# # only get images which have <50% cloud coverage
# cloud_cover_filter = {
# "type": "RangeFilter",
# "field_name": "cloud_cover",
# "config": {
# "lte": cloud_cover
# }
# }
#
# # combine our geo, date, cloud filters
# combined_filter = {"type": "AndFilter",
# "config": [geometry_filter, date_range_filter, cloud_cover_filter]}
#
# # API Key
# PLANET_API_KEY = token # os.getenv('PL_API_KEY')
#
# # item_type = item_type, assetproducts[0] # "PSScene4Band"
# # API request object
#
# search_request = {
# "interval": "day",
# "item_types": [item_type],
# "filter": combined_filter
# }
#
# if cache:
# DIR_archiv = cache_path()
# else:
# DIR_archiv = '.'
# DIR = join(DIR_archiv, "EO_data", item_type, asset)
#
# if not os.path.exists(DIR):
# makedirs(DIR)
#
# # fire off the POST request
# search_result = requests.post(
# 'https://api.planet.com/data/v1/quick-search',
# auth=HTTPBasicAuth(PLANET_API_KEY, ''),
# json=search_request)
#
# # LOGGER.info('Search result: %s ' % json.dumps(search_result.json(), indent=1))
#
# # extract image IDs only
# image_ids = [feature['id'] for feature in search_result.json()['features']]
# LOGGER.info("image IDs: %s " % image_ids)
#
# resources = []
# resources_sleeping = []
#
# for image_id in image_ids:
#
# id0 = image_id
# if "xml" in asset:
# filename = "%s.xml" % id0
# else:
# filename = "%s.tif" % id0
#
# local_file = join(DIR, filename) # mkstemp(dir="/home/nils/data/planet/", prefix=id0, suffix='.tif')
#
# if os.path.exists(local_file):
# LOGGER.info('File %s in cache' % filename)
# resources.extend([local_file])
# else:
# id0_url = 'https://api.planet.com/data/v1/item-types/{}/items/{}/assets'.format(item_type, id0)
#
# # Returns JSON metadata for assets in this ID. Learn more: planet.com/docs/reference/data-api/items-assets/#asset
# result = requests.get(id0_url, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
# # List of asset types available for this particular satellite image
# keys = result.json().keys()
# LOGGER.debug("assets in file %s : %s " % (filename, keys))
# # This is "inactive" if the "visual" asset has not yet been activated; otherwise 'active'
# # if 'analytic' in result.json().keys():
# if asset in keys:
# LOGGER.debug("downloading file %s" % filename)
# # LOGGER.debug(result.json()[asset]['status'])
# # Parse out useful links
# links = result.json()[asset]["_links"] # u"analytic"
# self_link = links["_self"]
# activation_link = links["activate"]
# # Request activation of the 'visual' asset:
# activate_result = requests.get(activation_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
# # Parse out useful links
# links = result.json()[asset]["_links"] # u"analytic"
# self_link = links["_self"]
# activation_link = links["activate"]
# # Request activation of the 'visual' asset:
# activate_result = requests.get(activation_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
# activation_status_result = requests.get(self_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
#
# try:
# timeout = time.time() + 30 # 30 seconds from now
# while activation_status_result.json()["status"] != 'active':
# if time.time() > timeout and activation_status_result.json()["status"] == 'inactive':
# LOGGER.debug("File %s is still inactive after 30sec. Giving up" % filename)
# resources_sleeping.extend([filename])
# break
# else:
# LOGGER.debug('File %s is sleeping. gently waking up' % filename)
# LOGGER.debug(activation_status_result.json()["status"])
# time.sleep(30)
# activation_status_result = requests.get(self_link, auth=HTTPBasicAuth(PLANET_API_KEY, ''))
#
# if time.time() < timeout or activation_status_result.json()["status"] == 'active':
# LOGGER.debug('File ready to download: %s' % (activation_status_result.json()["status"]))
# # Image can be downloaded by making a GET with your Planet API key, from here:
# download_link = activation_status_result.json()["location"]
# r = requests.get(download_link, stream=True, verify=False)
# with open(local_file, 'wb') as fp:
# shutil.copyfileobj(r.raw, fp)
# resources.extend([local_file])
# except:
# LOGGER.exception("failed to download file %s " % filename)
# else:
# LOGGER.debug('Asset not found in keys, most likely no permissions for this data set %s ' % filename)
#
# return resources_sleeping, resources
|
en
| 0.54999
|
# import logging # logger = logging.getLogger(__name__) Fetches the reanalysis data (NCEP, 20CR or ERA_20C) to local file system :param start: int for start year to fetch source data :param end: int for end year to fetch source data (if None, current year will be the end) :param variable: variable name (default='slp'), geopotential height is given as e.g. z700 :param dataset: default='NCEP' :return list: list of path/files.nc # used for NETCDF convertion # used for NETCDF convertion # noqa # noqa # noqa # noqa # noqa # noqa # noqa # noqa # noqa # noqa # noqa # force updating of the current year dataset # ########################################### # convert to NETCDF4_CLASSIC # Here, need to check not just calendar, but that file is ncdf_classic already... # May be an issue if several users are working at the same time # Cleaning, could be 50gb... for each (!) user # TODO Check how links work # resource.sort() # i = where(var[:]==level) # TODO: Here may be an error! in case of exception, dataset will not close! # Exception arise for example for 20CRV2 data... 
write path and filenames to a text file :param ressource: list of files to be documented :param filepath: if True the absolute filepath is written out as well (default = False) :return txt: textfile with appropriate information ##############################################\n') ###### birdhouse process ######\n') ##############################################\n') # _EODATA_ = ["PSScene3Band__visual", # "PSScene4Band__analytic", # "PSScene4Band__analytic_xml", # "Sentinel2L1C__metadata_aux", # "Sentinel2L1C__analytic_b1", # "Sentinel2L1C__analytic_b2", # blue # "Sentinel2L1C__analytic_b3", # green # "Sentinel2L1C__analytic_b4", # red # "Sentinel2L1C__analytic_b8", # nivr # ] # PSScene3Band PlanetScope Scenes # PSScene4Band PlanetScope Scenes # PSOrthoTile PlanetScope OrthoTiles # REOrthoTile RapidEye OrthoTiles # REScene RapidEye Scenes (unorthorectified strips) # SkySatScene SkySat Scenes # Landsat8L1G Landsat8 Scenes # Sentinel2L1C Copernicus Sentinel-2 Scenes # "_permissions": [ # "assets.analytic_b1:download", # "assets.analytic_b3:download", # "assets.analytic_b2:download", # "assets.analytic_b5:download", # "assets.analytic_b4:download", # "assets.analytic_b7:download", # "assets.analytic_b6:download", # "assets.analytic_b9:download", # "assets.analytic_b8:download", # "assets.analytic_b8a:download", # "assets.visual:download", # "assets.metadata_aux:download", # "assets.analytic_b10:download", # "assets.analytic_b11:download", # "assets.analytic_b12:download" # ], # # def fetch_eodata(item_type, asset, token, bbox, period=[dt.today()-timedelta(days=30), dt.today()], cloud_cover=0.5, cache=True): # """ # search for given EO data product provided by planet. 
# The search and appropriate download is limited by bbox and search period # # :param item_type: product provided by planet # :param asset: product asset, (visible, analytic, bands) # :param token: Authentification token generated by planet Earth Obersavation Explorer # :param bbox: latitude longitude coordinates defining a bounding box # :param period: [start , end] datetime objects (default last 30 days) # :param cloud_cover: threshold for cloud_cover tolerance. 0 = 0percent cloud_cover 1=100percent cloud_cover # :param cache: if True file (default) is stored in local cache # # return list: list of pathes for fetched products # """ # # import os # import requests # from requests.auth import HTTPBasicAuth # import shutil # import time # from os.path import join # from os import makedirs # from flyingpigeon.config import cache_path # # Enter a bbox: min_lon, max_lon, min_lat, max_lat. # # xmin ymin xmax ymax # geojson_geometry = {"type": "Polygon", # "coordinates": [[ # [bbox[0], bbox[1]], # [14.600830078125, 8.677421123289992], # [bbox[2], bbox[1]], # [14.797210693359375, 8.677421123289992], # [bbox[2], bbox[3]], # [14.797210693359375, 8.90678000752024], # [bbox[0], bbox[3]], # [14.600830078125, 8.90678000752024], # [bbox[0], bbox[1]], # [14.600830078125, 8.677421123289992] # ]]} # # LOGGER.debug("geojson_geometry: %s" % geojson_geometry) # # get images that overlap with our AOI # geometry_filter = { # "type": "GeometryFilter", # "field_name": "geometry", # "config": geojson_geometry # } # # start = period[0] # end = period[1] # # # LOGGER.debug("Period %s to %s " % (start, end)) # # # get images acquired within a date range # date_range_filter = { # "type": "DateRangeFilter", # "field_name": "acquired", # "config": { # "gte": "%s000Z" % (start.strftime('%Y-%m-%dT%H:%M:%S.')), # "lte": "%s000Z" % (end.strftime('%Y-%m-%dT%H:%M:%S.')), # } # } # # # only get images which have <50% cloud coverage # cloud_cover_filter = { # "type": "RangeFilter", # "field_name": 
"cloud_cover", # "config": { # "lte": cloud_cover # } # } # # # combine our geo, date, cloud filters # combined_filter = {"type": "AndFilter", # "config": [geometry_filter, date_range_filter, cloud_cover_filter]} # # # API Key # PLANET_API_KEY = token # os.getenv('PL_API_KEY') # # # item_type = item_type, assetproducts[0] # "PSScene4Band" # # API request object # # search_request = { # "interval": "day", # "item_types": [item_type], # "filter": combined_filter # } # # if cache: # DIR_archiv = cache_path() # else: # DIR_archiv = '.' # DIR = join(DIR_archiv, "EO_data", item_type, asset) # # if not os.path.exists(DIR): # makedirs(DIR) # # # fire off the POST request # search_result = requests.post( # 'https://api.planet.com/data/v1/quick-search', # auth=HTTPBasicAuth(PLANET_API_KEY, ''), # json=search_request) # # # LOGGER.info('Search result: %s ' % json.dumps(search_result.json(), indent=1)) # # # extract image IDs only # image_ids = [feature['id'] for feature in search_result.json()['features']] # LOGGER.info("image IDs: %s " % image_ids) # # resources = [] # resources_sleeping = [] # # for image_id in image_ids: # # id0 = image_id # if "xml" in asset: # filename = "%s.xml" % id0 # else: # filename = "%s.tif" % id0 # # local_file = join(DIR, filename) # mkstemp(dir="/home/nils/data/planet/", prefix=id0, suffix='.tif') # # if os.path.exists(local_file): # LOGGER.info('File %s in cache' % filename) # resources.extend([local_file]) # else: # id0_url = 'https://api.planet.com/data/v1/item-types/{}/items/{}/assets'.format(item_type, id0) # # # Returns JSON metadata for assets in this ID. 
Learn more: planet.com/docs/reference/data-api/items-assets/#asset # result = requests.get(id0_url, auth=HTTPBasicAuth(PLANET_API_KEY, '')) # # List of asset types available for this particular satellite image # keys = result.json().keys() # LOGGER.debug("assets in file %s : %s " % (filename, keys)) # # This is "inactive" if the "visual" asset has not yet been activated; otherwise 'active' # # if 'analytic' in result.json().keys(): # if asset in keys: # LOGGER.debug("downloading file %s" % filename) # # LOGGER.debug(result.json()[asset]['status']) # # Parse out useful links # links = result.json()[asset]["_links"] # u"analytic" # self_link = links["_self"] # activation_link = links["activate"] # # Request activation of the 'visual' asset: # activate_result = requests.get(activation_link, auth=HTTPBasicAuth(PLANET_API_KEY, '')) # # Parse out useful links # links = result.json()[asset]["_links"] # u"analytic" # self_link = links["_self"] # activation_link = links["activate"] # # Request activation of the 'visual' asset: # activate_result = requests.get(activation_link, auth=HTTPBasicAuth(PLANET_API_KEY, '')) # activation_status_result = requests.get(self_link, auth=HTTPBasicAuth(PLANET_API_KEY, '')) # # try: # timeout = time.time() + 30 # 30 seconds from now # while activation_status_result.json()["status"] != 'active': # if time.time() > timeout and activation_status_result.json()["status"] == 'inactive': # LOGGER.debug("File %s is still inactive after 30sec. Giving up" % filename) # resources_sleeping.extend([filename]) # break # else: # LOGGER.debug('File %s is sleeping. 
gently waking up' % filename) # LOGGER.debug(activation_status_result.json()["status"]) # time.sleep(30) # activation_status_result = requests.get(self_link, auth=HTTPBasicAuth(PLANET_API_KEY, '')) # # if time.time() < timeout or activation_status_result.json()["status"] == 'active': # LOGGER.debug('File ready to download: %s' % (activation_status_result.json()["status"])) # # Image can be downloaded by making a GET with your Planet API key, from here: # download_link = activation_status_result.json()["location"] # r = requests.get(download_link, stream=True, verify=False) # with open(local_file, 'wb') as fp: # shutil.copyfileobj(r.raw, fp) # resources.extend([local_file]) # except: # LOGGER.exception("failed to download file %s " % filename) # else: # LOGGER.debug('Asset not found in keys, most likely no permissions for this data set %s ' % filename) # # return resources_sleeping, resources
| 2.025341
| 2
|
mutual_library/models/mutual_diary_user.py
|
mubuca95/mutual_diary
| 0
|
6626004
|
<reponame>mubuca95/mutual_diary
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy.sql.schema import ForeignKey
from db import db
class mutual_diary_user(db.Model):
    """Association model linking a user to a mutual diary (many-to-many)."""
    __tablename__ = 'mutual_diary_user'
    # NOTE(review): 'foreign_key' is not a recognized SQLAlchemy Column kwarg
    # (it is silently ignored); a real FK needs db.ForeignKey('<table>.<col>')
    # and the table has no primary key -- confirm intended schema.
    iduser = db.Column(db.Integer, foreign_key = True)
    iddiary = db.Column(db.Integer, foreign_key = True)

    def __init__(self, iduser, iddiary):
        # Bug fix: iduser was assigned iddiary, silently dropping the user id.
        self.iduser = iduser
        self.iddiary = iddiary
|
import mysql.connector
from mysql.connector import errorcode
from sqlalchemy.sql.schema import ForeignKey
from db import db
class mutual_diary_user(db.Model):
    """Association model linking a user to a mutual diary (many-to-many)."""
    __tablename__ = 'mutual_diary_user'
    # NOTE(review): 'foreign_key' is not a recognized SQLAlchemy Column kwarg
    # (it is silently ignored); a real FK needs db.ForeignKey('<table>.<col>')
    # and the table has no primary key -- confirm intended schema.
    iduser = db.Column(db.Integer, foreign_key = True)
    iddiary = db.Column(db.Integer, foreign_key = True)

    def __init__(self, iduser, iddiary):
        # Bug fix: iduser was assigned iddiary, silently dropping the user id.
        self.iduser = iduser
        self.iddiary = iddiary
|
none
| 1
| 2.502166
| 3
|
|
lib/dataset/transforms/build.py
|
ankhzaya/HigherHRNet-Human-Pose-Estimation
| 775
|
6626005
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# Modified by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import transforms as T
# Left/right joint swap tables used when an image is horizontally flipped:
# FLIP_CONFIG[dataset][i] is the keypoint index that keypoint i maps to after
# the flip. The *_WITH_CENTER variants append one extra self-mapped keypoint
# (the body center) at the end.
FLIP_CONFIG = {
    'COCO': [
        0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15
    ],
    'COCO_WITH_CENTER': [
        0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 17
    ],
    'CROWDPOSE': [
        1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 12, 13
    ],
    'CROWDPOSE_WITH_CENTER': [
        1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 12, 13, 14
    ]
}
def build_transforms(cfg, is_train=True):
    """Build the training-time data-augmentation pipeline.

    :param cfg: experiment config; only the cfg.DATASET section is read
    :param is_train: must be True -- this builder is training-only
    :return: a T.Compose of random affine, random horizontal flip,
             tensor conversion and ImageNet normalization
    :raises ValueError: for a dataset without a flip-index table
    """
    assert is_train is True, 'Please only use build_transforms for training.'
    assert isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)), 'DATASET.OUTPUT_SIZE should be list or tuple'

    dcfg = cfg.DATASET
    if is_train:
        rot_max = dcfg.MAX_ROTATION
        scale_min = dcfg.MIN_SCALE
        scale_max = dcfg.MAX_SCALE
        translate_max = dcfg.MAX_TRANSLATE
        in_size = dcfg.INPUT_SIZE
        out_size = dcfg.OUTPUT_SIZE
        do_flip = dcfg.FLIP
        scale_kind = dcfg.SCALE_TYPE
    else:
        # Unreachable while the assert above holds; kept for parity because
        # asserts are stripped under `python -O`.
        scale_kind = dcfg.SCALE_TYPE
        rot_max = 0
        scale_min = 1
        scale_max = 1
        translate_max = 0
        in_size = 512
        out_size = [128]
        do_flip = 0

    # Pick the left/right keypoint swap table for this dataset family.
    if 'coco' in dcfg.DATASET:
        flip_key = 'COCO'
    elif 'crowd_pose' in dcfg.DATASET:
        flip_key = 'CROWDPOSE'
    else:
        raise ValueError('Please implement flip_index for new dataset: %s.' % dcfg.DATASET)
    if dcfg.WITH_CENTER:
        flip_key += '_WITH_CENTER'
    flip_index = FLIP_CONFIG[flip_key]

    return T.Compose(
        [
            T.RandomAffineTransform(
                in_size,
                out_size,
                rot_max,
                scale_min,
                scale_max,
                scale_kind,
                translate_max,
                scale_aware_sigma=dcfg.SCALE_AWARE_SIGMA
            ),
            T.RandomHorizontalFlip(flip_index, out_size, do_flip),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]
    )
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# Modified by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import transforms as T
# Left/right joint swap tables used when an image is horizontally flipped:
# FLIP_CONFIG[dataset][i] is the keypoint index that keypoint i maps to after
# the flip. The *_WITH_CENTER variants append one extra self-mapped keypoint
# (the body center) at the end.
FLIP_CONFIG = {
    'COCO': [
        0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15
    ],
    'COCO_WITH_CENTER': [
        0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 17
    ],
    'CROWDPOSE': [
        1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 12, 13
    ],
    'CROWDPOSE_WITH_CENTER': [
        1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 12, 13, 14
    ]
}
def build_transforms(cfg, is_train=True):
    """Build the training-time data-augmentation pipeline.

    :param cfg: experiment config; only the cfg.DATASET section is read
    :param is_train: must be True -- this builder is training-only
    :return: a T.Compose of random affine, random horizontal flip,
             tensor conversion and ImageNet normalization
    :raises ValueError: for a dataset without a flip-index table
    """
    assert is_train is True, 'Please only use build_transforms for training.'
    assert isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)), 'DATASET.OUTPUT_SIZE should be list or tuple'
    if is_train:
        max_rotation = cfg.DATASET.MAX_ROTATION
        min_scale = cfg.DATASET.MIN_SCALE
        max_scale = cfg.DATASET.MAX_SCALE
        max_translate = cfg.DATASET.MAX_TRANSLATE
        input_size = cfg.DATASET.INPUT_SIZE
        output_size = cfg.DATASET.OUTPUT_SIZE
        flip = cfg.DATASET.FLIP
        scale_type = cfg.DATASET.SCALE_TYPE
    else:
        # Unreachable while the assert above holds; asserts are stripped
        # under `python -O`, so this branch is kept as a fallback.
        scale_type = cfg.DATASET.SCALE_TYPE
        max_rotation = 0
        min_scale = 1
        max_scale = 1
        max_translate = 0
        input_size = 512
        output_size = [128]
        flip = 0
    # coco_flip_index = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
    # if cfg.DATASET.WITH_CENTER:
    #     coco_flip_index.append(17)
    # Pick the left/right keypoint swap table for this dataset family.
    if 'coco' in cfg.DATASET.DATASET:
        dataset_name = 'COCO'
    elif 'crowd_pose' in cfg.DATASET.DATASET:
        dataset_name = 'CROWDPOSE'
    else:
        raise ValueError('Please implement flip_index for new dataset: %s.' % cfg.DATASET.DATASET)
    if cfg.DATASET.WITH_CENTER:
        coco_flip_index = FLIP_CONFIG[dataset_name + '_WITH_CENTER']
    else:
        coco_flip_index = FLIP_CONFIG[dataset_name]
    transforms = T.Compose(
        [
            T.RandomAffineTransform(
                input_size,
                output_size,
                max_rotation,
                min_scale,
                max_scale,
                scale_type,
                max_translate,
                scale_aware_sigma=cfg.DATASET.SCALE_AWARE_SIGMA
            ),
            T.RandomHorizontalFlip(coco_flip_index, output_size, flip),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]
    )
    return transforms
|
en
| 0.335606
|
# ------------------------------------------------------------------------------ # Copyright (c) Microsoft # Licensed under the MIT License. # Written by <NAME> (<EMAIL>) # Modified by <NAME> (<EMAIL>) # ------------------------------------------------------------------------------ # coco_flip_index = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] # if cfg.DATASET.WITH_CENTER: # coco_flip_index.append(17)
| 2.135575
| 2
|
application/models/inventory/brand.py
|
mutalisk999/bibi
| 1,037
|
6626006
|
<filename>application/models/inventory/brand.py
# -*- coding: utf-8 -*-
from application.extensions import db
__all__ = ['Brand']
class Brand(db.Document):
    """MongoEngine document describing a product brand."""
    meta = {
        'db_alias': 'inventory_db',
        'indexes': ['en']
    }
    # English name is the unique identifier of a brand.
    en = db.StringField(required=True, unique=True)
    cn = db.StringField()
    description = db.StringField()
    logo = db.StringField()

    def __unicode__(self):
        return '%s' % self.en

    def to_json(self):
        """Return a plain-dict representation suitable for JSON responses."""
        return dict(
            id=str(self.id),
            en=self.en,
            cn=self.cn,
            logo=self.logo,
            description=self.description)

    @classmethod
    def get_brand_or_create(cls, en):
        """Fetch the brand named *en*, creating it if it does not exist."""
        try:
            brand = cls.objects.get(en=en)
        except cls.DoesNotExist:
            # Only a missing document should trigger creation; the previous
            # bare 'except:' also swallowed connection/validation errors.
            brand = cls(en=en).save()
        return brand
|
<filename>application/models/inventory/brand.py
# -*- coding: utf-8 -*-
from application.extensions import db
__all__ = ['Brand']
class Brand(db.Document):
    """MongoEngine document describing a product brand."""
    meta = {
        'db_alias': 'inventory_db',
        'indexes': ['en']
    }
    # English name is the unique identifier of a brand.
    en = db.StringField(required=True, unique=True)
    cn = db.StringField()
    description = db.StringField()
    logo = db.StringField()
    def __unicode__(self):
        return '%s' % self.en
    def to_json(self):
        """Return a plain-dict representation suitable for JSON responses."""
        return dict(
            id=str(self.id),
            en=self.en,
            cn=self.cn,
            logo=self.logo,
            description=self.description)
    @classmethod
    def get_brand_or_create(cls, en):
        """Fetch the brand named *en*, creating it if it does not exist."""
        try:
            brand = cls.objects.get(en=en)
        # NOTE(review): bare except also swallows connection/validation
        # errors; catching cls.DoesNotExist would be safer.
        except:
            brand = cls(en=en).save()
        return brand
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.252218
| 2
|
kittycad/models/engine_metadata.py
|
KittyCAD/kittycad.py
| 1
|
6626007
|
<filename>kittycad/models/engine_metadata.py
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..models.file_system_metadata import FileSystemMetadata
from ..models.nats_connection import NatsConnection
from ..types import UNSET, Unset
T = TypeVar("T", bound="EngineMetadata")
@attr.s(auto_attribs=True)
class EngineMetadata:
    """Metadata reported by the engine API (generated API model).

    Fields left at the ``UNSET`` sentinel are omitted from the serialized
    dict; unknown server-side keys are round-tripped through
    ``additional_properties``.
    """

    async_jobs_running: Union[Unset, bool] = False
    fs: Union[Unset, FileSystemMetadata] = UNSET
    git_hash: Union[Unset, str] = UNSET
    nats: Union[Unset, NatsConnection] = UNSET
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting fields that are UNSET."""
        async_jobs_running = self.async_jobs_running
        fs: Union[Unset, str] = UNSET
        if not isinstance(self.fs, Unset):
            # serialized via .value (enum-style); mirrors the
            # FileSystemMetadata(_fs) construction in from_dict
            fs = self.fs.value
        git_hash = self.git_hash
        nats: Union[Unset, str] = UNSET
        if not isinstance(self.nats, Unset):
            nats = self.nats.value
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        # (removed a no-op ``field_dict.update({})`` left by the generator)
        if async_jobs_running is not UNSET:
            field_dict['async_jobs_running'] = async_jobs_running
        if fs is not UNSET:
            field_dict['fs'] = fs
        if git_hash is not UNSET:
            field_dict['git_hash'] = git_hash
        if nats is not UNSET:
            field_dict['nats'] = nats
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a dict; leftover keys go to additional_properties."""
        d = src_dict.copy()
        async_jobs_running = d.pop("async_jobs_running", UNSET)
        _fs = d.pop("fs", UNSET)
        fs: Union[Unset, FileSystemMetadata]
        if isinstance(_fs, Unset):
            fs = UNSET
        else:
            fs = FileSystemMetadata(_fs)
        git_hash = d.pop("git_hash", UNSET)
        _nats = d.pop("nats", UNSET)
        nats: Union[Unset, NatsConnection]
        if isinstance(_nats, Unset):
            nats = UNSET
        else:
            nats = NatsConnection(_nats)
        engine_metadata = cls(
            async_jobs_running=async_jobs_running,
            fs=fs,
            git_hash=git_hash,
            nats=nats,
        )
        engine_metadata.additional_properties = d
        return engine_metadata

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra properties not declared on the model."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|
<filename>kittycad/models/engine_metadata.py
from typing import Any, Dict, List, Type, TypeVar, Union, cast
import attr
from ..models.file_system_metadata import FileSystemMetadata
from ..models.nats_connection import NatsConnection
from ..types import UNSET, Unset
T = TypeVar("T", bound="EngineMetadata")
@attr.s(auto_attribs=True)
class EngineMetadata:
    """Metadata reported by the engine API (generated API model).

    Fields left at the ``UNSET`` sentinel are omitted from the serialized
    dict; unknown server-side keys are round-tripped through
    ``additional_properties``.
    """
    async_jobs_running: Union[Unset, bool] = False
    fs: Union[Unset, FileSystemMetadata] = UNSET
    git_hash: Union[Unset, str] = UNSET
    nats: Union[Unset, NatsConnection] = UNSET
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting fields that are UNSET."""
        async_jobs_running = self.async_jobs_running
        fs: Union[Unset, str] = UNSET
        if not isinstance(self.fs, Unset):
            # serialized via .value (enum-style); mirrors the
            # FileSystemMetadata(_fs) construction in from_dict
            fs = self.fs.value
        git_hash = self.git_hash
        nats: Union[Unset, str] = UNSET
        if not isinstance(self.nats, Unset):
            nats = self.nats.value
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        # no-op left by the code generator; kept verbatim
        field_dict.update({})
        if async_jobs_running is not UNSET:
            field_dict['async_jobs_running'] = async_jobs_running
        if fs is not UNSET:
            field_dict['fs'] = fs
        if git_hash is not UNSET:
            field_dict['git_hash'] = git_hash
        if nats is not UNSET:
            field_dict['nats'] = nats
        return field_dict
    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a dict; leftover keys go to additional_properties."""
        d = src_dict.copy()
        async_jobs_running = d.pop("async_jobs_running", UNSET)
        _fs = d.pop("fs", UNSET)
        fs: Union[Unset, FileSystemMetadata]
        if isinstance(_fs, Unset):
            fs = UNSET
        else:
            fs = FileSystemMetadata(_fs)
        git_hash = d.pop("git_hash", UNSET)
        _nats = d.pop("nats", UNSET)
        nats: Union[Unset, NatsConnection]
        if isinstance(_nats, Unset):
            nats = UNSET
        else:
            nats = NatsConnection(_nats)
        engine_metadata = cls(
            async_jobs_running=async_jobs_running,
            fs=fs,
            git_hash=git_hash,
            nats=nats,
        )
        engine_metadata.additional_properties = d
        return engine_metadata
    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra properties not declared on the model."""
        return list(self.additional_properties.keys())
    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]
    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value
    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]
    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|
none
| 1
| 1.95969
| 2
|
|
genmotion/render/c4d/params.py
|
yizhouzhao/GenMotion
| 32
|
6626008
|
<filename>genmotion/render/c4d/params.py
# Get skeleton information from https://meshcapade.wiki/SMPL#smpl-x
# SMPL body model: joint index -> joint name (24 body joints).
SMPL_SKELETON = {
    0: 'Pelvis', 3: 'Spine1', 6: 'Spine2', 9: 'Spine3', 12: 'Neck', 15: 'Head',
    1: 'L_Hip', 4: 'L_Knee', 7: 'L_Ankle', 10: 'L_Foot',
    2: 'R_Hip', 5: 'R_Knee', 8: 'R_Ankle', 11: 'R_Foot',
    13: 'L_Collar', 16: 'L_Shoulder', 18: 'L_Elbow', 20: 'L_Wrist',
    14: 'R_Collar', 17: 'R_Shoulder', 19: 'R_Elbow', 21: 'R_Wrist',
    22: 'L_Hand',
    23: 'R_Hand'
}
# SMPL+H body-and-hands model: joints 0-21 match SMPL's body joints;
# 22-51 are hand articulation, 3 joints per finger, left hand then right.
SMPL_H_SKELETON = {
    0: 'Pelvis', 3: 'Spine1', 6: 'Spine2', 9: 'Spine3', 12: 'Neck', 15: 'Head',
    1: 'L_Hip', 4: 'L_Knee', 7: 'L_Ankle', 10: 'L_Foot',
    2: 'R_Hip', 5: 'R_Knee', 8: 'R_Ankle', 11: 'R_Foot',
    13: 'L_Collar', 16: 'L_Shoulder', 18: 'L_Elbow', 20: 'L_Wrist',
    14: 'R_Collar', 17: 'R_Shoulder', 19: 'R_Elbow', 21: 'R_Wrist',
    22: 'lindex0', 23: 'lindex1', 24: 'lindex2',
    25: 'lmiddle0', 26: 'lmiddle1', 27: 'lmiddle2',
    28: 'lpinky0', 29: 'lpinky1', 30: 'lpinky2',
    31: 'lring0', 32: 'lring1', 33: 'lring2',
    34: 'lthumb0', 35: 'lthumb1', 36: 'lthumb2',
    37: 'rindex0', 38: 'rindex1', 39: 'rindex2',
    40: 'rmiddle0', 41: 'rmiddle1', 42: 'rmiddle2',
    43: 'rpinky0', 44: 'rpinky1', 45: 'rpinky2',
    46: 'rring0', 47: 'rring1', 48: 'rring2',
    49: 'rthumb0', 50: 'rthumb1', 51: 'rthumb2'
}
|
<filename>genmotion/render/c4d/params.py
# Get skeleton information from https://meshcapade.wiki/SMPL#smpl-x
# SMPL body model: joint index -> joint name (24 body joints).
SMPL_SKELETON = {
    0: 'Pelvis', 3: 'Spine1', 6: 'Spine2', 9: 'Spine3', 12: 'Neck', 15: 'Head',
    1: 'L_Hip', 4: 'L_Knee', 7: 'L_Ankle', 10: 'L_Foot',
    2: 'R_Hip', 5: 'R_Knee', 8: 'R_Ankle', 11: 'R_Foot',
    13: 'L_Collar', 16: 'L_Shoulder', 18: 'L_Elbow', 20: 'L_Wrist',
    14: 'R_Collar', 17: 'R_Shoulder', 19: 'R_Elbow', 21: 'R_Wrist',
    22: 'L_Hand',
    23: 'R_Hand'
}
# SMPL+H body-and-hands model: joints 0-21 match SMPL's body joints;
# 22-51 are hand articulation, 3 joints per finger, left hand then right.
SMPL_H_SKELETON = {
    0: 'Pelvis', 3: 'Spine1', 6: 'Spine2', 9: 'Spine3', 12: 'Neck', 15: 'Head',
    1: 'L_Hip', 4: 'L_Knee', 7: 'L_Ankle', 10: 'L_Foot',
    2: 'R_Hip', 5: 'R_Knee', 8: 'R_Ankle', 11: 'R_Foot',
    13: 'L_Collar', 16: 'L_Shoulder', 18: 'L_Elbow', 20: 'L_Wrist',
    14: 'R_Collar', 17: 'R_Shoulder', 19: 'R_Elbow', 21: 'R_Wrist',
    22: 'lindex0', 23: 'lindex1', 24: 'lindex2',
    25: 'lmiddle0', 26: 'lmiddle1', 27: 'lmiddle2',
    28: 'lpinky0', 29: 'lpinky1', 30: 'lpinky2',
    31: 'lring0', 32: 'lring1', 33: 'lring2',
    34: 'lthumb0', 35: 'lthumb1', 36: 'lthumb2',
    37: 'rindex0', 38: 'rindex1', 39: 'rindex2',
    40: 'rmiddle0', 41: 'rmiddle1', 42: 'rmiddle2',
    43: 'rpinky0', 44: 'rpinky1', 45: 'rpinky2',
    46: 'rring0', 47: 'rring1', 48: 'rring2',
    49: 'rthumb0', 50: 'rthumb1', 51: 'rthumb2'
}
|
en
| 0.374059
|
# Get skeleton information from https://meshcapade.wiki/SMPL#smpl-x
| 1.829999
| 2
|
7 term/Local-Computer-Networks-System-Software/Lab 4/shared/errors/disconnected_exception.py
|
Vanya112/BSUIR_Labs
| 24
|
6626009
|
class DisconnectedException(Exception):
    """Raised to signal that the connection to the peer has been lost."""
    pass
|
class DisconnectedException(Exception):
pass
|
none
| 1
| 0.995239
| 1
|
|
rllib/examples/centralized_critic_2.py
|
77loopin/ray
| 21,382
|
6626010
|
"""An example of implementing a centralized critic with ObservationFunction.
The advantage of this approach is that it's very simple and you don't have to
change the algorithm at all -- just use callbacks and a custom model.
However, it is a bit less principled in that you have to change the agent
observation spaces to include data that is only used at train time.
See also: centralized_critic.py for an alternative approach that instead
modifies the policy to add a centralized value function.
"""
import numpy as np
from gym.spaces import Dict, Discrete
import argparse
import os
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.examples.models.centralized_critic_models import \
YetAnotherCentralizedCriticModel, YetAnotherTorchCentralizedCriticModel
from ray.rllib.examples.env.two_step_game import TwoStepGame
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import check_learning_achieved
# Command-line options: DL framework selection plus the three stopping
# criteria consumed by the `stop` dict in the __main__ block below.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--framework",
    choices=["tf", "tf2", "tfe", "torch"],
    default="tf",
    help="The DL framework specifier.")
parser.add_argument(
    "--as-test",
    action="store_true",
    help="Whether this script should be run as a test: --stop-reward must "
    "be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
    "--stop-iters",
    type=int,
    default=100,
    help="Number of iterations to train.")
parser.add_argument(
    "--stop-timesteps",
    type=int,
    default=100000,
    help="Number of timesteps to train.")
parser.add_argument(
    "--stop-reward",
    type=float,
    default=7.99,
    help="Reward at which we stop training.")
class FillInActions(DefaultCallbacks):
    """Fills in the opponent actions info in the training batches."""

    def on_postprocess_trajectory(self, worker, episode, agent_id, policy_id,
                                  policies, postprocessed_batch,
                                  original_batches, **kwargs):
        # Observations built by central_critic_observer carry a zero
        # placeholder for "opponent_action"; overwrite it here with the
        # opponent's real (one-hot encoded) actions from the same episode.
        to_update = postprocessed_batch[SampleBatch.CUR_OBS]
        # Two-agent setup: the opponent is whichever agent id we are not.
        other_id = 1 if agent_id == 0 else 0
        # One-hot encoder for the Discrete(2) action space.
        action_encoder = ModelCatalog.get_preprocessor_for_space(Discrete(2))
        # set the opponent actions into the observation
        _, opponent_batch = original_batches[other_id]
        opponent_actions = np.array([
            action_encoder.transform(a)
            for a in opponent_batch[SampleBatch.ACTIONS]
        ])
        # The last two columns of the flattened observation are the slot for
        # the one-hot opponent action (see observer_space in __main__).
        to_update[:, -2:] = opponent_actions
def central_critic_observer(agent_obs, **kw):
    """Rewrites the agent obs to include opponent data for training.

    Each agent's observation becomes a dict of its own obs, the opponent's
    obs, and a zero placeholder for the opponent action, which the
    FillInActions callback later overwrites with the real value.
    """
    return {
        agent_id: {
            "own_obs": agent_obs[agent_id],
            "opponent_obs": agent_obs[1 - agent_id],
            "opponent_action": 0,  # filled in by FillInActions
        }
        for agent_id in (0, 1)
    }
if __name__ == "__main__":
    args = parser.parse_args()
    # Register the torch or tf variant of the centralized-critic model under
    # one name so the config below can stay framework-agnostic.
    ModelCatalog.register_custom_model(
        "cc_model", YetAnotherTorchCentralizedCriticModel
        if args.framework == "torch" else YetAnotherCentralizedCriticModel)
    action_space = Discrete(2)
    # Augmented per-agent observation space produced by
    # central_critic_observer.
    observer_space = Dict({
        "own_obs": Discrete(6),
        # These two fields are filled in by the CentralCriticObserver, and are
        # not used for inference, only for training.
        "opponent_obs": Discrete(6),
        "opponent_action": Discrete(2),
    })
    config = {
        "env": TwoStepGame,
        # FillInActions patches whole trajectories, so batches must not be
        # truncated mid-episode.
        "batch_mode": "complete_episodes",
        "callbacks": FillInActions,
        # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
        "num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
        "num_workers": 0,
        "multiagent": {
            "policies": {
                "pol1": (None, observer_space, action_space, {}),
                "pol2": (None, observer_space, action_space, {}),
            },
            # Agent 0 -> pol1, agent 1 -> pol2.
            "policy_mapping_fn": (
                lambda aid, **kwargs: "pol1" if aid == 0 else "pol2"),
            "observation_fn": central_critic_observer,
        },
        "model": {
            "custom_model": "cc_model",
        },
        "framework": args.framework,
    }
    # Training stops when any one of these thresholds is reached.
    stop = {
        "training_iteration": args.stop_iters,
        "timesteps_total": args.stop_timesteps,
        "episode_reward_mean": args.stop_reward,
    }
    results = tune.run("PPO", config=config, stop=stop, verbose=1)
    if args.as_test:
        check_learning_achieved(results, args.stop_reward)
|
"""An example of implementing a centralized critic with ObservationFunction.
The advantage of this approach is that it's very simple and you don't have to
change the algorithm at all -- just use callbacks and a custom model.
However, it is a bit less principled in that you have to change the agent
observation spaces to include data that is only used at train time.
See also: centralized_critic.py for an alternative approach that instead
modifies the policy to add a centralized value function.
"""
import numpy as np
from gym.spaces import Dict, Discrete
import argparse
import os
from ray import tune
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.examples.models.centralized_critic_models import \
YetAnotherCentralizedCriticModel, YetAnotherTorchCentralizedCriticModel
from ray.rllib.examples.env.two_step_game import TwoStepGame
from ray.rllib.models import ModelCatalog
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.test_utils import check_learning_achieved
parser = argparse.ArgumentParser()
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument(
"--as-test",
action="store_true",
help="Whether this script should be run as a test: --stop-reward must "
"be achieved within --stop-timesteps AND --stop-iters.")
parser.add_argument(
"--stop-iters",
type=int,
default=100,
help="Number of iterations to train.")
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.")
parser.add_argument(
"--stop-reward",
type=float,
default=7.99,
help="Reward at which we stop training.")
class FillInActions(DefaultCallbacks):
"""Fills in the opponent actions info in the training batches."""
def on_postprocess_trajectory(self, worker, episode, agent_id, policy_id,
policies, postprocessed_batch,
original_batches, **kwargs):
to_update = postprocessed_batch[SampleBatch.CUR_OBS]
other_id = 1 if agent_id == 0 else 0
action_encoder = ModelCatalog.get_preprocessor_for_space(Discrete(2))
# set the opponent actions into the observation
_, opponent_batch = original_batches[other_id]
opponent_actions = np.array([
action_encoder.transform(a)
for a in opponent_batch[SampleBatch.ACTIONS]
])
to_update[:, -2:] = opponent_actions
def central_critic_observer(agent_obs, **kw):
"""Rewrites the agent obs to include opponent data for training."""
new_obs = {
0: {
"own_obs": agent_obs[0],
"opponent_obs": agent_obs[1],
"opponent_action": 0, # filled in by FillInActions
},
1: {
"own_obs": agent_obs[1],
"opponent_obs": agent_obs[0],
"opponent_action": 0, # filled in by FillInActions
},
}
return new_obs
if __name__ == "__main__":
args = parser.parse_args()
ModelCatalog.register_custom_model(
"cc_model", YetAnotherTorchCentralizedCriticModel
if args.framework == "torch" else YetAnotherCentralizedCriticModel)
action_space = Discrete(2)
observer_space = Dict({
"own_obs": Discrete(6),
# These two fields are filled in by the CentralCriticObserver, and are
# not used for inference, only for training.
"opponent_obs": Discrete(6),
"opponent_action": Discrete(2),
})
config = {
"env": TwoStepGame,
"batch_mode": "complete_episodes",
"callbacks": FillInActions,
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"num_workers": 0,
"multiagent": {
"policies": {
"pol1": (None, observer_space, action_space, {}),
"pol2": (None, observer_space, action_space, {}),
},
"policy_mapping_fn": (
lambda aid, **kwargs: "pol1" if aid == 0 else "pol2"),
"observation_fn": central_critic_observer,
},
"model": {
"custom_model": "cc_model",
},
"framework": args.framework,
}
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
results = tune.run("PPO", config=config, stop=stop, verbose=1)
if args.as_test:
check_learning_achieved(results, args.stop_reward)
|
en
| 0.94022
|
An example of implementing a centralized critic with ObservationFunction. The advantage of this approach is that it's very simple and you don't have to change the algorithm at all -- just use callbacks and a custom model. However, it is a bit less principled in that you have to change the agent observation spaces to include data that is only used at train time. See also: centralized_critic.py for an alternative approach that instead modifies the policy to add a centralized value function. Fills in the opponent actions info in the training batches. # set the opponent actions into the observation Rewrites the agent obs to include opponent data for training. # filled in by FillInActions # filled in by FillInActions # These two fields are filled in by the CentralCriticObserver, and are # not used for inference, only for training. # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
| 2.900671
| 3
|
Tests/test_dict.py
|
hackf5/ironpython3
| 0
|
6626011
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
#Regression: CodePlex 15715
#Do not move or remove these two lines
x = dir(dict)
x = dir(dict.fromkeys)
import collections
import os
import unittest
import sys
from iptest import IronPythonTestCase, is_cli, path_modifier, run_test, skipUnlessIronPython, source_root
class DictTest(IronPythonTestCase):
    def test_sanity(self):
        """Basic dict contract: iteration, lookup, get(), and setdefault()."""
        items = 0
        d = {'key1': 'value1', 'key2': 'value2'}
        # items() must yield each pair exactly once.
        for key, value in d.items():
            items += 1
            self.assertTrue((key, value) == ('key1', 'value1') or (key,value) == ('key2', 'value2'))
        self.assertTrue(items == 2)
        self.assertTrue(d["key1"] == "value1")
        self.assertTrue(d["key2"] == "value2")
        def getitem(d,k):
            # Helper so assertRaises can capture the KeyError from a subscript.
            d[k]
        self.assertRaises(KeyError, getitem, d, "key3")
        # get() returns None (or the supplied default) instead of raising,
        # and does not insert the key.
        x = d.get("key3")
        self.assertTrue(x == None)
        self.assertTrue(d["key1"] == d.get("key1"))
        self.assertTrue(d["key2"] == d.get("key2"))
        self.assertTrue(d.get("key3", "value3") == "value3")
        self.assertRaises(KeyError, getitem, d, "key3")
        # setdefault() inserts the key when missing and returns the stored value.
        self.assertTrue(d.setdefault("key3") == None)
        self.assertTrue(d.setdefault("key4", "value4") == "value4")
        self.assertTrue(d["key3"] == None)
        self.assertTrue(d["key4"] == "value4")
        # Keyword-argument construction.
        d2= dict(key1 = 'value1', key2 = 'value2')
        self.assertTrue(d2['key1'] == 'value1')
def test_dict_inherit(self):
class MyDict(dict):
def __setitem__(self, *args):
super(MyDict, self).__setitem__(*args)
a = MyDict()
with self.assertRaises(SystemError): # TODO: remove assertRaises when https://github.com/IronLanguages/ironpython3/issues/456 is fixed
a[0] = 'abc'
self.assertEqual(a[0], 'abc')
with self.assertRaises(SystemError): # TODO: remove assertRaises when https://github.com/IronLanguages/ironpython3/issues/456 is fixed
a[None] = 3
self.assertEqual(a[None], 3)
class MyDict(dict):
def __setitem__(self, *args):
dict.__setitem__(self, *args)
a = MyDict()
a[0] = 'abc'
self.assertEqual(a[0], 'abc')
a[None] = 3
self.assertEqual(a[None], 3)
def test_function_environments(self):
"""verify function environments, FieldIdDict, custom old class dict, and module environments all local identical to normal dictionaries"""
x = type(type.__dict__)({})
class C: pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
xx = 'abc'
yy = 'def'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
x4 = 'abc'
x5 = 'def'
x6 = 'aaa'
x7 = 'aaa'
x0 = 'abc'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
x4 = 'abc'
x5 = 'def'
x6 = 'aaa'
x7 = 'aaa'
x0 = 'abc'
x10 = 'abc'
x11 = 'def'
x12 = 'aaa'
x13 = 'aaa'
x14 = 'abc'
x15 = 'def'
x16 = 'aaa'
x17 = 'aaa'
x10 = 'abc'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
x4 = 'abc'
x5 = 'def'
x6 = 'aaa'
x7 = 'aaa'
x0 = 'abc'
x10 = 'abc'
x11 = 'def'
x12 = 'aaa'
x13 = 'aaa'
x14 = 'abc'
x15 = 'def'
x16 = 'aaa'
x17 = 'aaa'
x10 = 'abc'
x20 = 'abc'
x21 = 'def'
x22 = 'aaa'
x23 = 'aaa'
x24 = 'abc'
x25 = 'def'
x26 = 'aaa'
x27 = 'aaa'
x20 = 'abc'
x110 = 'abc'
x111 = 'def'
x112 = 'aaa'
x113 = 'aaa'
x114 = 'abc'
x115 = 'def'
x116 = 'aaa'
x117 = 'aaa'
x110 = 'abc'
pass
self.assertEqual(dir(x), dir(C.__dict__))
x = {}
a = C()
self.assertEqual(dir(x), dir(a.__dict__))
a = C()
a.abc = 'def'
a.ghi = 'def'
self.assertEqual(dir(x), dir(a.__dict__))
#####################################################################
## coverage for CustomFieldIdDict
def contains(self, d, *attrs):
for attr in attrs:
self.assertTrue(attr in d, "didn't find " + str(attr) + " in " + repr(d))
self.assertTrue(d.__contains__(attr), "didn't find " + str(attr) + " in " + repr(d))
def repeat_on_class(self, C):
newStyle = "__class__" in dir(C)
c = C()
d = C.__dict__
self.contains(d, '__doc__', 'x1', 'f1')
## recursive entries & repr
C.abc = d
if not newStyle:
x = repr(d) # shouldn't stack overflow
else:
x = str(d)
self.assertTrue(x.find("'abc'") != -1)
if not newStyle:
self.assertTrue(x.find("{...}") != -1)
else:
self.assertTrue(x.find("'abc': <mappingproxy") != -1)
del C.abc
keys, values = d.keys(), d.values()
self.assertEqual(len(keys), len(values))
self.contains(keys, '__doc__', 'x1', 'f1')
## initial length
l = len(d)
self.assertTrue(l > 3)
# add more attributes
def f2(self): return 22
def f3(self): return 33
if not newStyle:
d['f2'] = f2
d['x2'] = 20
self.assertEqual(len(d), l + 2)
self.assertEqual(d.__len__(), l + 2)
if not newStyle:
self.contains(d, '__doc__', 'x1', 'x2', 'f1', 'f2')
self.contains(d.keys(), '__doc__', 'x1', 'x2', 'f1', 'f2')
else:
self.contains(d, '__doc__', 'x1', 'f1')
self.contains(d.keys(), '__doc__', 'x1', 'f1')
self.assertEqual(d['x1'], 10)
if not newStyle:
self.assertEqual(d['x2'], 20)
self.assertEqual(d['f1'](c), 11)
if not newStyle:
self.assertEqual(d['f2'](c), 22)
self.assertRaises(KeyError, lambda : d['x3'])
self.assertRaises(KeyError, lambda : d['f3'])
## get
self.assertEqual(d.get('x1'), 10)
if not newStyle:
self.assertEqual(d.get('x2'), 20)
self.assertEqual(d.get('f1')(c), 11)
if not newStyle:
self.assertEqual(d.get('f2')(c), 22)
self.assertEqual(d.get('x3'), None)
self.assertEqual(d.get('x3', 30), 30)
self.assertEqual(d.get('f3'), None)
self.assertEqual(d.get('f3', f3)(c), 33)
if not newStyle:
## setdefault
self.assertEqual(d.setdefault('x1'), 10)
self.assertEqual(d.setdefault('x1', 30), 10)
self.assertEqual(d.setdefault('f1')(c), 11)
self.assertEqual(d.setdefault('f1', f3)(c), 11)
self.assertEqual(d.setdefault('x2'), 20)
self.assertEqual(d.setdefault('x2', 30), 20)
self.assertEqual(d.setdefault('f2')(c), 22)
self.assertEqual(d.setdefault('f2', f3)(c), 22)
self.assertEqual(d.setdefault('x3', 30), 30)
self.assertEqual(d.setdefault('f3', f3)(c), 33)
if not newStyle:
## pop
l1 = len(d)
self.assertEqual(d.pop('x1', 30), 10)
self.assertEqual(len(d), l1-1)
l1 = len(d)
self.assertEqual(d.pop('x2', 30), 20)
self.assertEqual(len(d), l1-1)
l1 = len(d)
self.assertEqual(d.pop("xx", 70), 70)
self.assertEqual(len(d), l1)
## in
self.assertTrue('f1' in d)
if not newStyle:
self.assertTrue('f2' in d)
self.assertTrue('f3' in d)
self.assertTrue('fx' not in d)
# subclassing, overriding __getitem__, and passing to
# eval
dictType = type(d)
try:
class newDict(dictType):
def __getitem__(self, key):
if key == 'abc':
return 'def'
return super(self, dictType).__getitem__(key)
except TypeError as ex:
if not newStyle:
self.assertTrue(ex.message.find('cannot derive from sealed or value types') != -1, ex.message)
else:
self.assertTrue(ex.message.find('Error when calling the metaclass bases') != -1, ex.message)
else:
try:
nd = newDict()
except TypeError as e:
if is_cli:
import clr
if clr.GetClrType(dictType).ToString() == 'IronPython.Runtime.Types.NamespaceDictionary':
self.fail("Error! Threw TypeError when creating newDict deriving from NamespaceDictionary")
else:
self.assertEqual(eval('abc', {}, nd), 'def')
############### IN THIS POINT, d LOOKS LIKE ###############
## {'f1': f1, 'f2': f2, 'f3': f3, 'x3': 30, '__doc__': 'This is comment', '__module__': '??'}
## iteritems
lk = []
for (k, v) in d.items():
lk.append(k)
exp = None
if k == 'f1': exp = 11
elif k == 'f2': exp == 22
elif k == 'f3': exp == 33
if exp is not None:
self.assertEqual(v(c), exp)
if not newStyle:
self.contains(lk, 'f1', 'f2', 'f3', 'x3', '__doc__')
else:
self.contains(lk, 'f1', '__module__', '__dict__', 'x1', '__weakref__', '__doc__')
# iterkeys
lk = []
for k in d.keys():
lk.append(k)
if not newStyle:
self.contains(lk, 'f1', 'f2', 'f3', 'x3', '__doc__')
else:
self.contains(lk, 'f1', '__module__', '__dict__', 'x1', '__weakref__', '__doc__')
# itervalues
for v in d.values():
if callable(v):
exp = v(c)
self.assertTrue(exp in [11, 22, 33])
elif v is str:
self.assertTrue(v == 'This is comment')
elif v is int:
self.assertTrue(v == 30)
if not newStyle:
## something fun before destorying it
l1 = len(d)
d[dict] = 3 # object as key
self.assertEqual(len(d), l1+1)
l1 = len(d)
d[int] = 4 # object as key
if is_cli:
print("CodePlex 16811")
return
self.assertEqual(len(d), l1+1)
l1 = len(d)
del d[int]
self.assertEqual(len(d), l1-1)
l1 = len(d)
del d[dict]
self.assertEqual(len(d), l1-1)
l1 = len(d)
del d['x3']
self.assertEqual(len(d), l1-1)
l1 = len(d)
d.popitem()
self.assertEqual(len(d), l1-1)
## object as key
d[int] = int
d[str] = "str"
self.assertEqual(d[int], int)
self.assertEqual(d[str], "str")
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(d.__len__(), 0)
def test_customfieldiddict_old(self):
class C:
'''This is comment'''
x1 = 10
def f1(self): return 11
self.repeat_on_class(C)
def test_customfieldiddict_new(self):
class C(object):
'''This is comment'''
x1 = 10
def f1(self): return 11
self.repeat_on_class(C)
def test_customfieldiddict_fromkeys(self):
def new_repeat_on_class(C):
d1 = C.__dict__
l1 = len(d1)
d2 = dict.fromkeys(d1)
l2 = len(d2)
self.assertEqual(l1, l2)
self.assertEqual(d2['x'], None)
self.assertEqual(d2['f'], None)
d2 = dict.fromkeys(d1, 10)
l2 = len(d2)
self.assertEqual(l1, l2)
self.assertEqual(d2['x'], 10)
self.assertEqual(d2['f'], 10)
class C:
x = 10
def f(self): pass
new_repeat_on_class(C)
class C(object):
x = 10
def f(self): pass
new_repeat_on_class(C)
def test_customfieldiddict_compare(self):
def new_repeat_on_class(C1, C2):
d1 = C1.__dict__
d2 = C2.__dict__
# object as key
self.assertTrue(d1 != d2)
self.assertTrue([x for x in d1] == [x for x in d2])
class C1:
x = 10
def f(self): pass
class C2:
x = 10
def f(self): pass
new_repeat_on_class(C1, C2)
def t_func():
class C1(object):
x = 10
def f(self): pass
C1.__dict__[1] = 2
self.assertRaises(TypeError, t_func)
@skipUnlessIronPython()
def test_dict_to_idict(self):
"""verify dicts can be converted to IDictionaries"""
self.load_iron_python_test()
from IronPythonTest import DictConversion
class MyDict(dict): pass
class KOld: pass
class KNew(object): pass
class KOldDerived(KOld): pass
class KNewDerived(KNew): pass
test_dicts = [
{},
{1:100},
{None:None},
{object:object},
{1:100, 2:200},
{1:100, 2:200, 3:300, 4:400},
MyDict.__dict__,
KOld.__dict__,
KNew.__dict__,
KOldDerived.__dict__,
KNewDerived.__dict__,
]
for temp_dict in test_dicts:
expected = list(temp_dict.keys()) + list(temp_dict.values())
expected.sort()
to_idict = list(DictConversion.ToIDictionary(temp_dict))
to_idict.sort()
self.assertEqual(to_idict, expected)
to_idict = list(DictConversion.ToIDictionary(MyDict(temp_dict)))
to_idict.sort()
self.assertEqual(to_idict, expected)
def test_fieldiddict(self):
"""coverage for FieldIdDict"""
def func(): pass
d = func.__dict__
d['x1'] = 10
d['f1'] = lambda : 11
d[int] = "int"
d[dict] = {2:20}
keys, values = d.keys(), d.values()
self.assertEqual(len(keys), len(values))
self.contains(keys, 'x1', 'f1', int, dict)
## initial length
l = len(d)
self.assertTrue(l == 4)
# add more attributes
d['x2'] = 20
d['f2'] = lambda x: 22
self.assertEqual(len(d), l + 2)
self.assertEqual(d.__len__(), l + 2)
self.contains(d, 'x1', 'x2', 'f1', 'f2', int, dict)
self.contains(d.keys(), 'x1', 'x2', 'f1', 'f2', int, dict)
self.assertEqual(d['x1'], 10)
self.assertEqual(d['x2'], 20)
self.assertEqual(d['f1'](), 11)
self.assertEqual(d['f2'](9), 22)
self.assertRaises(KeyError, lambda : d['x3'])
self.assertRaises(KeyError, lambda : d['f3'])
## get
self.assertEqual(d.get('x1'), 10)
self.assertEqual(d.get('x2'), 20)
self.assertEqual(d.get('f1')(), 11)
self.assertEqual(d.get('f2')(1), 22)
def f3(): return 33
self.assertEqual(d.get('x3'), None)
self.assertEqual(d.get('x3', 30), 30)
self.assertEqual(d.get('f3'), None)
self.assertEqual(d.get('f3', f3)(), 33)
## setdefault
self.assertEqual(d.setdefault('x1'), 10)
self.assertEqual(d.setdefault('x1', 30), 10)
self.assertEqual(d.setdefault('f1')(), 11)
self.assertEqual(d.setdefault('f1', f3)(), 11)
self.assertEqual(d.setdefault('x2'), 20)
self.assertEqual(d.setdefault('x2', 30), 20)
self.assertEqual(d.setdefault('f2')(1), 22)
self.assertEqual(d.setdefault('f2', f3)(1), 22)
self.assertEqual(d.setdefault('x3', 30), 30)
self.assertEqual(d.setdefault('f3', f3)(), 33)
## pop
l1 = len(d); self.assertEqual(d.pop('x1', 30), 10)
self.assertEqual(len(d), l1-1)
l1 = len(d); self.assertEqual(d.pop('x2', 30), 20)
self.assertEqual(len(d), l1-1)
l1 = len(d); self.assertEqual(d.pop(int, 70), "int")
self.assertEqual(len(d), l1-1)
l1 = len(d); self.assertEqual(d.pop("xx", 70), 70)
self.assertEqual(len(d), l1)
## in
self.assertTrue('f1' in d)
self.assertTrue('f2' in d)
self.assertTrue('f3' in d)
self.assertTrue(dict in d)
self.assertTrue('fx' not in d)
############### IN THIS POINT, d LOOKS LIKE ###############
# f1, f2, f3, x3, dict as keys
## iteritems
lk = []
for (k, v) in d.items():
lk.append(k)
if k == 'f1': self.assertEqual(v(), 11)
elif k == 'f2': self.assertEqual(v(1), 22)
elif k == 'f3': self.assertEqual(v(), 33)
elif k == 'x3': self.assertEqual(v, 30)
elif k == dict: self.assertEqual(v, {2:20})
self.contains(lk, 'f1', 'f2', 'f3', 'x3', dict)
# iterkeys
lk = []
for k in d.keys():
lk.append(k)
self.contains(lk, 'f1', 'f2', 'f3', 'x3', dict)
# itervalues
for v in d.values():
if callable(v):
try: exp = v(1)
except: pass
try: exp = v()
except: pass
self.assertTrue(exp in [11, 22, 33])
elif v is dict:
self.assertTrue(v == {2:20})
elif v is int:
self.assertTrue(v == 30)
## something fun before destorying it
l1 = len(d); d[int] = 4 # object as key
self.assertEqual(len(d), l1+1)
l1 = len(d); del d[int]
self.assertEqual(len(d), l1-1)
l1 = len(d); del d[dict]
self.assertEqual(len(d), l1-1)
l1 = len(d); del d['x3']
self.assertEqual(len(d), l1-1)
l1 = len(d); popped_item = d.popitem()
self.assertEqual(len(d), l1-1)
## object as key
d[int] = int
d[str] = "str"
self.assertEqual(d[int], int)
self.assertEqual(d[str], "str")
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(d.__len__(), 0)
d[int] = int
self.assertEqual(len(d), 1)
## comparison
def func1(): pass
def func2(): pass
d1 = func1.__dict__
d2 = func2.__dict__
d1['x'] = 10
d2['x'] = 30
d1[int] = int
d2[int] = int
# object as key
self.assertTrue(d1 != d2)
d2['x'] = 10
self.assertTrue(d1 == d2)
def test_subclass_dict_override__init__(self):
"""subclassing dict, overriding __init__"""
class foo(dict):
def __init__(self, abc):
self.abc = abc
a = foo('abc')
self.assertEqual(a.abc, 'abc')
# make sure dict.__init__ works
a = {}
a.__init__({'abc':'def'})
self.assertEqual(a, {'abc':'def'})
a.__init__({'abcd':'defg'})
self.assertEqual(a, {'abc':'def', 'abcd':'defg'})
# keyword arg contruction
# single kw-arg, should go into dict
a = dict(b=2)
self.assertEqual(a, {'b':2})
# dict value to init, Plus kw-arg
a = dict({'a':3}, b=2)
self.assertEqual(a, {'a':3, 'b':2})
# more than one
a = dict({'a':3}, b=2, c=5)
self.assertEqual(a, {'a':3, 'b':2, 'c':5})
try:
dict({'a':3}, {'b':2}, c=5)
self.fail('Should not reach this code')
except TypeError: pass
@skipUnlessIronPython()
def test_DictionaryUnionEnumerator(self):
class C(object): pass
c = C()
d = c.__dict__
import System
# Check empty enumerator
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Add non-string attribute
d[1] = 100
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), True)
self.assertEqual(e.Key, 1)
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Add string attribute
c.attr = 100
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), True)
key1 = e.Key
self.assertEqual(e.MoveNext(), True)
key2 = e.Key
self.assertEqual((key1, key2) == (1, "attr") or (key1, key2) == ("attr", 1), True)
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Remove non-string attribute
del d[1]
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), True)
self.assertEqual(e.Key, "attr")
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Remove string attribute and check empty enumerator
del c.attr
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
    def test_same_but_different(self):
        """Test case checks that when two values who are logically different but share hash code & equality result in only a single entry"""
        # NOTE(review): `long` is not a Python 3 builtin — presumably supplied
        # by the iptest compatibility layer; confirm before running on CPython.
        self.assertEqual({-10:0, long(-10):1}, {-10:1})
def test_module_dict(self):
me = sys.modules[__name__]
moduleDict = me.__dict__
self.assertTrue(isinstance(moduleDict, collections.Mapping))
self.assertTrue(moduleDict.__contains__("DictTest"))
self.assertEqual(moduleDict["DictTest"], DictTest)
self.assertTrue(moduleDict.keys().__contains__("DictTest"))
def test_eval_locals_simple(self):
class Locals(dict):
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError as e:
return 'abc'
locs = Locals()
self.assertEqual(eval("unknownvariable", globals(), locs), 'abc')
def test_key_error(self):
class c: pass
class d(object): pass
for key in ['abc', 1, c(), d(), 1.0, long(1)]:
try:
{}[key]
except KeyError as e:
self.assertEqual(e.args[0], key)
try:
del {}[key]
except KeyError as e:
self.assertEqual(e.args[0], key)
try:
set([]).remove(key)
except KeyError as e:
self.assertEqual(e.args[0], key)
    def test_contains(self):
        """`in` on a dict subclass must dispatch to the overridden
        __contains__, while plain item assignment must not."""
        class ContainsDict(dict):
            was_called = False  # flipped the first time __contains__ runs
            def __contains__(self, key):
                ContainsDict.was_called = True
                return dict.__contains__(self, key)
        md = ContainsDict()
        md["stuff"] = 1
        # __setitem__ must not have routed through __contains__.
        self.assertEqual(ContainsDict.was_called, False)
        self.assertEqual("nothing" in md, False)
        self.assertEqual("stuff" in md, True)
        self.assertEqual(ContainsDict.was_called, True)
def test_stdtypes_dict(self):
temp_types = [ int,
long,
float,
complex,
bool,
bytes,
str,
list,
tuple,
range,
dict,
set,
frozenset,
type,
object,
] #+ [eval("types." + x) for x in dir(types) if x.endswith("Type")]
temp_keys = [ None, -1, 0, 1, 2.34, "", "None", int, object, self.test_stdtypes_dict, [], (None,)]
for temp_type in temp_types:
for temp_key in temp_keys:
def tFunc(): temp_type.__dict__[temp_key] = 0
self.assertRaises(TypeError, tFunc)
def test_main_dict(self):
import __main__
#just make sure this doesn't throw...
t_list = []
for w in __main__.__dict__: t_list.append(w)
t_list.sort()
g_list = list(globals().keys())
g_list.sort()
self.assertEqual(t_list, g_list)
def test_update(self):
test_cases = (
#N changes with an empty dict
({}, (), {}, {}),
({}, ({'k':'v'},), {}, {'k':'v'}),
({}, (), {'k':'v'}, {'k':'v'}),
({}, ({'k':'v', 'x':'y'},), {}, {'k':'v', 'x':'y'}),
({}, (), {'k':'v', 'x':'y'}, {'k':'v', 'x':'y'}),
({}, ({'k':'v'},), {'x':'y'}, {'k':'v', 'x':'y'}),
#N changes with one pre-existing dict element
({'a':'b'}, (), {}, {'a':'b'}),
({'a':'b'}, ({'k':'v'},), {}, {'a':'b', 'k':'v'}),
({'a':'b'}, (), {'k':'v'}, {'a':'b', 'k':'v'}),
({'a':'b'}, ({'k':'v', 'x':'y'},), {}, {'a':'b', 'k':'v', 'x':'y'}),
({'a':'b'}, (), {'k':'v', 'x':'y'}, {'a':'b', 'k':'v', 'x':'y'}),
({'a':'b'}, ({'k':'v'},), {'x':'y'}, {'a':'b', 'k':'v', 'x':'y'}),
#N changes with one pre-existing dict element
({'a':'b', 'c':'d'}, (), {}, {'a':'b', 'c':'d'}),
({'a':'b', 'c':'d'}, ({'k':'v'},), {}, {'a':'b', 'c':'d', 'k':'v'}),
({'a':'b', 'c':'d'}, (), {'k':'v'}, {'a':'b', 'c':'d', 'k':'v'}),
({'a':'b', 'c':'d'}, ({'k':'v', 'x':'y'},), {}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
({'a':'b', 'c':'d'}, (), {'k':'v', 'x':'y'}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
({'a':'b', 'c':'d'}, ({'k':'v'},), {'x':'y'}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
)
for start_dict, dict_param, kw_params, expected in test_cases:
try:
start_dict.update(*dict_param, **kw_params)
except Exception as e:
print("ERROR:", start_dict, ".update(*", dict_param, ", **", kw_params, ") failed!")
raise e
self.assertEqual(start_dict, expected)
def test_update_argnames(self):
expected = {"b": 1}
result = {}
result.update(b=1)
self.assertEqual(result, expected)
expected = {"other": 1}
result = {}
result.update(other=1)
self.assertEqual(result, expected)
expected = {"other": 1, "otherArgs": 2}
result = {}
result.update({"other": 1}, otherArgs=2)
self.assertEqual(result, expected)
def test_update_no_setitem(self):
# update doesn't call __setitem__
class mydict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.setcalled = False
def __setitem__(self, index, value):
self.setcalled = True
raise Exception()
d = mydict()
d.update(mydict(abc=2))
self.assertEqual(d.setcalled, False)
d.update({'foo': 2})
self.assertEqual(d.setcalled, False)
def test_keys_not_as_property(self):
def f():
mapping = { 10: 10}
for k in mapping.keys: pass
if is_cli:
self.assertRaisesMessage(TypeError,
"iteration over non-sequence of type builtin_function_or_method",
f)
else:
self.assertRaisesMessage(TypeError,
"'builtin_function_or_method' object is not iterable",
f)
def test_dict_class_dictionary(self):
class KOld:
KLASS_MEMBER = 3.14
def aFunc(): pass
def aMethod(self): pass
class KNew(object):
KLASS_MEMBER = 3.14
def aFunc(): pass
def aMethod(self): pass
for K in [KOld, KNew]:
temp_dict = dict(K.__dict__)
#class member has the correct value?
self.assertEqual(K.__dict__["KLASS_MEMBER"], 3.14)
self.assertEqual(temp_dict["KLASS_MEMBER"], 3.14)
#methods show up?
for func_name in ["aFunc", "aMethod"]:
self.assertTrue(func_name in K.__dict__.keys())
self.assertTrue(func_name in temp_dict.keys())
expected_keys = [ '__module__', 'KLASS_MEMBER', 'aFunc', 'aMethod',
'__dict__',
'__weakref__', '__doc__']
for expected_key in expected_keys:
self.assertTrue(expected_key in KNew.__dict__, expected_key)
self.assertTrue(expected_key in temp_dict, expected_key)
def test_cp15882(self):
x = {}
#negative cases
for bad_stuff in [
[1],
{}, {1:1}, {(1,2): 1},
set()]:
try:
x[bad_stuff] = 1
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
self.assertEqual(x, {})
#positive cases
for stuff in [
(), (None),
(-1), (0), (1), (2),
(1, 2), (1, 2, 3),
range(3), 1j, object, self.test_cp15882,
(range(3)), (1j), (object), (self.test_cp15882),
(()), ((())),
]:
for i in range(2):
x[stuff] = 1
self.assertEqual(x[stuff], 1)
del x[stuff]
self.assertEqual(x, {})
self.assertRaises(KeyError, x.__delitem__, stuff)
for i in range(2):
x[stuff] = 1
self.assertEqual(x[stuff], 1)
x.__delitem__(stuff)
self.assertEqual(x, {})
self.assertRaises(KeyError, x.__delitem__, stuff)
def test_cp35348(self):
empty = {} # underlying type: EmptyDictionaryStorage
emptied = {1:1} # underlying type: CommonDictionaryStorage
del emptied[1]
not_empty = {42:1}
#negative cases
for bad_stuff in [
[1],
{}, {1:1}, {(1,2): 1},
set()]:
try:
dummy = bad_stuff in empty
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
pass
try:
dummy = bad_stuff in emptied
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
pass
try:
dummy = bad_stuff in not_empty
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
pass
class C1(object):
pass
c1=C1()
class C2:
pass
c2=C2()
#positive cases
for stuff in [
(), (None),
(-1), (0), (1), (2),
(1, 2), (1, 2, 3),
range(3), 1j, object, self.test_cp35348,
(range(3)), (1j), (object), (self.test_cp35348),
(()), ((())), c1, c2,
]:
self.assertFalse(stuff in empty)
self.assertFalse(stuff in emptied)
self.assertFalse(stuff in not_empty)
for stuff in [
(), (None),
(-1), (0), (1), (2),
(1, 2), (1, 2, 3),
range(3), 1j, object, self.test_cp35348,
(range(3)), (1j), (object), (self.test_cp35348),
(()), ((())), c1, c2,
]:
emptied[stuff] = 'test_cp35348'
self.assertTrue(stuff in emptied)
del emptied[stuff]
self.assertEqual(len(empty), 0)
not_empty[stuff] = 'test_cp35348'
self.assertTrue(stuff in not_empty)
del not_empty[stuff]
self.assertEqual(len(not_empty), 1)
def test_cp35667(self):
    """Type objects work as dict keys for both membership tests and lookups."""
    # FIX: dropped the 'except Exception: assertTrue(False, ...)' wrapper.
    # unittest already reports an unexpected exception with its full
    # traceback; assertTrue(False, "%s" % ex) only obscured the failure site.
    self.assertFalse(type([]) in {})
    self.assertFalse(type({}) in {})
    d = {list: 1, dict: 2}
    self.assertTrue(list in d)
    self.assertTrue(dict in d)
def test_comparison_operators(self):
    """Ordering comparisons between dicts (or dict and anything) are unsupported:
    the rich-comparison slots return NotImplemented."""
    left = {2: 3}
    others = ({2: 4}, None, 1, 1.0, long(1), (), [], 1j, "abc")
    for slot_name in ('__lt__', '__gt__', '__le__', '__ge__'):
        slot = getattr(left, slot_name)
        for rhs in others:
            self.assertEqual(slot(rhs), NotImplemented)
def test_cp16519(self):
    """Dict-valued attributes can be attached to and removed from module objects."""
    # this module itself
    this_module = __import__(__name__)
    this_module.Dict = {"1": "a"}
    self.assertEqual(this_module.Dict["1"], "a")
    del this_module.Dict
    # a builtin module
    import sys
    sys.Dict = {"1": "b"}
    self.assertEqual(sys.Dict["1"], "b")
    del sys.Dict
    # a package imported from the Tests directory
    with path_modifier(os.path.join(source_root(), 'Tests')):
        import testpkg1
        testpkg1.Dict = {"1": "c"}
        self.assertEqual(testpkg1.Dict["1"], "c")
        del testpkg1.Dict
def test_dict_equality_lookup(self):
    """dictionaries check object equality before running normal equality"""
    class NeverEqual(object):
        # reports unequal to everything, including itself
        def __eq__(self, other):
            return False
        def __ne__(self, other):
            return True
        def __hash__(self):
            return 0
    key = NeverEqual()
    d = {}
    d[key] = 42
    # lookup still succeeds: the dict short-circuits on identity before __eq__
    self.assertEqual(d[key], 42)
def test_missing(self):
    """get/setdefault/pop and dict equality never route through __missing__."""
    class Foo(dict):
        def __missing__(self, key):
            raise TypeError('Foo.__missing__ should not be called')
    probe = Foo()
    # none of these lookup APIs consult __missing__
    self.assertEqual(probe.setdefault(1, 2), 2)
    self.assertEqual(probe.get(2), None)
    self.assertEqual(probe.get(2, 3), 3)
    self.assertRaises(KeyError, probe.pop, 3)
    self.assertEqual(probe.pop(3, 4), 4)
    # equality against a plain dict compares contents, not __missing__
    plain = {2: 3}
    for other in (Foo({'abc': 3}), Foo()):
        self.assertTrue(plain != other)
        self.assertTrue(other != plain)
        self.assertEqual(plain.__eq__(other), False)
        self.assertEqual(other.__eq__(plain), False)
def test_cp29914(self):
    """'o' is accepted as an ordinary keyword key by the dict constructor."""
    built = dict(o=42)
    self.assertEqual(built, {'o': 42})
def test_cp32527(self):
    '''test for duplicate key in dict under specific hash value conditions'''
    # Five string keys give the dict 7 internal buckets, and the hashes of
    # 'a7' and 'a8' land on the same starting bucket index.
    d = {'1': 1, '2': 1, '3': 1, 'a7': 1, 'a8': 1}
    # free (recycle) the 'a7' bucket ...
    d.pop('a7')
    # ... then update 'a8', whose probe sequence now passes the recycled slot
    d['a8'] = 5
    # a correct implementation keeps a single 'a8' bucket; a buggy one would
    # insert a second 'a8' entry into the recycled slot
    self.assertEqual(list(d.keys()).count('a8'), 1)
@skipUnlessIronPython()
def test_cp34770(self):
# Entries added with Int64/UInt64 should be findable with Python long
from System import Int64, UInt64
i64 = Int64(1110766100758387874)
u64 = UInt64(9223372036854775808)
m = {}
m[i64] = 'a'
self.assertEqual(m[long(1110766100758387874)], 'a')
m[u64] = 'b'
self.assertEqual(m[long(9223372036854775808)], 'b')
run_test(__name__)
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
#Regression: CodePlex 15715
#Do not move or remove these two lines
x = dir(dict)
x = dir(dict.fromkeys)
import collections
import os
import unittest
import sys
from iptest import IronPythonTestCase, is_cli, path_modifier, run_test, skipUnlessIronPython, source_root
class DictTest(IronPythonTestCase):
def test_sanity(self):
items = 0
d = {'key1': 'value1', 'key2': 'value2'}
for key, value in d.items():
items += 1
self.assertTrue((key, value) == ('key1', 'value1') or (key,value) == ('key2', 'value2'))
self.assertTrue(items == 2)
self.assertTrue(d["key1"] == "value1")
self.assertTrue(d["key2"] == "value2")
def getitem(d,k):
d[k]
self.assertRaises(KeyError, getitem, d, "key3")
x = d.get("key3")
self.assertTrue(x == None)
self.assertTrue(d["key1"] == d.get("key1"))
self.assertTrue(d["key2"] == d.get("key2"))
self.assertTrue(d.get("key3", "value3") == "value3")
self.assertRaises(KeyError, getitem, d, "key3")
self.assertTrue(d.setdefault("key3") == None)
self.assertTrue(d.setdefault("key4", "value4") == "value4")
self.assertTrue(d["key3"] == None)
self.assertTrue(d["key4"] == "value4")
d2= dict(key1 = 'value1', key2 = 'value2')
self.assertTrue(d2['key1'] == 'value1')
def test_dict_inherit(self):
class MyDict(dict):
def __setitem__(self, *args):
super(MyDict, self).__setitem__(*args)
a = MyDict()
with self.assertRaises(SystemError): # TODO: remove assertRaises when https://github.com/IronLanguages/ironpython3/issues/456 is fixed
a[0] = 'abc'
self.assertEqual(a[0], 'abc')
with self.assertRaises(SystemError): # TODO: remove assertRaises when https://github.com/IronLanguages/ironpython3/issues/456 is fixed
a[None] = 3
self.assertEqual(a[None], 3)
class MyDict(dict):
def __setitem__(self, *args):
dict.__setitem__(self, *args)
a = MyDict()
a[0] = 'abc'
self.assertEqual(a[0], 'abc')
a[None] = 3
self.assertEqual(a[None], 3)
def test_function_environments(self):
"""verify function environments, FieldIdDict, custom old class dict, and module environments all local identical to normal dictionaries"""
x = type(type.__dict__)({})
class C: pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
xx = 'abc'
yy = 'def'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
x4 = 'abc'
x5 = 'def'
x6 = 'aaa'
x7 = 'aaa'
x0 = 'abc'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
x4 = 'abc'
x5 = 'def'
x6 = 'aaa'
x7 = 'aaa'
x0 = 'abc'
x10 = 'abc'
x11 = 'def'
x12 = 'aaa'
x13 = 'aaa'
x14 = 'abc'
x15 = 'def'
x16 = 'aaa'
x17 = 'aaa'
x10 = 'abc'
pass
self.assertEqual(dir(x), dir(C.__dict__))
class C:
x0 = 'abc'
x1 = 'def'
x2 = 'aaa'
x3 = 'aaa'
x4 = 'abc'
x5 = 'def'
x6 = 'aaa'
x7 = 'aaa'
x0 = 'abc'
x10 = 'abc'
x11 = 'def'
x12 = 'aaa'
x13 = 'aaa'
x14 = 'abc'
x15 = 'def'
x16 = 'aaa'
x17 = 'aaa'
x10 = 'abc'
x20 = 'abc'
x21 = 'def'
x22 = 'aaa'
x23 = 'aaa'
x24 = 'abc'
x25 = 'def'
x26 = 'aaa'
x27 = 'aaa'
x20 = 'abc'
x110 = 'abc'
x111 = 'def'
x112 = 'aaa'
x113 = 'aaa'
x114 = 'abc'
x115 = 'def'
x116 = 'aaa'
x117 = 'aaa'
x110 = 'abc'
pass
self.assertEqual(dir(x), dir(C.__dict__))
x = {}
a = C()
self.assertEqual(dir(x), dir(a.__dict__))
a = C()
a.abc = 'def'
a.ghi = 'def'
self.assertEqual(dir(x), dir(a.__dict__))
#####################################################################
## coverage for CustomFieldIdDict
def contains(self, d, *attrs):
for attr in attrs:
self.assertTrue(attr in d, "didn't find " + str(attr) + " in " + repr(d))
self.assertTrue(d.__contains__(attr), "didn't find " + str(attr) + " in " + repr(d))
def repeat_on_class(self, C):
    """Shared driver: exercise C.__dict__ through the full mapping protocol.

    For old-style classes the __dict__ is a writable dict, so the mutation
    paths (setitem/setdefault/pop/del/clear) run; for new-style classes it
    is a read-only proxy and only the read paths run.

    FIXES applied relative to the original:
    - 'exp == 22' / 'exp == 33' were no-op comparisons instead of
      assignments, so f2/f3 results were never actually verified;
    - 'ex.message' does not exist on Python 3 exceptions (it raised
      AttributeError); str(ex) carries the same text.
    """
    newStyle = "__class__" in dir(C)
    c = C()
    d = C.__dict__
    self.contains(d, '__doc__', 'x1', 'f1')
    ## recursive entries & repr
    C.abc = d
    if not newStyle:
        x = repr(d)  # shouldn't stack overflow on the self-referencing entry
    else:
        x = str(d)
    self.assertTrue(x.find("'abc'") != -1)
    if not newStyle:
        self.assertTrue(x.find("{...}") != -1)
    else:
        self.assertTrue(x.find("'abc': <mappingproxy") != -1)
    del C.abc
    keys, values = d.keys(), d.values()
    self.assertEqual(len(keys), len(values))
    self.contains(keys, '__doc__', 'x1', 'f1')
    ## initial length
    l = len(d)
    self.assertTrue(l > 3)
    # add more attributes (only possible on the writable old-style dict)
    def f2(self): return 22
    def f3(self): return 33
    if not newStyle:
        d['f2'] = f2
        d['x2'] = 20
        self.assertEqual(len(d), l + 2)
        self.assertEqual(d.__len__(), l + 2)
    if not newStyle:
        self.contains(d, '__doc__', 'x1', 'x2', 'f1', 'f2')
        self.contains(d.keys(), '__doc__', 'x1', 'x2', 'f1', 'f2')
    else:
        self.contains(d, '__doc__', 'x1', 'f1')
        self.contains(d.keys(), '__doc__', 'x1', 'f1')
    self.assertEqual(d['x1'], 10)
    if not newStyle:
        self.assertEqual(d['x2'], 20)
    self.assertEqual(d['f1'](c), 11)
    if not newStyle:
        self.assertEqual(d['f2'](c), 22)
    self.assertRaises(KeyError, lambda : d['x3'])
    self.assertRaises(KeyError, lambda : d['f3'])
    ## get
    self.assertEqual(d.get('x1'), 10)
    if not newStyle:
        self.assertEqual(d.get('x2'), 20)
    self.assertEqual(d.get('f1')(c), 11)
    if not newStyle:
        self.assertEqual(d.get('f2')(c), 22)
    self.assertEqual(d.get('x3'), None)
    self.assertEqual(d.get('x3', 30), 30)
    self.assertEqual(d.get('f3'), None)
    self.assertEqual(d.get('f3', f3)(c), 33)
    if not newStyle:
        ## setdefault
        self.assertEqual(d.setdefault('x1'), 10)
        self.assertEqual(d.setdefault('x1', 30), 10)
        self.assertEqual(d.setdefault('f1')(c), 11)
        self.assertEqual(d.setdefault('f1', f3)(c), 11)
        self.assertEqual(d.setdefault('x2'), 20)
        self.assertEqual(d.setdefault('x2', 30), 20)
        self.assertEqual(d.setdefault('f2')(c), 22)
        self.assertEqual(d.setdefault('f2', f3)(c), 22)
        self.assertEqual(d.setdefault('x3', 30), 30)
        self.assertEqual(d.setdefault('f3', f3)(c), 33)
    if not newStyle:
        ## pop
        l1 = len(d)
        self.assertEqual(d.pop('x1', 30), 10)
        self.assertEqual(len(d), l1-1)
        l1 = len(d)
        self.assertEqual(d.pop('x2', 30), 20)
        self.assertEqual(len(d), l1-1)
        l1 = len(d)
        self.assertEqual(d.pop("xx", 70), 70)
        self.assertEqual(len(d), l1)
    ## in
    self.assertTrue('f1' in d)
    if not newStyle:
        self.assertTrue('f2' in d)
        self.assertTrue('f3' in d)
    self.assertTrue('fx' not in d)
    # subclassing, overriding __getitem__, and passing to
    # eval
    dictType = type(d)
    try:
        class newDict(dictType):
            def __getitem__(self, key):
                if key == 'abc':
                    return 'def'
                return super(self, dictType).__getitem__(key)
    except TypeError as ex:
        # FIX: Python 3 exceptions have no .message attribute.
        message = str(ex)
        if not newStyle:
            self.assertTrue(message.find('cannot derive from sealed or value types') != -1, message)
        else:
            self.assertTrue(message.find('Error when calling the metaclass bases') != -1, message)
    else:
        try:
            nd = newDict()
        except TypeError as e:
            if is_cli:
                import clr
                if clr.GetClrType(dictType).ToString() == 'IronPython.Runtime.Types.NamespaceDictionary':
                    self.fail("Error! Threw TypeError when creating newDict deriving from NamespaceDictionary")
        else:
            self.assertEqual(eval('abc', {}, nd), 'def')
    ############### IN THIS POINT, d LOOKS LIKE ###############
    ## {'f1': f1, 'f2': f2, 'f3': f3, 'x3': 30, '__doc__': 'This is comment', '__module__': '??'}
    ## iteritems
    lk = []
    for (k, v) in d.items():
        lk.append(k)
        exp = None
        if k == 'f1': exp = 11
        elif k == 'f2': exp = 22  # FIX: was 'exp == 22' (no-op comparison)
        elif k == 'f3': exp = 33  # FIX: was 'exp == 33' (no-op comparison)
        if exp is not None:
            self.assertEqual(v(c), exp)
    if not newStyle:
        self.contains(lk, 'f1', 'f2', 'f3', 'x3', '__doc__')
    else:
        self.contains(lk, 'f1', '__module__', '__dict__', 'x1', '__weakref__', '__doc__')
    # iterkeys
    lk = []
    for k in d.keys():
        lk.append(k)
    if not newStyle:
        self.contains(lk, 'f1', 'f2', 'f3', 'x3', '__doc__')
    else:
        self.contains(lk, 'f1', '__module__', '__dict__', 'x1', '__weakref__', '__doc__')
    # itervalues
    for v in d.values():
        if callable(v):
            exp = v(c)
            self.assertTrue(exp in [11, 22, 33])
        elif v is str:
            # NOTE(review): 'v is str' / 'v is int' compare identity with the
            # type objects, never with string/int values, so these branches
            # are dead as written — preserved as-is for behavior parity.
            self.assertTrue(v == 'This is comment')
        elif v is int:
            self.assertTrue(v == 30)
    if not newStyle:
        ## something fun before destroying it
        l1 = len(d)
        d[dict] = 3  # object as key
        self.assertEqual(len(d), l1+1)
        l1 = len(d)
        d[int] = 4  # object as key
        if is_cli:
            print("CodePlex 16811")
            return
        self.assertEqual(len(d), l1+1)
        l1 = len(d)
        del d[int]
        self.assertEqual(len(d), l1-1)
        l1 = len(d)
        del d[dict]
        self.assertEqual(len(d), l1-1)
        l1 = len(d)
        del d['x3']
        self.assertEqual(len(d), l1-1)
        l1 = len(d)
        d.popitem()
        self.assertEqual(len(d), l1-1)
        ## object as key
        d[int] = int
        d[str] = "str"
        self.assertEqual(d[int], int)
        self.assertEqual(d[str], "str")
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(d.__len__(), 0)
def test_customfieldiddict_old(self):
class C:
'''This is comment'''
x1 = 10
def f1(self): return 11
self.repeat_on_class(C)
def test_customfieldiddict_new(self):
class C(object):
'''This is comment'''
x1 = 10
def f1(self): return 11
self.repeat_on_class(C)
def test_customfieldiddict_fromkeys(self):
def new_repeat_on_class(C):
d1 = C.__dict__
l1 = len(d1)
d2 = dict.fromkeys(d1)
l2 = len(d2)
self.assertEqual(l1, l2)
self.assertEqual(d2['x'], None)
self.assertEqual(d2['f'], None)
d2 = dict.fromkeys(d1, 10)
l2 = len(d2)
self.assertEqual(l1, l2)
self.assertEqual(d2['x'], 10)
self.assertEqual(d2['f'], 10)
class C:
x = 10
def f(self): pass
new_repeat_on_class(C)
class C(object):
x = 10
def f(self): pass
new_repeat_on_class(C)
def test_customfieldiddict_compare(self):
def new_repeat_on_class(C1, C2):
d1 = C1.__dict__
d2 = C2.__dict__
# object as key
self.assertTrue(d1 != d2)
self.assertTrue([x for x in d1] == [x for x in d2])
class C1:
x = 10
def f(self): pass
class C2:
x = 10
def f(self): pass
new_repeat_on_class(C1, C2)
def t_func():
class C1(object):
x = 10
def f(self): pass
C1.__dict__[1] = 2
self.assertRaises(TypeError, t_func)
@skipUnlessIronPython()
def test_dict_to_idict(self):
"""verify dicts can be converted to IDictionaries"""
self.load_iron_python_test()
from IronPythonTest import DictConversion
class MyDict(dict): pass
class KOld: pass
class KNew(object): pass
class KOldDerived(KOld): pass
class KNewDerived(KNew): pass
test_dicts = [
{},
{1:100},
{None:None},
{object:object},
{1:100, 2:200},
{1:100, 2:200, 3:300, 4:400},
MyDict.__dict__,
KOld.__dict__,
KNew.__dict__,
KOldDerived.__dict__,
KNewDerived.__dict__,
]
for temp_dict in test_dicts:
expected = list(temp_dict.keys()) + list(temp_dict.values())
expected.sort()
to_idict = list(DictConversion.ToIDictionary(temp_dict))
to_idict.sort()
self.assertEqual(to_idict, expected)
to_idict = list(DictConversion.ToIDictionary(MyDict(temp_dict)))
to_idict.sort()
self.assertEqual(to_idict, expected)
def test_fieldiddict(self):
"""coverage for FieldIdDict"""
def func(): pass
d = func.__dict__
d['x1'] = 10
d['f1'] = lambda : 11
d[int] = "int"
d[dict] = {2:20}
keys, values = d.keys(), d.values()
self.assertEqual(len(keys), len(values))
self.contains(keys, 'x1', 'f1', int, dict)
## initial length
l = len(d)
self.assertTrue(l == 4)
# add more attributes
d['x2'] = 20
d['f2'] = lambda x: 22
self.assertEqual(len(d), l + 2)
self.assertEqual(d.__len__(), l + 2)
self.contains(d, 'x1', 'x2', 'f1', 'f2', int, dict)
self.contains(d.keys(), 'x1', 'x2', 'f1', 'f2', int, dict)
self.assertEqual(d['x1'], 10)
self.assertEqual(d['x2'], 20)
self.assertEqual(d['f1'](), 11)
self.assertEqual(d['f2'](9), 22)
self.assertRaises(KeyError, lambda : d['x3'])
self.assertRaises(KeyError, lambda : d['f3'])
## get
self.assertEqual(d.get('x1'), 10)
self.assertEqual(d.get('x2'), 20)
self.assertEqual(d.get('f1')(), 11)
self.assertEqual(d.get('f2')(1), 22)
def f3(): return 33
self.assertEqual(d.get('x3'), None)
self.assertEqual(d.get('x3', 30), 30)
self.assertEqual(d.get('f3'), None)
self.assertEqual(d.get('f3', f3)(), 33)
## setdefault
self.assertEqual(d.setdefault('x1'), 10)
self.assertEqual(d.setdefault('x1', 30), 10)
self.assertEqual(d.setdefault('f1')(), 11)
self.assertEqual(d.setdefault('f1', f3)(), 11)
self.assertEqual(d.setdefault('x2'), 20)
self.assertEqual(d.setdefault('x2', 30), 20)
self.assertEqual(d.setdefault('f2')(1), 22)
self.assertEqual(d.setdefault('f2', f3)(1), 22)
self.assertEqual(d.setdefault('x3', 30), 30)
self.assertEqual(d.setdefault('f3', f3)(), 33)
## pop
l1 = len(d); self.assertEqual(d.pop('x1', 30), 10)
self.assertEqual(len(d), l1-1)
l1 = len(d); self.assertEqual(d.pop('x2', 30), 20)
self.assertEqual(len(d), l1-1)
l1 = len(d); self.assertEqual(d.pop(int, 70), "int")
self.assertEqual(len(d), l1-1)
l1 = len(d); self.assertEqual(d.pop("xx", 70), 70)
self.assertEqual(len(d), l1)
## in
self.assertTrue('f1' in d)
self.assertTrue('f2' in d)
self.assertTrue('f3' in d)
self.assertTrue(dict in d)
self.assertTrue('fx' not in d)
############### IN THIS POINT, d LOOKS LIKE ###############
# f1, f2, f3, x3, dict as keys
## iteritems
lk = []
for (k, v) in d.items():
lk.append(k)
if k == 'f1': self.assertEqual(v(), 11)
elif k == 'f2': self.assertEqual(v(1), 22)
elif k == 'f3': self.assertEqual(v(), 33)
elif k == 'x3': self.assertEqual(v, 30)
elif k == dict: self.assertEqual(v, {2:20})
self.contains(lk, 'f1', 'f2', 'f3', 'x3', dict)
# iterkeys
lk = []
for k in d.keys():
lk.append(k)
self.contains(lk, 'f1', 'f2', 'f3', 'x3', dict)
# itervalues
for v in d.values():
if callable(v):
try: exp = v(1)
except: pass
try: exp = v()
except: pass
self.assertTrue(exp in [11, 22, 33])
elif v is dict:
self.assertTrue(v == {2:20})
elif v is int:
self.assertTrue(v == 30)
## something fun before destorying it
l1 = len(d); d[int] = 4 # object as key
self.assertEqual(len(d), l1+1)
l1 = len(d); del d[int]
self.assertEqual(len(d), l1-1)
l1 = len(d); del d[dict]
self.assertEqual(len(d), l1-1)
l1 = len(d); del d['x3']
self.assertEqual(len(d), l1-1)
l1 = len(d); popped_item = d.popitem()
self.assertEqual(len(d), l1-1)
## object as key
d[int] = int
d[str] = "str"
self.assertEqual(d[int], int)
self.assertEqual(d[str], "str")
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(d.__len__(), 0)
d[int] = int
self.assertEqual(len(d), 1)
## comparison
def func1(): pass
def func2(): pass
d1 = func1.__dict__
d2 = func2.__dict__
d1['x'] = 10
d2['x'] = 30
d1[int] = int
d2[int] = int
# object as key
self.assertTrue(d1 != d2)
d2['x'] = 10
self.assertTrue(d1 == d2)
def test_subclass_dict_override__init__(self):
"""subclassing dict, overriding __init__"""
class foo(dict):
def __init__(self, abc):
self.abc = abc
a = foo('abc')
self.assertEqual(a.abc, 'abc')
# make sure dict.__init__ works
a = {}
a.__init__({'abc':'def'})
self.assertEqual(a, {'abc':'def'})
a.__init__({'abcd':'defg'})
self.assertEqual(a, {'abc':'def', 'abcd':'defg'})
# keyword arg contruction
# single kw-arg, should go into dict
a = dict(b=2)
self.assertEqual(a, {'b':2})
# dict value to init, Plus kw-arg
a = dict({'a':3}, b=2)
self.assertEqual(a, {'a':3, 'b':2})
# more than one
a = dict({'a':3}, b=2, c=5)
self.assertEqual(a, {'a':3, 'b':2, 'c':5})
try:
dict({'a':3}, {'b':2}, c=5)
self.fail('Should not reach this code')
except TypeError: pass
@skipUnlessIronPython()
def test_DictionaryUnionEnumerator(self):
class C(object): pass
c = C()
d = c.__dict__
import System
# Check empty enumerator
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Add non-string attribute
d[1] = 100
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), True)
self.assertEqual(e.Key, 1)
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Add string attribute
c.attr = 100
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), True)
key1 = e.Key
self.assertEqual(e.MoveNext(), True)
key2 = e.Key
self.assertEqual((key1, key2) == (1, "attr") or (key1, key2) == ("attr", 1), True)
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Remove non-string attribute
del d[1]
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), True)
self.assertEqual(e.Key, "attr")
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
# Remove string attribute and check empty enumerator
del c.attr
e = System.Collections.IDictionary.GetEnumerator(d)
self.assertRaises(SystemError, getattr, e, "Key")
self.assertEqual(e.MoveNext(), False)
self.assertRaises(SystemError, getattr, e, "Key")
def test_same_but_different(self):
    """Test case checks that when two values who are logically different but share hash code & equality result in only a single entry"""
    # -10 and long(-10) compare equal and hash identically, so the second
    # literal entry overwrites the first and a single key remains.
    self.assertEqual({-10:0, long(-10):1}, {-10:1})
def test_module_dict(self):
    """A module's __dict__ is a Mapping and exposes module-level names."""
    # FIX: collections.Mapping was a deprecated alias removed in Python 3.10;
    # the ABC lives in collections.abc on all supported versions.
    from collections.abc import Mapping
    me = sys.modules[__name__]
    moduleDict = me.__dict__
    self.assertTrue(isinstance(moduleDict, Mapping))
    # this test class itself is reachable through the module dict
    self.assertTrue(moduleDict.__contains__("DictTest"))
    self.assertEqual(moduleDict["DictTest"], DictTest)
    self.assertTrue(moduleDict.keys().__contains__("DictTest"))
def test_eval_locals_simple(self):
    """eval resolves unknown names through the locals mapping's __getitem__."""
    class DefaultingLocals(dict):
        # any missing name resolves to 'abc'
        def __getitem__(self, key):
            try:
                return dict.__getitem__(self, key)
            except KeyError:
                return 'abc'
    self.assertEqual(eval("unknownvariable", globals(), DefaultingLocals()), 'abc')
def test_key_error(self):
    """KeyError carries the offending key as args[0] for dict and set misses."""
    class OldStyle: pass
    class NewStyle(object): pass
    for key in ['abc', 1, OldStyle(), NewStyle(), 1.0, long(1)]:
        # lookup miss
        try:
            {}[key]
        except KeyError as e:
            self.assertEqual(e.args[0], key)
        # deletion miss
        try:
            del {}[key]
        except KeyError as e:
            self.assertEqual(e.args[0], key)
        # set removal miss
        try:
            set([]).remove(key)
        except KeyError as e:
            self.assertEqual(e.args[0], key)
def test_contains(self):
    """The 'in' operator dispatches to a dict subclass's __contains__ override."""
    class ContainsDict(dict):
        was_called = False  # flipped the first time __contains__ runs
        def __contains__(self, key):
            ContainsDict.was_called = True
            return dict.__contains__(self, key)
    probe = ContainsDict()
    probe["stuff"] = 1
    # plain item assignment must not route through __contains__
    self.assertEqual(ContainsDict.was_called, False)
    self.assertEqual("nothing" in probe, False)
    self.assertEqual("stuff" in probe, True)
    self.assertEqual(ContainsDict.was_called, True)
def test_stdtypes_dict(self):
temp_types = [ int,
long,
float,
complex,
bool,
bytes,
str,
list,
tuple,
range,
dict,
set,
frozenset,
type,
object,
] #+ [eval("types." + x) for x in dir(types) if x.endswith("Type")]
temp_keys = [ None, -1, 0, 1, 2.34, "", "None", int, object, self.test_stdtypes_dict, [], (None,)]
for temp_type in temp_types:
for temp_key in temp_keys:
def tFunc(): temp_type.__dict__[temp_key] = 0
self.assertRaises(TypeError, tFunc)
def test_main_dict(self):
import __main__
#just make sure this doesn't throw...
t_list = []
for w in __main__.__dict__: t_list.append(w)
t_list.sort()
g_list = list(globals().keys())
g_list.sort()
self.assertEqual(t_list, g_list)
def test_update(self):
    """dict.update merges a positional mapping and/or keyword arguments.

    Each case is (starting dict, positional args, keyword args, expected
    result after the update).
    """
    test_cases = (
        #N changes with an empty dict
        ({}, (), {}, {}),
        ({}, ({'k':'v'},), {}, {'k':'v'}),
        ({}, (), {'k':'v'}, {'k':'v'}),
        ({}, ({'k':'v', 'x':'y'},), {}, {'k':'v', 'x':'y'}),
        ({}, (), {'k':'v', 'x':'y'}, {'k':'v', 'x':'y'}),
        ({}, ({'k':'v'},), {'x':'y'}, {'k':'v', 'x':'y'}),
        #N changes with one pre-existing dict element
        ({'a':'b'}, (), {}, {'a':'b'}),
        ({'a':'b'}, ({'k':'v'},), {}, {'a':'b', 'k':'v'}),
        ({'a':'b'}, (), {'k':'v'}, {'a':'b', 'k':'v'}),
        ({'a':'b'}, ({'k':'v', 'x':'y'},), {}, {'a':'b', 'k':'v', 'x':'y'}),
        ({'a':'b'}, (), {'k':'v', 'x':'y'}, {'a':'b', 'k':'v', 'x':'y'}),
        ({'a':'b'}, ({'k':'v'},), {'x':'y'}, {'a':'b', 'k':'v', 'x':'y'}),
        #N changes with two pre-existing dict elements
        ({'a':'b', 'c':'d'}, (), {}, {'a':'b', 'c':'d'}),
        ({'a':'b', 'c':'d'}, ({'k':'v'},), {}, {'a':'b', 'c':'d', 'k':'v'}),
        ({'a':'b', 'c':'d'}, (), {'k':'v'}, {'a':'b', 'c':'d', 'k':'v'}),
        ({'a':'b', 'c':'d'}, ({'k':'v', 'x':'y'},), {}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
        ({'a':'b', 'c':'d'}, (), {'k':'v', 'x':'y'}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
        ({'a':'b', 'c':'d'}, ({'k':'v'},), {'x':'y'}, {'a':'b', 'c':'d', 'k':'v', 'x':'y'}),
    )
    for start_dict, dict_param, kw_params, expected in test_cases:
        try:
            start_dict.update(*dict_param, **kw_params)
        except Exception:
            print("ERROR:", start_dict, ".update(*", dict_param, ", **", kw_params, ") failed!")
            raise  # FIX: bare raise preserves the original traceback ('raise e' re-raised from here)
        self.assertEqual(start_dict, expected)
def test_update_argnames(self):
expected = {"b": 1}
result = {}
result.update(b=1)
self.assertEqual(result, expected)
expected = {"other": 1}
result = {}
result.update(other=1)
self.assertEqual(result, expected)
expected = {"other": 1, "otherArgs": 2}
result = {}
result.update({"other": 1}, otherArgs=2)
self.assertEqual(result, expected)
def test_update_no_setitem(self):
# update doesn't call __setitem__
class mydict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.setcalled = False
def __setitem__(self, index, value):
self.setcalled = True
raise Exception()
d = mydict()
d.update(mydict(abc=2))
self.assertEqual(d.setcalled, False)
d.update({'foo': 2})
self.assertEqual(d.setcalled, False)
def test_keys_not_as_property(self):
def f():
mapping = { 10: 10}
for k in mapping.keys: pass
if is_cli:
self.assertRaisesMessage(TypeError,
"iteration over non-sequence of type builtin_function_or_method",
f)
else:
self.assertRaisesMessage(TypeError,
"'builtin_function_or_method' object is not iterable",
f)
def test_dict_class_dictionary(self):
class KOld:
KLASS_MEMBER = 3.14
def aFunc(): pass
def aMethod(self): pass
class KNew(object):
KLASS_MEMBER = 3.14
def aFunc(): pass
def aMethod(self): pass
for K in [KOld, KNew]:
temp_dict = dict(K.__dict__)
#class member has the correct value?
self.assertEqual(K.__dict__["KLASS_MEMBER"], 3.14)
self.assertEqual(temp_dict["KLASS_MEMBER"], 3.14)
#methods show up?
for func_name in ["aFunc", "aMethod"]:
self.assertTrue(func_name in K.__dict__.keys())
self.assertTrue(func_name in temp_dict.keys())
expected_keys = [ '__module__', 'KLASS_MEMBER', 'aFunc', 'aMethod',
'__dict__',
'__weakref__', '__doc__']
for expected_key in expected_keys:
self.assertTrue(expected_key in KNew.__dict__, expected_key)
self.assertTrue(expected_key in temp_dict, expected_key)
def test_cp15882(self):
x = {}
#negative cases
for bad_stuff in [
[1],
{}, {1:1}, {(1,2): 1},
set()]:
try:
x[bad_stuff] = 1
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
self.assertEqual(x, {})
#positive cases
for stuff in [
(), (None),
(-1), (0), (1), (2),
(1, 2), (1, 2, 3),
range(3), 1j, object, self.test_cp15882,
(range(3)), (1j), (object), (self.test_cp15882),
(()), ((())),
]:
for i in range(2):
x[stuff] = 1
self.assertEqual(x[stuff], 1)
del x[stuff]
self.assertEqual(x, {})
self.assertRaises(KeyError, x.__delitem__, stuff)
for i in range(2):
x[stuff] = 1
self.assertEqual(x[stuff], 1)
x.__delitem__(stuff)
self.assertEqual(x, {})
self.assertRaises(KeyError, x.__delitem__, stuff)
def test_cp35348(self):
empty = {} # underlying type: EmptyDictionaryStorage
emptied = {1:1} # underlying type: CommonDictionaryStorage
del emptied[1]
not_empty = {42:1}
#negative cases
for bad_stuff in [
[1],
{}, {1:1}, {(1,2): 1},
set()]:
try:
dummy = bad_stuff in empty
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
pass
try:
dummy = bad_stuff in emptied
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
pass
try:
dummy = bad_stuff in not_empty
self.fail(str(bad_stuff) + " is unhashable")
except TypeError:
pass
class C1(object):
pass
c1=C1()
class C2:
pass
c2=C2()
#positive cases
for stuff in [
(), (None),
(-1), (0), (1), (2),
(1, 2), (1, 2, 3),
range(3), 1j, object, self.test_cp35348,
(range(3)), (1j), (object), (self.test_cp35348),
(()), ((())), c1, c2,
]:
self.assertFalse(stuff in empty)
self.assertFalse(stuff in emptied)
self.assertFalse(stuff in not_empty)
for stuff in [
(), (None),
(-1), (0), (1), (2),
(1, 2), (1, 2, 3),
range(3), 1j, object, self.test_cp35348,
(range(3)), (1j), (object), (self.test_cp35348),
(()), ((())), c1, c2,
]:
emptied[stuff] = 'test_cp35348'
self.assertTrue(stuff in emptied)
del emptied[stuff]
self.assertEqual(len(empty), 0)
not_empty[stuff] = 'test_cp35348'
self.assertTrue(stuff in not_empty)
del not_empty[stuff]
self.assertEqual(len(not_empty), 1)
def test_cp35667(self):
try:
self.assertFalse(type([]) in {})
self.assertFalse(type({}) in {})
d = {list:1, dict:2}
self.assertTrue(list in d)
self.assertTrue(dict in d)
except Exception as ex:
self.assertTrue(False, "unexpected exception: %s" % ex)
def test_comparison_operators(self):
x = {2:3}
y = {2:4}
for oper in ('__lt__', '__gt__', '__le__', '__ge__'):
for data in (y, None, 1, 1.0, long(1), (), [], 1j, "abc"):
self.assertEqual(getattr(x, oper)(data), NotImplemented)
def test_cp16519(self):
__main__ = __import__(__name__)
__main__.Dict = {"1": "a"}
self.assertEqual(__main__.Dict["1"], "a")
del __main__.Dict
import sys
sys.Dict = {"1": "b"}
self.assertEqual(sys.Dict["1"], "b")
del sys.Dict
with path_modifier(os.path.join(source_root(), 'Tests')):
import testpkg1
testpkg1.Dict = {"1": "c"}
self.assertEqual(testpkg1.Dict["1"], "c")
del testpkg1.Dict
def test_dict_equality_lookup(self):
"""dictionaries check object equality before running normal equality"""
class x(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
def __hash__(self):
return 0
a = x()
d = {}
d[a] = 42
self.assertEqual(d[a], 42)
def test_missing(self):
class Foo(dict):
def __missing__(self, key):
raise TypeError('Foo.__missing__ should not be called')
f = Foo()
self.assertEqual(f.setdefault(1, 2), 2)
self.assertEqual(f.get(2), None)
self.assertEqual(f.get(2, 3), 3)
self.assertRaises(KeyError, f.pop, 3)
self.assertEqual(f.pop(3, 4), 4)
x = {2:3}
for f in (Foo({'abc':3}), Foo()):
self.assertTrue(x != f)
self.assertTrue(f != x)
self.assertEqual(x.__eq__(f), False)
self.assertEqual(f.__eq__(x), False)
def test_cp29914(self):
self.assertEqual(dict(o=42), {'o':42})
def test_cp32527(self):
'''test for duplicate key in dict under specific hash value conditions'''
d = {'1': 1, '2': 1, '3': 1, 'a7': 1, 'a8': 1}
#d now has 7 buckets internally, and computed hash for a7 and a8 keys will land on same starting bucket index
#recycle the a7 bucket
d.pop('a7')
#attempt to update the a8 bucket, which now comes after the recycled a7
d['a8'] = 5
#if working properly, there will now be a recycled bucket (former home of a7) and a single a8 bucket
#if not working properly, there will instead be two a8 buckets
expected = 1
actual = list(d.keys()).count('a8')
self.assertEqual(actual, expected)
    @skipUnlessIronPython()
    def test_cp34770(self):
        # Entries added with Int64/UInt64 should be findable with Python long:
        # numerically equal keys must hash to the same slot regardless of the
        # underlying .NET integer type.
        from System import Int64, UInt64
        i64 = Int64(1110766100758387874)
        u64 = UInt64(9223372036854775808)  # > Int64.MaxValue, exercises UInt64
        m = {}
        m[i64] = 'a'
        self.assertEqual(m[long(1110766100758387874)], 'a')
        m[u64] = 'b'
        self.assertEqual(m[long(9223372036854775808)], 'b')
run_test(__name__)
|
en
| 0.724389
|
# Licensed to the .NET Foundation under one or more agreements. # The .NET Foundation licenses this file to you under the Apache 2.0 License. # See the LICENSE file in the project root for more information. #Regression: CodePlex 15715 #Do not move or remove these two lines # TODO: remove assertRaises when https://github.com/IronLanguages/ironpython3/issues/456 is fixed # TODO: remove assertRaises when https://github.com/IronLanguages/ironpython3/issues/456 is fixed verify function environments, FieldIdDict, custom old class dict, and module environments all local identical to normal dictionaries ##################################################################### ## coverage for CustomFieldIdDict ## recursive entries & repr # shouldn't stack overflow ## initial length # add more attributes ## get ## setdefault ## pop ## in # subclassing, overriding __getitem__, and passing to # eval ############### IN THIS POINT, d LOOKS LIKE ############### ## {'f1': f1, 'f2': f2, 'f3': f3, 'x3': 30, '__doc__': 'This is comment', '__module__': '??'} ## iteritems # iterkeys # itervalues ## something fun before destorying it # object as key # object as key ## object as key This is comment This is comment # object as key verify dicts can be converted to IDictionaries coverage for FieldIdDict ## initial length # add more attributes ## get ## setdefault ## pop ## in ############### IN THIS POINT, d LOOKS LIKE ############### # f1, f2, f3, x3, dict as keys ## iteritems # iterkeys # itervalues ## something fun before destorying it # object as key ## object as key ## comparison # object as key subclassing dict, overriding __init__ # make sure dict.__init__ works # keyword arg contruction # single kw-arg, should go into dict # dict value to init, Plus kw-arg # more than one # Check empty enumerator # Add non-string attribute # Add string attribute # Remove non-string attribute # Remove string attribute and check empty enumerator Test case checks that when two values who are logically 
different but share hash code & equality result in only a single entry #+ [eval("types." + x) for x in dir(types) if x.endswith("Type")] #just make sure this doesn't throw... #N changes with an empty dict #N changes with one pre-existing dict element #N changes with one pre-existing dict element # update doesn't call __setitem__ #class member has the correct value? #methods show up? #negative cases #positive cases # underlying type: EmptyDictionaryStorage # underlying type: CommonDictionaryStorage #negative cases #positive cases dictionaries check object equality before running normal equality test for duplicate key in dict under specific hash value conditions #d now has 7 buckets internally, and computed hash for a7 and a8 keys will land on same starting bucket index #recycle the a7 bucket #attempt to update the a8 bucket, which now comes after the recycled a7 #if working properly, there will now be a recycled bucket (former home of a7) and a single a8 bucket #if not working properly, there will instead be two a8 buckets # Entries added with Int64/UInt64 should be findable with Python long
| 2.134592
| 2
|
hooks/tk-multi-setframerange/frame_operations_tk-rumba.py
|
diegogarciahuerta/tk-rumba
| 0
|
6626012
|
<gh_stars>0
# ----------------------------------------------------------------------------
# Copyright (c) 2021, <NAME>.
#
# Your use of this software as distributed in this GitHub repository, is
# governed by the MIT License
#
# Your use of the Shotgun Pipeline Toolkit is governed by the applicable
# license agreement between you and Autodesk / Shotgun.
#
# Read LICENSE and SHOTGUN_LICENSE for full details about the licenses that
# pertain to this software.
# ----------------------------------------------------------------------------
import sgtk
from sgtk import TankError
import rumba
__author__ = "<NAME>"
__contact__ = "https://www.linkedin.com/in/diegogh/"
HookBaseClass = sgtk.get_hook_baseclass()
class FrameOperation(HookBaseClass):
    """
    Hook called to perform a frame operation with the
    current scene
    """

    def get_frame_range(self, **kwargs):
        """
        get_frame_range will return a tuple of (in_frame, out_frame)

        :returns: Returns the frame range in the form (in_frame, out_frame)
        :rtype: tuple[int, int]
        """
        # Fall back to (0, 0) when no document is open in Rumba.
        current_in = 0
        current_out = 0
        active_doc = rumba.active_document()
        if active_doc:
            current_in = active_doc.start_frame.value().as_int()
            current_out = active_doc.end_frame.value().as_int()
        return (current_in, current_out)

    def set_frame_range(self, in_frame=None, out_frame=None, **kwargs):
        """
        set_frame_range will set the frame range using `in_frame` and `out_frame`

        :param int in_frame: in_frame for the current context
            (e.g. the current shot, current asset etc)
        :param int out_frame: out_frame for the current context
            (e.g. the current shot, current asset etc)

        :raises TankError: if either frame bound is missing.
        """
        # The previous implementation called int(None) when the defaults were
        # used, raising an opaque TypeError; fail with a clear toolkit error.
        if in_frame is None or out_frame is None:
            raise TankError(
                "in_frame and out_frame must both be specified to set "
                "the frame range."
            )
        active_doc = rumba.active_document()
        if active_doc:
            start = int(in_frame)
            end = int(out_frame)
            # Group all edits into one undoable modification block.
            rumba.modify_begin("Shotgun Update Frame Range")
            try:
                # Document frame range ...
                active_doc.start_frame.set_value(start)
                active_doc.end_frame.set_value(end)
                # ... and the timeline playback range.
                active_doc.range_start_frame.set_value(start)
                active_doc.range_end_frame.set_value(end)
            finally:
                # Always close the modification block, even if a setter fails,
                # so the undo stack is not left open.
                rumba.modify_end()
|
# ----------------------------------------------------------------------------
# Copyright (c) 2021, <NAME>.
#
# Your use of this software as distributed in this GitHub repository, is
# governed by the MIT License
#
# Your use of the Shotgun Pipeline Toolkit is governed by the applicable
# license agreement between you and Autodesk / Shotgun.
#
# Read LICENSE and SHOTGUN_LICENSE for full details about the licenses that
# pertain to this software.
# ----------------------------------------------------------------------------
import sgtk
from sgtk import TankError
import rumba
__author__ = "<NAME>"
__contact__ = "https://www.linkedin.com/in/diegogh/"
HookBaseClass = sgtk.get_hook_baseclass()
class FrameOperation(HookBaseClass):
    """
    Hook called to perform a frame operation with the
    current scene
    """

    def get_frame_range(self, **kwargs):
        """
        get_frame_range will return a tuple of (in_frame, out_frame)

        :returns: Returns the frame range in the form (in_frame, out_frame)
        :rtype: tuple[int, int]
        """
        # Defaults to (0, 0) when no document is open in Rumba.
        current_in = 0
        current_out = 0
        active_doc = rumba.active_document()
        if active_doc:
            current_in = active_doc.start_frame.value().as_int()
            current_out = active_doc.end_frame.value().as_int()
        return (current_in, current_out)

    def set_frame_range(self, in_frame=None, out_frame=None, **kwargs):
        """
        set_frame_range will set the frame range using `in_frame` and `out_frame`

        :param int in_frame: in_frame for the current context
            (e.g. the current shot, current asset etc)
        :param int out_frame: out_frame for the current context
            (e.g. the current shot, current asset etc)
        """
        # NOTE(review): int(None) raises TypeError when the defaults are used —
        # callers are expected to always pass both bounds; confirm.
        active_doc = rumba.active_document()
        if active_doc:
            start = int(in_frame)
            end = int(out_frame)
            # modify_begin/modify_end delimit a single undoable edit.
            rumba.modify_begin("Shotgun Update Frame Range")
            # Document frame range plus the timeline playback range.
            active_doc.start_frame.set_value(start)
            active_doc.end_frame.set_value(end)
            active_doc.range_start_frame.set_value(start)
            active_doc.range_end_frame.set_value(end)
            rumba.modify_end()
|
en
| 0.675689
|
# ---------------------------------------------------------------------------- # Copyright (c) 2021, <NAME>. # # Your use of this software as distributed in this GitHub repository, is # governed by the MIT License # # Your use of the Shotgun Pipeline Toolkit is governed by the applicable # license agreement between you and Autodesk / Shotgun. # # Read LICENSE and SHOTGUN_LICENSE for full details about the licenses that # pertain to this software. # ---------------------------------------------------------------------------- Hook called to perform a frame operation with the current scene get_frame_range will return a tuple of (in_frame, out_frame) :returns: Returns the frame range in the form (in_frame, out_frame) :rtype: tuple[int, int] set_frame_range will set the frame range using `in_frame` and `out_frame` :param int in_frame: in_frame for the current context (e.g. the current shot, current asset etc) :param int out_frame: out_frame for the current context (e.g. the current shot, current asset etc)
| 2.327222
| 2
|
splink/default_settings.py
|
slobo/splink
| 176
|
6626013
|
import warnings
from pyspark.sql.session import SparkSession
from copy import deepcopy
from .validate import get_default_value_from_schema
from .case_statements import (
_check_jaro_registered,
sql_gen_case_smnt_strict_equality_2,
sql_gen_case_stmt_levenshtein_rel_3,
sql_gen_case_stmt_levenshtein_rel_4,
sql_gen_case_stmt_jaro_3,
sql_gen_case_stmt_jaro_4,
sql_gen_case_stmt_numeric_float_equality_2,
sql_gen_case_stmt_numeric_perc_3,
sql_gen_case_stmt_numeric_perc_4,
_check_no_obvious_problem_with_case_statement,
_add_as_gamma_to_case_statement,
)
def _normalise_prob_list(prob_array: list):
sum_list = sum(prob_array)
return [i / sum_list for i in prob_array]
def _get_default_case_statements_functions(spark):
    """Build the lookup of default case-statement builders.

    Keyed first by data type ("numeric"/"string"), then by number of
    comparison levels (2-4).
    """
    numeric_stmts = {
        2: sql_gen_case_stmt_numeric_float_equality_2,
        3: sql_gen_case_stmt_numeric_perc_3,
        4: sql_gen_case_stmt_numeric_perc_4,
    }
    # Prefer jaro similarity for strings when the UDF is registered on this
    # Spark session; otherwise fall back to levenshtein-based statements.
    if _check_jaro_registered(spark):
        string_stmts = {
            2: sql_gen_case_smnt_strict_equality_2,
            3: sql_gen_case_stmt_jaro_3,
            4: sql_gen_case_stmt_jaro_4,
        }
    else:
        string_stmts = {
            2: sql_gen_case_smnt_strict_equality_2,
            3: sql_gen_case_stmt_levenshtein_rel_3,
            4: sql_gen_case_stmt_levenshtein_rel_4,
        }
    return {"numeric": numeric_stmts, "string": string_stmts}
def _get_default_case_statement_fn(default_statements, data_type, levels):
if data_type not in ["string", "numeric"]:
raise ValueError(
f"No default case statement available for data type {data_type}, "
"please specify a custom case_expression"
)
if levels > 4:
raise ValueError(
f"No default case statement available when levels > 4, "
"please specify a custom 'case_expression' within your settings dictionary"
)
return default_statements[data_type][levels]
def _get_default_probabilities(m_or_u, levels):
if levels > 6:
raise ValueError(
f"No default m and u probabilities available when levels > 6, "
"please specify custom values for 'm_probabilities' and 'u_probabilities' "
"within your settings dictionary"
)
# Note all m and u probabilities are automatically normalised to sum to 1
default_m_u_probabilities = {
"m_probabilities": {
2: _normalise_prob_list([1, 9]),
3: _normalise_prob_list([1, 2, 7]),
4: _normalise_prob_list([1, 1, 1, 7]),
5: _normalise_prob_list([0.33, 0.67, 1, 2, 6]),
6: _normalise_prob_list([0.33, 0.67, 1, 2, 3, 6]),
},
"u_probabilities": {
2: _normalise_prob_list([9, 1]),
3: _normalise_prob_list([7, 2, 1]),
4: _normalise_prob_list([7, 1, 1, 1]),
5: _normalise_prob_list([6, 2, 1, 0.33, 0.67]),
6: _normalise_prob_list([6, 3, 2, 1, 0.33, 0.67]),
},
}
probabilities = default_m_u_probabilities[m_or_u][levels]
return probabilities
def _complete_case_expression(col_settings, spark):
    """Ensure *col_settings* holds a usable "case_expression" (in place).

    If none was supplied, one is generated from the defaults for the column's
    data type and level count; otherwise the user-supplied statement is
    sanity-checked and rewritten to alias its result as the gamma column.
    """
    defaults = _get_default_case_statements_functions(spark)
    num_levels = col_settings["num_levels"]
    # A "custom_name" (e.g. for combined columns) wins over "col_name".
    target_col = (
        col_settings["custom_name"]
        if "custom_name" in col_settings
        else col_settings["col_name"]
    )
    if "case_expression" in col_settings:
        # Validate the user-supplied SQL, then append the gamma alias.
        _check_no_obvious_problem_with_case_statement(col_settings["case_expression"])
        col_settings["case_expression"] = _add_as_gamma_to_case_statement(
            col_settings["case_expression"], target_col
        )
    else:
        # No expression supplied: build one from the default generators.
        builder = _get_default_case_statement_fn(
            defaults, col_settings["data_type"], num_levels
        )
        col_settings["case_expression"] = builder(target_col, target_col)
def _complete_probabilities(col_settings: dict, mu_probabilities: str):
"""
Args:
col_settings (dict): Column settings dictionary
mu_probabilities (str): Either 'm_probabilities' or 'u_probabilities'
"""
if mu_probabilities not in col_settings:
levels = col_settings["num_levels"]
probs = _get_default_probabilities(mu_probabilities, levels)
col_settings[mu_probabilities] = probs
def complete_settings_dict(settings_dict: dict, spark: SparkSession):
    """Auto-populate any missing settings from the settings dictionary using the 'sensible defaults' that
    are specified in the json schema (./splink/files/settings_jsonschema.json)

    Args:
        settings_dict (dict): The settings dictionary
        spark: The SparkSession

    Returns:
        dict: A `splink` settings dictionary with all keys populated.
    """
    # Work on a deep copy so the caller's dictionary is never mutated.
    settings_dict = deepcopy(settings_dict)
    # Complete non-column settings from their default values if not exist
    non_col_keys = [
        "link_type",
        "em_convergence",
        "source_dataset_column_name",
        "unique_id_column_name",
        "additional_columns_to_retain",
        "retain_matching_columns",
        "retain_intermediate_calculation_columns",
        "max_iterations",
        "proportion_of_matches",
    ]
    for key in non_col_keys:
        if key not in settings_dict:
            settings_dict[key] = get_default_value_from_schema(
                key, is_column_setting=False
            )
    # An explicitly empty blocking-rule list is legal but usually a mistake
    # on large inputs, so warn rather than fail.
    if "blocking_rules" in settings_dict:
        if len(settings_dict["blocking_rules"]) == 0:
            warnings.warn(
                "You have not specified any blocking rules, meaning all comparisons between the "
                "input dataset(s) will be generated and blocking will not be used."
                "For large input datasets, this will generally be computationally intractable "
                "because it will generate comparisons equal to the number of rows squared."
            )
    c_cols = settings_dict["comparison_columns"]
    for gamma_index, col_settings in enumerate(c_cols):

        # Gamma index refers to the position in the comparison vector
        # i.e. it's a counter for comparison columns
        col_settings["gamma_index"] = gamma_index

        # Populate non-existing keys from defaults
        keys_for_defaults = [
            "num_levels",
            "data_type",
            "term_frequency_adjustments",
            "fix_u_probabilities",
            "fix_m_probabilities",
        ]

        for key in keys_for_defaults:
            if key not in col_settings:
                default = get_default_value_from_schema(key, is_column_setting=True)
                col_settings[key] = default

        # Doesn't need assignment because we're modify the col_settings dictionary
        # in place; each helper mutates its entry directly.
        _complete_case_expression(col_settings, spark)
        _complete_probabilities(col_settings, "m_probabilities")
        _complete_probabilities(col_settings, "u_probabilities")

    return settings_dict
def normalise_probabilities(settings_dict: dict):
    """Normalise all m/u probability lists in a settings dictionary so each
    sums to one, where possible.

    Lists containing ``None`` or summing to zero are left untouched.

    Args:
        settings_dict (dict): Splink settings dictionary
    """
    for col_settings in settings_dict["comparison_columns"]:
        for prob_key in ("m_probabilities", "u_probabilities"):
            if prob_key not in col_settings:
                continue
            probs = col_settings[prob_key]
            # Skip incomplete lists and all-zero lists (cannot normalise).
            if None in probs or sum(probs) == 0:
                continue
            col_settings[prob_key] = _normalise_prob_list(probs)
    return settings_dict
|
import warnings
from pyspark.sql.session import SparkSession
from copy import deepcopy
from .validate import get_default_value_from_schema
from .case_statements import (
_check_jaro_registered,
sql_gen_case_smnt_strict_equality_2,
sql_gen_case_stmt_levenshtein_rel_3,
sql_gen_case_stmt_levenshtein_rel_4,
sql_gen_case_stmt_jaro_3,
sql_gen_case_stmt_jaro_4,
sql_gen_case_stmt_numeric_float_equality_2,
sql_gen_case_stmt_numeric_perc_3,
sql_gen_case_stmt_numeric_perc_4,
_check_no_obvious_problem_with_case_statement,
_add_as_gamma_to_case_statement,
)
def _normalise_prob_list(prob_array: list):
sum_list = sum(prob_array)
return [i / sum_list for i in prob_array]
def _get_default_case_statements_functions(spark):
default_case_stmts = {
"numeric": {},
"string": {},
}
default_case_stmts["numeric"][2] = sql_gen_case_stmt_numeric_float_equality_2
default_case_stmts["numeric"][3] = sql_gen_case_stmt_numeric_perc_3
default_case_stmts["numeric"][4] = sql_gen_case_stmt_numeric_perc_4
jaro_exists = _check_jaro_registered(spark)
if jaro_exists:
default_case_stmts["string"][2] = sql_gen_case_smnt_strict_equality_2
default_case_stmts["string"][3] = sql_gen_case_stmt_jaro_3
default_case_stmts["string"][4] = sql_gen_case_stmt_jaro_4
else:
default_case_stmts["string"][2] = sql_gen_case_smnt_strict_equality_2
default_case_stmts["string"][3] = sql_gen_case_stmt_levenshtein_rel_3
default_case_stmts["string"][4] = sql_gen_case_stmt_levenshtein_rel_4
return default_case_stmts
def _get_default_case_statement_fn(default_statements, data_type, levels):
if data_type not in ["string", "numeric"]:
raise ValueError(
f"No default case statement available for data type {data_type}, "
"please specify a custom case_expression"
)
if levels > 4:
raise ValueError(
f"No default case statement available when levels > 4, "
"please specify a custom 'case_expression' within your settings dictionary"
)
return default_statements[data_type][levels]
def _get_default_probabilities(m_or_u, levels):
if levels > 6:
raise ValueError(
f"No default m and u probabilities available when levels > 6, "
"please specify custom values for 'm_probabilities' and 'u_probabilities' "
"within your settings dictionary"
)
# Note all m and u probabilities are automatically normalised to sum to 1
default_m_u_probabilities = {
"m_probabilities": {
2: _normalise_prob_list([1, 9]),
3: _normalise_prob_list([1, 2, 7]),
4: _normalise_prob_list([1, 1, 1, 7]),
5: _normalise_prob_list([0.33, 0.67, 1, 2, 6]),
6: _normalise_prob_list([0.33, 0.67, 1, 2, 3, 6]),
},
"u_probabilities": {
2: _normalise_prob_list([9, 1]),
3: _normalise_prob_list([7, 2, 1]),
4: _normalise_prob_list([7, 1, 1, 1]),
5: _normalise_prob_list([6, 2, 1, 0.33, 0.67]),
6: _normalise_prob_list([6, 3, 2, 1, 0.33, 0.67]),
},
}
probabilities = default_m_u_probabilities[m_or_u][levels]
return probabilities
def _complete_case_expression(col_settings, spark):
default_case_statements = _get_default_case_statements_functions(spark)
levels = col_settings["num_levels"]
if "custom_name" in col_settings:
col_name_for_case_fn = col_settings["custom_name"]
else:
col_name_for_case_fn = col_settings["col_name"]
if "case_expression" not in col_settings:
data_type = col_settings["data_type"]
case_fn = _get_default_case_statement_fn(
default_case_statements, data_type, levels
)
col_settings["case_expression"] = case_fn(
col_name_for_case_fn, col_name_for_case_fn
)
else:
_check_no_obvious_problem_with_case_statement(col_settings["case_expression"])
old_case_stmt = col_settings["case_expression"]
new_case_stmt = _add_as_gamma_to_case_statement(
old_case_stmt, col_name_for_case_fn
)
col_settings["case_expression"] = new_case_stmt
def _complete_probabilities(col_settings: dict, mu_probabilities: str):
"""
Args:
col_settings (dict): Column settings dictionary
mu_probabilities (str): Either 'm_probabilities' or 'u_probabilities'
"""
if mu_probabilities not in col_settings:
levels = col_settings["num_levels"]
probs = _get_default_probabilities(mu_probabilities, levels)
col_settings[mu_probabilities] = probs
def complete_settings_dict(settings_dict: dict, spark: SparkSession):
"""Auto-populate any missing settings from the settings dictionary using the 'sensible defaults' that
are specified in the json schema (./splink/files/settings_jsonschema.json)
Args:
settings_dict (dict): The settings dictionary
spark: The SparkSession
Returns:
dict: A `splink` settings dictionary with all keys populated.
"""
settings_dict = deepcopy(settings_dict)
# Complete non-column settings from their default values if not exist
non_col_keys = [
"link_type",
"em_convergence",
"source_dataset_column_name",
"unique_id_column_name",
"additional_columns_to_retain",
"retain_matching_columns",
"retain_intermediate_calculation_columns",
"max_iterations",
"proportion_of_matches",
]
for key in non_col_keys:
if key not in settings_dict:
settings_dict[key] = get_default_value_from_schema(
key, is_column_setting=False
)
if "blocking_rules" in settings_dict:
if len(settings_dict["blocking_rules"]) == 0:
warnings.warn(
"You have not specified any blocking rules, meaning all comparisons between the "
"input dataset(s) will be generated and blocking will not be used."
"For large input datasets, this will generally be computationally intractable "
"because it will generate comparisons equal to the number of rows squared."
)
c_cols = settings_dict["comparison_columns"]
for gamma_index, col_settings in enumerate(c_cols):
# Gamma index refers to the position in the comparison vector
# i.e. it's a counter for comparison columns
col_settings["gamma_index"] = gamma_index
# Populate non-existing keys from defaults
keys_for_defaults = [
"num_levels",
"data_type",
"term_frequency_adjustments",
"fix_u_probabilities",
"fix_m_probabilities",
]
for key in keys_for_defaults:
if key not in col_settings:
default = get_default_value_from_schema(key, is_column_setting=True)
col_settings[key] = default
# Doesn't need assignment because we're modify the col_settings dictionary
_complete_case_expression(col_settings, spark)
_complete_probabilities(col_settings, "m_probabilities")
_complete_probabilities(col_settings, "u_probabilities")
return settings_dict
def normalise_probabilities(settings_dict: dict):
"""Normalise all probabilities in a settings dictionary to sum
to one, of possible
Args:
settings_dict (dict): Splink settings dictionary
"""
c_cols = settings_dict["comparison_columns"]
for col_settings in c_cols:
for p in ["m_probabilities", "u_probabilities"]:
if p in col_settings:
if None not in col_settings[p]:
if sum(col_settings[p]) != 0:
col_settings[p] = _normalise_prob_list(col_settings[p])
return settings_dict
|
en
| 0.696327
|
# Note all m and u probabilities are automatically normalised to sum to 1 Args: col_settings (dict): Column settings dictionary mu_probabilities (str): Either 'm_probabilities' or 'u_probabilities' Auto-populate any missing settings from the settings dictionary using the 'sensible defaults' that are specified in the json schema (./splink/files/settings_jsonschema.json) Args: settings_dict (dict): The settings dictionary spark: The SparkSession Returns: dict: A `splink` settings dictionary with all keys populated. # Complete non-column settings from their default values if not exist # Gamma index refers to the position in the comparison vector # i.e. it's a counter for comparison columns # Populate non-existing keys from defaults # Doesn't need assignment because we're modify the col_settings dictionary Normalise all probabilities in a settings dictionary to sum to one, of possible Args: settings_dict (dict): Splink settings dictionary
| 2.111751
| 2
|
maintenance/models/maintenance.py
|
prorevizor/noc
| 0
|
6626014
|
# ---------------------------------------------------------------------
# Maintenance
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python
import datetime
import dateutil.parser
import operator
import re
from threading import Lock
# Third-party modules
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import (
StringField,
BooleanField,
ReferenceField,
DateTimeField,
ListField,
EmbeddedDocumentField,
)
import cachetools
# NOC modules
from .maintenancetype import MaintenanceType
from mongoengine.errors import ValidationError
from noc.sa.models.managedobject import ManagedObject
from noc.inv.models.networksegment import NetworkSegment
from noc.core.mongo.fields import ForeignKeyField, PlainReferenceField
from noc.core.model.decorator import on_save, on_delete_check
from noc.sa.models.objectdata import ObjectData
from noc.main.models.timepattern import TimePattern
from noc.main.models.template import Template
from noc.core.defer import call_later
from noc.sa.models.administrativedomain import AdministrativeDomain
from noc.core.service.pub import pub
id_lock = Lock()
class MaintenanceObject(EmbeddedDocument):
    """Embedded reference to a single ManagedObject covered by a maintenance."""
    object = ForeignKeyField(ManagedObject)
class MaintenanceSegment(EmbeddedDocument):
    """Embedded reference to a NetworkSegment covered by a maintenance."""
    segment = ReferenceField(NetworkSegment)
@on_save
@on_delete_check(
    clean=[("maintenance.AffectedObjects", "maintenance")],
)
class Maintenance(Document):
    """Planned maintenance window.

    Tracks the window (start/stop), the directly declared objects/segments,
    and optionally escalates a trouble ticket for the window via the
    escalator service. The expanded set of affected objects is stored in the
    separate AffectedObjects collection (defined later in this module) and
    recomputed asynchronously via call_later.
    """
    meta = {
        "collection": "noc.maintenance",
        "strict": False,
        "auto_create_index": False,
        "indexes": [("start", "is_completed"), "administrative_domain"],
        "legacy_collections": ["noc.maintainance"],
    }

    type = ReferenceField(MaintenanceType)
    subject = StringField(required=True)
    description = StringField()
    start = DateTimeField()
    stop = DateTimeField()
    # Set to True when the window is closed; clears AffectedObjects (on_save).
    is_completed = BooleanField(default=False)
    # When True, the maintenance is closed automatically at `stop`.
    auto_confirm = BooleanField(default=True)
    # Template used to render notification mails to `contacts` (see stop()).
    template = ForeignKeyField(Template)
    contacts = StringField()
    suppress_alarms = BooleanField()
    # Escalate TT during maintenance
    escalate_managed_object = ForeignKeyField(ManagedObject)
    # Time pattern when maintenance is active
    # None - active all the time
    time_pattern = ForeignKeyField(TimePattern)
    # Objects declared to be affected by maintenance
    direct_objects = ListField(EmbeddedDocumentField(MaintenanceObject))
    # Segments declared to be affected by maintenance
    direct_segments = ListField(EmbeddedDocumentField(MaintenanceSegment))
    # All Administrative Domain for all affected objects
    administrative_domain = ListField(ForeignKeyField(AdministrativeDomain))
    # Escalated TT ID in form
    # <external system name>:<external tt id>
    escalation_tt = StringField(required=False)
    # @todo: Attachments

    # Per-process id -> Maintenance cache (60 s TTL), guarded by id_lock.
    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
    def get_by_id(cls, id):
        # Cached lookup; returns None when the id is unknown.
        return Maintenance.objects.filter(id=id).first()

    def update_affected_objects_maintenance(self):
        # Recompute the affected-object set asynchronously (60 s delay) via
        # the module-level update_affected_objects() job.
        call_later(
            "noc.maintenance.models.maintenance.update_affected_objects",
            60,
            maintenance_id=self.id,
        )

    def auto_confirm_maintenance(self):
        # Schedule the stop() job to fire when the window ends.
        # NOTE(review): self.stop is parsed with strptime here, i.e. it is
        # assumed to still be an ISO-format string (pre-conversion) at this
        # point — confirm against mongoengine field conversion order.
        stop = datetime.datetime.strptime(self.stop, "%Y-%m-%dT%H:%M:%S")
        now = datetime.datetime.now()
        if stop > now:
            delay = (stop - now).total_seconds()
            call_later("noc.maintenance.models.maintenance.stop", delay, maintenance_id=self.id)

    def save(self, *args, **kwargs):
        """Validate direct objects/segments, persist, and (on create)
        schedule affected-object recalculation and auto-confirmation."""
        created = False
        if self._created:
            created = self._created
        # Reject rows where the object/segment reference was left empty.
        if self.direct_objects:
            if any(o_elem.object is None for o_elem in self.direct_objects):
                raise ValidationError("Object line is Empty")
        if self.direct_segments:
            for elem in self.direct_segments:
                try:
                    # Accessing the reference dereferences it; a broken
                    # reference raises here.
                    elem.segment = elem.segment
                except Exception:
                    raise ValidationError("Segment line is Empty")
        super().save(*args, **kwargs)
        if created and (self.direct_objects or self.direct_segments):
            self.update_affected_objects_maintenance()
        if self.auto_confirm:
            self.auto_confirm_maintenance()

    def on_save(self):
        """Post-save hook: react to changed fields and (re)schedule
        escalator jobs when a TT escalation object is configured."""
        # Recompute affected objects when the declared scope changed.
        if (
            hasattr(self, "_changed_fields")
            and "direct_objects" in self._changed_fields
            or hasattr(self, "_changed_fields")
            and "direct_segments" in self._changed_fields
        ):
            self.update_affected_objects_maintenance()
        # Re-arm auto-confirmation when the stop time moved.
        if hasattr(self, "_changed_fields") and "stop" in self._changed_fields:
            if not self.is_completed and self.auto_confirm:
                self.auto_confirm_maintenance()
        # Completion clears the cached affected-object set.
        if hasattr(self, "_changed_fields") and "is_completed" in self._changed_fields:
            AffectedObjects._get_collection().remove({"maintenance": self.id})
        if self.escalate_managed_object:
            if not self.is_completed and self.auto_confirm:
                # Open the TT at `start` (at least 60 s from now).
                call_later(
                    "noc.services.escalator.maintenance.start_maintenance",
                    delay=max(
                        (
                            dateutil.parser.parse(self.start) - datetime.datetime.now()
                        ).total_seconds(),
                        60,
                    ),
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )
                if self.auto_confirm:
                    # Close the TT at `stop` (at least 60 s from now).
                    call_later(
                        "noc.services.escalator.maintenance.close_maintenance",
                        delay=max(
                            (
                                dateutil.parser.parse(self.stop) - datetime.datetime.now()
                            ).total_seconds(),
                            60,
                        ),
                        scheduler="escalator",
                        pool=self.escalate_managed_object.escalator_shard,
                        maintenance_id=self.id,
                    )
            # Manual completion closes the TT immediately.
            if self.is_completed and not self.auto_confirm:
                call_later(
                    "noc.services.escalator.maintenance.close_maintenance",
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )

    @classmethod
    def currently_affected(cls):
        """
        Returns a list of currently affected object ids
        """
        affected = set()
        now = datetime.datetime.now()
        # Active, not-yet-completed maintenances whose window covers `now`.
        for d in cls._get_collection().find(
            {"start": {"$lte": now}, "stop": {"$gte": now}, "is_completed": False},
            {"_id": 1, "time_pattern": 1},
        ):
            if d.get("time_pattern"):
                # Restrict to time pattern
                tp = TimePattern.get_by_id(d["time_pattern"])
                if tp and not tp.match(now):
                    continue
            # Pull the affected object ids from the AffectedObjects cache.
            data = [
                {"$match": {"maintenance": d["_id"]}},
                {
                    "$project": {"_id": 0, "objects": "$affected_objects.object"},
                },
            ]
            for x in AffectedObjects._get_collection().aggregate(data):
                affected.update(x["objects"])
        return list(affected)

    @classmethod
    def get_object_maintenance(cls, mo):
        """
        Returns a list of active maintenance for object
        :param mo: Managed Object instance
        :return: List of Maintenance instances or empty list
        """
        r = []
        now = datetime.datetime.now()
        for m in Maintenance.objects.filter(start__lte=now, is_completed=False).order_by("start"):
            if m.time_pattern and not m.time_pattern.match(now):
                continue
            if AffectedObjects.objects.filter(maintenance=m, affected_objects__object=mo.id):
                r += [m]
        return r
class AffectedObjects(Document):
    """Materialized set of objects affected by a maintenance.

    Filled asynchronously by update_affected_objects() and removed when the
    maintenance is completed or deleted.
    """
    meta = {
        "collection": "noc.affectedobjects",
        "strict": False,
        "auto_create_index": False,
        "indexes": ["affected_objects.object"],
    }
    maintenance = PlainReferenceField(Maintenance)
    affected_objects = ListField(EmbeddedDocumentField(MaintenanceObject))
def update_affected_objects(maintenance_id):
    """
    Calculate and fill affected objects
    """

    def get_downlinks(objects):
        """Return ids of objects NOT yet in *objects* whose every uplink is
        already affected (i.e. fully isolated behind the maintenance)."""
        r = set()
        # Get all additional objects which may be affected
        for d in ObjectData._get_collection().find({"uplinks": {"$in": list(objects)}}, {"_id": 1}):
            if d["_id"] not in objects:
                r.add(d["_id"])
        if not r:
            return r
        # Leave only objects with all uplinks affected
        rr = set()
        for d in ObjectData._get_collection().find(
            {"_id": {"$in": list(r)}}, {"_id": 1, "uplinks": 1}
        ):
            if len([1 for u in d["uplinks"] if u in objects]) == len(d["uplinks"]):
                rr.add(d["_id"])
        return rr

    def get_segment_objects(segment):
        """Collect object ids of *segment* and, recursively, of all its
        child segments."""
        # Get objects belonging to segment
        so = set(ManagedObject.objects.filter(segment=segment).values_list("id", flat=True))
        # Get objects from underlying segments
        for ns in NetworkSegment._get_collection().find({"parent": segment}, {"_id": 1}):
            so |= get_segment_objects(ns["_id"])
        return so

    data = Maintenance.get_by_id(maintenance_id)
    # Calculate affected objects: declared objects plus all objects of the
    # declared segments (including nested segments).
    affected = set(o.object.id for o in data.direct_objects if o.object)
    for o in data.direct_segments:
        if o.segment:
            affected |= get_segment_objects(o.segment.id)
    # Expand transitively: keep adding downlinks whose uplinks are all
    # affected until a fixed point is reached.
    while True:
        r = get_downlinks(affected)
        if not r:
            break
        affected |= r

    # Calculate affected administrative_domain
    affected_ad = list(
        set(
            ManagedObject.objects.filter(id__in=list(affected)).values_list(
                "administrative_domain__id", flat=True
            )
        )
    )

    # @todo: Calculate affected objects considering topology
    affected = [{"object": o} for o in sorted(affected)]
    if affected:
        # Persist administrative domains on the maintenance itself and the
        # object list in the AffectedObjects cache (upserted).
        Maintenance._get_collection().update(
            {"_id": maintenance_id},
            {"$set": {"administrative_domain": affected_ad}},
        )
        AffectedObjects._get_collection().update(
            {"maintenance": maintenance_id}, {"$set": {"affected_objects": affected}}, upsert=True
        )
def stop(maintenance_id):
    """Close a maintenance window: notify contacts by mail (if a template
    is set), mark it completed and drop its affected-objects cache.

    Scheduled by Maintenance.auto_confirm_maintenance() via call_later.
    """
    rx_mail = re.compile(r"(?P<mail>[A-Za-z0-9\.\_\-]+\@[A-Za-z0-9\@\.\_\-]+)", re.MULTILINE)
    # Find Active Maintenance
    mai = Maintenance.get_by_id(maintenance_id)
    # NOTE(review): this sets the flag on the (cached) in-memory instance
    # only; persistence happens via the collection update below.
    mai.is_completed = True
    # Find email addresses on Maintenance Contacts
    if mai.template:
        ctx = {"maintenance": mai}
        contacts = rx_mail.findall(mai.contacts)
        if contacts:
            # Create message
            subject = mai.template.render_subject(**ctx)
            body = mai.template.render_body(**ctx)
            for mail in contacts:
                # Publish one message per recipient to the mailsender service.
                pub("mailsender", {"address": mail, "subject": subject, "body": body})
    Maintenance._get_collection().update({"_id": maintenance_id}, {"$set": {"is_completed": True}})
    AffectedObjects._get_collection().remove({"maintenance": maintenance_id})
|
# ---------------------------------------------------------------------
# Maintenance
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python
import datetime
import dateutil.parser
import operator
import re
from threading import Lock
# Third-party modules
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import (
StringField,
BooleanField,
ReferenceField,
DateTimeField,
ListField,
EmbeddedDocumentField,
)
import cachetools
# NOC modules
from .maintenancetype import MaintenanceType
from mongoengine.errors import ValidationError
from noc.sa.models.managedobject import ManagedObject
from noc.inv.models.networksegment import NetworkSegment
from noc.core.mongo.fields import ForeignKeyField, PlainReferenceField
from noc.core.model.decorator import on_save, on_delete_check
from noc.sa.models.objectdata import ObjectData
from noc.main.models.timepattern import TimePattern
from noc.main.models.template import Template
from noc.core.defer import call_later
from noc.sa.models.administrativedomain import AdministrativeDomain
from noc.core.service.pub import pub
id_lock = Lock()
class MaintenanceObject(EmbeddedDocument):
    """Embedded record referencing one ManagedObject covered by a maintenance."""

    # Referenced managed object (may be None until validated in Maintenance.save)
    object = ForeignKeyField(ManagedObject)
class MaintenanceSegment(EmbeddedDocument):
    """Embedded record referencing one NetworkSegment covered by a maintenance."""

    # Referenced network segment (dereference checked in Maintenance.save)
    segment = ReferenceField(NetworkSegment)
@on_save
@on_delete_check(
    clean=[("maintenance.AffectedObjects", "maintenance")],
)
class Maintenance(Document):
    """Planned maintenance window.

    Holds the declared scope (objects/segments), scheduling interval and
    notification/escalation settings. The expanded list of affected objects
    is materialized separately in AffectedObjects by
    update_affected_objects(), scheduled from save()/on_save().
    """

    meta = {
        "collection": "noc.maintenance",
        "strict": False,
        "auto_create_index": False,
        "indexes": [("start", "is_completed"), "administrative_domain"],
        "legacy_collections": ["noc.maintainance"],
    }
    type = ReferenceField(MaintenanceType)
    subject = StringField(required=True)
    description = StringField()
    start = DateTimeField()
    stop = DateTimeField()
    is_completed = BooleanField(default=False)
    auto_confirm = BooleanField(default=True)
    template = ForeignKeyField(Template)
    contacts = StringField()
    suppress_alarms = BooleanField()
    # Escalate TT during maintenance
    escalate_managed_object = ForeignKeyField(ManagedObject)
    # Time pattern when maintenance is active
    # None - active all the time
    time_pattern = ForeignKeyField(TimePattern)
    # Objects declared to be affected by maintenance
    direct_objects = ListField(EmbeddedDocumentField(MaintenanceObject))
    # Segments declared to be affected by maintenance
    direct_segments = ListField(EmbeddedDocumentField(MaintenanceSegment))
    # All Administrative Domain for all affected objects
    administrative_domain = ListField(ForeignKeyField(AdministrativeDomain))
    # Escalated TT ID in form
    # <external system name>:<external tt id>
    escalation_tt = StringField(required=False)
    # @todo: Attachments
    # Per-process id -> Maintenance cache, entries expire after 60s
    _id_cache = cachetools.TTLCache(maxsize=100, ttl=60)

    @classmethod
    @cachetools.cachedmethod(operator.attrgetter("_id_cache"), lock=lambda _: id_lock)
    def get_by_id(cls, id):
        """Return Maintenance by id (TTL-cached) or None when not found."""
        return Maintenance.objects.filter(id=id).first()

    def update_affected_objects_maintenance(self):
        """Schedule recalculation of affected objects in 60 seconds."""
        call_later(
            "noc.maintenance.models.maintenance.update_affected_objects",
            60,
            maintenance_id=self.id,
        )

    def auto_confirm_maintenance(self):
        """Schedule the stop() job at the maintenance stop time (if in the future)."""
        # NOTE(review): strptime requires self.stop to be an ISO string here;
        # DateTimeField normally yields a datetime after load — confirm call sites
        stop = datetime.datetime.strptime(self.stop, "%Y-%m-%dT%H:%M:%S")
        now = datetime.datetime.now()
        if stop > now:
            delay = (stop - now).total_seconds()
            call_later("noc.maintenance.models.maintenance.stop", delay, maintenance_id=self.id)

    def save(self, *args, **kwargs):
        """Validate declared scope, persist, and schedule follow-up jobs on creation."""
        created = False
        if self._created:
            created = self._created
        # Reject object lines with an empty reference
        if self.direct_objects:
            if any(o_elem.object is None for o_elem in self.direct_objects):
                raise ValidationError("Object line is Empty")
        # Force dereference of each segment to catch dangling/empty references
        if self.direct_segments:
            for elem in self.direct_segments:
                try:
                    elem.segment = elem.segment
                except Exception:
                    raise ValidationError("Segment line is Empty")
        super().save(*args, **kwargs)
        # Only a freshly created maintenance schedules jobs here;
        # updates are handled by on_save()
        if created and (self.direct_objects or self.direct_segments):
            self.update_affected_objects_maintenance()
            if self.auto_confirm:
                self.auto_confirm_maintenance()

    def on_save(self):
        """Post-save hook: reschedule jobs affected by changed fields."""
        # Scope changed -> recalculate affected objects
        if (
            hasattr(self, "_changed_fields")
            and "direct_objects" in self._changed_fields
            or hasattr(self, "_changed_fields")
            and "direct_segments" in self._changed_fields
        ):
            self.update_affected_objects_maintenance()
        # Stop time changed -> reschedule auto-confirmation
        if hasattr(self, "_changed_fields") and "stop" in self._changed_fields:
            if not self.is_completed and self.auto_confirm:
                self.auto_confirm_maintenance()
        # Completed -> drop materialized affected objects
        if hasattr(self, "_changed_fields") and "is_completed" in self._changed_fields:
            AffectedObjects._get_collection().remove({"maintenance": self.id})
        # Schedule TT escalation jobs on the escalator service
        if self.escalate_managed_object:
            if not self.is_completed and self.auto_confirm:
                # NOTE(review): dateutil.parser.parse expects a string; a datetime
                # value in self.start/self.stop would raise — confirm
                call_later(
                    "noc.services.escalator.maintenance.start_maintenance",
                    delay=max(
                        (
                            dateutil.parser.parse(self.start) - datetime.datetime.now()
                        ).total_seconds(),
                        60,
                    ),
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )
                if self.auto_confirm:
                    call_later(
                        "noc.services.escalator.maintenance.close_maintenance",
                        delay=max(
                            (
                                dateutil.parser.parse(self.stop) - datetime.datetime.now()
                            ).total_seconds(),
                            60,
                        ),
                        scheduler="escalator",
                        pool=self.escalate_managed_object.escalator_shard,
                        maintenance_id=self.id,
                    )
            # Manually completed maintenance closes escalation immediately
            if self.is_completed and not self.auto_confirm:
                call_later(
                    "noc.services.escalator.maintenance.close_maintenance",
                    scheduler="escalator",
                    pool=self.escalate_managed_object.escalator_shard,
                    maintenance_id=self.id,
                )

    @classmethod
    def currently_affected(cls):
        """
        Returns a list of currently affected object ids
        """
        affected = set()
        now = datetime.datetime.now()
        # Active maintenances whose window covers `now`
        for d in cls._get_collection().find(
            {"start": {"$lte": now}, "stop": {"$gte": now}, "is_completed": False},
            {"_id": 1, "time_pattern": 1},
        ):
            if d.get("time_pattern"):
                # Restrict to time pattern
                tp = TimePattern.get_by_id(d["time_pattern"])
                if tp and not tp.match(now):
                    continue
            # Collect object ids from the materialized AffectedObjects record
            data = [
                {"$match": {"maintenance": d["_id"]}},
                {
                    "$project": {"_id": 0, "objects": "$affected_objects.object"},
                },
            ]
            for x in AffectedObjects._get_collection().aggregate(data):
                affected.update(x["objects"])
        return list(affected)

    @classmethod
    def get_object_maintenance(cls, mo):
        """
        Returns a list of active maintenance for object
        :param mo: Managed Object instance
        :return: List of Maintenance instances or empty list
        """
        r = []
        now = datetime.datetime.now()
        # NOTE(review): filter has no stop__gte bound — past, unconfirmed
        # maintenances are also returned; confirm this is intended
        for m in Maintenance.objects.filter(start__lte=now, is_completed=False).order_by("start"):
            if m.time_pattern and not m.time_pattern.match(now):
                continue
            if AffectedObjects.objects.filter(maintenance=m, affected_objects__object=mo.id):
                r += [m]
        return r
class AffectedObjects(Document):
    """Materialized, expanded list of ManagedObjects affected by one Maintenance.

    Filled by update_affected_objects() and removed when the maintenance
    completes or is deleted.
    """

    meta = {
        "collection": "noc.affectedobjects",
        "strict": False,
        "auto_create_index": False,
        "indexes": ["affected_objects.object"],
    }
    # Owning maintenance
    maintenance = PlainReferenceField(Maintenance)
    # Expanded object list (direct objects + segment members + downlinks)
    affected_objects = ListField(EmbeddedDocumentField(MaintenanceObject))
def update_affected_objects(maintenance_id):
    """
    Calculate and fill affected objects

    Expands the maintenance scope: direct objects, all objects of declared
    segments (recursively including child segments), then iteratively adds
    downlink objects whose every uplink is already affected. Results are
    written to AffectedObjects (upsert) and the list of administrative
    domains is stored back on the Maintenance document.
    """

    def get_downlinks(objects):
        # Return ids of objects not yet in `objects` whose uplinks are ALL affected
        r = set()
        # Get all additional objects which may be affected
        for d in ObjectData._get_collection().find({"uplinks": {"$in": list(objects)}}, {"_id": 1}):
            if d["_id"] not in objects:
                r.add(d["_id"])
        if not r:
            return r
        # Leave only objects with all uplinks affected
        rr = set()
        for d in ObjectData._get_collection().find(
            {"_id": {"$in": list(r)}}, {"_id": 1, "uplinks": 1}
        ):
            if len([1 for u in d["uplinks"] if u in objects]) == len(d["uplinks"]):
                rr.add(d["_id"])
        return rr

    def get_segment_objects(segment):
        # Recursively collect object ids of a segment and its child segments
        # Get objects belonging to segment
        so = set(ManagedObject.objects.filter(segment=segment).values_list("id", flat=True))
        # Get objects from underlying segments
        for ns in NetworkSegment._get_collection().find({"parent": segment}, {"_id": 1}):
            so |= get_segment_objects(ns["_id"])
        return so

    # NOTE(review): no None check — assumes the maintenance still exists; confirm
    data = Maintenance.get_by_id(maintenance_id)
    # Calculate affected objects
    affected = set(o.object.id for o in data.direct_objects if o.object)
    for o in data.direct_segments:
        if o.segment:
            affected |= get_segment_objects(o.segment.id)
    # Fixed point: keep adding fully-covered downlinks until nothing changes
    while True:
        r = get_downlinks(affected)
        if not r:
            break
        affected |= r
    # Calculate affected administrative_domain
    affected_ad = list(
        set(
            ManagedObject.objects.filter(id__in=list(affected)).values_list(
                "administrative_domain__id", flat=True
            )
        )
    )
    # @todo: Calculate affected objects considering topology
    affected = [{"object": o} for o in sorted(affected)]
    # NOTE(review): administrative_domain is only updated when `affected` is
    # non-empty — a scope shrunk to nothing keeps stale data; confirm intended
    if affected:
        Maintenance._get_collection().update(
            {"_id": maintenance_id},
            {"$set": {"administrative_domain": affected_ad}},
        )
        AffectedObjects._get_collection().update(
            {"maintenance": maintenance_id}, {"$set": {"affected_objects": affected}}, upsert=True
        )
def stop(maintenance_id):
    """Complete a maintenance and e-mail every contact extracted from `contacts`.

    Scheduled via call_later by Maintenance.auto_confirm_maintenance().

    :param maintenance_id: Maintenance document id
    """
    mail_re = re.compile(r"(?P<mail>[A-Za-z0-9\.\_\-]+\@[A-Za-z0-9\@\.\_\-]+)", re.MULTILINE)
    # Locate the maintenance and flag the in-memory copy as completed
    maintenance = Maintenance.get_by_id(maintenance_id)
    maintenance.is_completed = True
    # Notify every e-mail address found in the contacts field
    if maintenance.template:
        render_ctx = {"maintenance": maintenance}
        addresses = mail_re.findall(maintenance.contacts)
        if addresses:
            # Render the notification once, then fan out per address
            subject = maintenance.template.render_subject(**render_ctx)
            body = maintenance.template.render_body(**render_ctx)
            for address in addresses:
                pub("mailsender", {"address": address, "subject": subject, "body": body})
    # Persist completion directly and drop the materialized affected objects
    Maintenance._get_collection().update({"_id": maintenance_id}, {"$set": {"is_completed": True}})
    AffectedObjects._get_collection().remove({"maintenance": maintenance_id})
|
en
| 0.712051
|
# --------------------------------------------------------------------- # Maintenance # --------------------------------------------------------------------- # Copyright (C) 2007-2020 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Python # Third-party modules # NOC modules # Escalate TT during maintenance # Time pattern when maintenance is active # None - active all the time # Objects declared to be affected by maintenance # Segments declared to be affected by maintenance # All Administrative Domain for all affected objects # Escalated TT ID in form # <external system name>:<external tt id> # @todo: Attachments Returns a list of currently affected object ids # Restrict to time pattern Returns a list of active maintenance for object :param mo: Managed Object instance :return: List of Maintenance instances or empty list Calculate and fill affected objects # Get all additional objects which may be affected # Leave only objects with all uplinks affected # Get objects belonging to segment # Get objects from underlying segments # Calculate affected objects # Calculate affected administrative_domain # @todo: Calculate affected objects considering topology # Find Active Maintenance # Find email addresses on Maintenance Contacts # Create message
| 1.70509
| 2
|
jp.atcoder/abc114/abc114_a/8393448.py
|
kagemeka/atcoder-submissions
| 1
|
6626015
|
<gh_stars>1-10
# 2019-11-11 16:07:35(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
# Accepted input values (ABC114 A: 7, 5 and 3 are "celebratable").
celebratable = [7, 5, 3]


def main():
    """Read one integer from stdin; print YES if it is 7, 5 or 3, else NO."""
    age = int(sys.stdin.readline().rstrip())
    if age in celebratable:
        print('YES')
    else:
        print('NO')


if __name__ == "__main__":
    main()
|
# 2019-11-11 16:07:35(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
# Accepted input values (ABC114 A: 7, 5 and 3 are "celebratable").
celebratable = [7, 5, 3]
def main():
    # Read one integer from stdin; print YES if it is one of 7, 5, 3
    x = int(sys.stdin.readline().rstrip())
    print('YES' if x in celebratable else 'NO')
if __name__ == "__main__":
    main()
|
en
| 0.626924
|
# 2019-11-11 16:07:35(JST) # import collections # import math # from string import ascii_lowercase, ascii_uppercase, digits # from bisect import bisect_left as bi_l, bisect_right as bi_r # import itertools # from functools import reduce # import operator as op # from scipy.misc import comb # float # import numpy as np
| 2.710578
| 3
|
Ui_QTDialogVer2.py
|
PRJJOHN/Automation
| 0
|
6626016
|
<reponame>PRJJOHN/Automation
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'd:\biologue\Automation\QTDialogVer2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Builder for the automation dialog UI.

    Behaviorally equivalent to the pyuic5-generated code: same widget
    attribute names, object names, creation order, geometry, fonts,
    signal connection and tab order — the repetitive per-widget code is
    collapsed into data-driven loops.
    """

    def setupUi(self, Dialog):
        """Create all widgets and layouts on *Dialog* and wire them up."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(1117, 537)

        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(860, 460, 121, 41))
        self.pushButton.setObjectName("pushButton")

        # Column of seven sliders; creation order fixes layout and tab order.
        slider_names = [
            "horizontalSlider_3",
            "horizontalSlider",
            "horizontalSlider_6",
            "horizontalSlider_5",
            "horizontalSlider_4",
            "horizontalSlider_7",
            "horizontalSlider_2",
        ]
        self.layoutWidget = QtWidgets.QWidget(Dialog)
        self.layoutWidget.setGeometry(QtCore.QRect(121, 51, 731, 391))
        self.layoutWidget.setObjectName("layoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        for name in slider_names:
            slider = QtWidgets.QSlider(self.layoutWidget)
            slider.setMaximum(2999)
            slider.setProperty("value", 0)
            slider.setOrientation(QtCore.Qt.Horizontal)
            slider.setObjectName(name)
            setattr(self, name, slider)
            self.verticalLayout.addWidget(slider)

        # Column of seven value-entry line edits (Arial 16, centered).
        edit_names = ["lineEdit"] + ["lineEdit_%d" % i for i in range(2, 8)]
        self.layoutWidget1 = QtWidgets.QWidget(Dialog)
        self.layoutWidget1.setGeometry(QtCore.QRect(860, 60, 121, 381))
        self.layoutWidget1.setObjectName("layoutWidget1")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        edit_font = QtGui.QFont()
        edit_font.setFamily("Arial")
        edit_font.setPointSize(16)
        for name in edit_names:
            edit = QtWidgets.QLineEdit(self.layoutWidget1)
            edit.setFont(edit_font)  # QFont is a value type; setFont stores a copy
            edit.setAlignment(QtCore.Qt.AlignCenter)
            edit.setObjectName(name)
            setattr(self, name, edit)
            self.verticalLayout_2.addWidget(edit)

        # Right column of value labels (label_8 .. label_14, 12 pt, centered).
        label_font = QtGui.QFont()
        label_font.setPointSize(12)
        self.layoutWidget_2 = QtWidgets.QWidget(Dialog)
        self.layoutWidget_2.setGeometry(QtCore.QRect(990, 60, 101, 371))
        self.layoutWidget_2.setObjectName("layoutWidget_2")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.layoutWidget_2)
        self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        for name in ["label_%d" % i for i in range(8, 15)]:
            label = QtWidgets.QLabel(self.layoutWidget_2)
            label.setFont(label_font)
            label.setAlignment(QtCore.Qt.AlignCenter)
            label.setObjectName(name)
            setattr(self, name, label)
            self.verticalLayout_4.addWidget(label)

        # Left column of row captions (label, label_2 .. label_7, 12 pt, centered).
        self.layoutWidget2 = QtWidgets.QWidget(Dialog)
        self.layoutWidget2.setGeometry(QtCore.QRect(50, 60, 63, 361))
        self.layoutWidget2.setObjectName("layoutWidget2")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget2)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        for name in ["label"] + ["label_%d" % i for i in range(2, 8)]:
            label = QtWidgets.QLabel(self.layoutWidget2)
            label.setFont(label_font)
            label.setAlignment(QtCore.Qt.AlignCenter)
            label.setObjectName(name)
            setattr(self, name, label)
            self.verticalLayout_3.addWidget(label)

        self.pushButton_2 = QtWidgets.QPushButton(Dialog)
        self.pushButton_2.setGeometry(QtCore.QRect(710, 460, 101, 41))
        self.pushButton_2.setObjectName("pushButton_2")

        self.retranslateUi(Dialog)
        self.horizontalSlider_2.valueChanged['int'].connect(self.lineEdit_7.clear)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

        # Tab order: all sliders, then all line edits, then the INIT button.
        tab_chain = [getattr(self, n) for n in slider_names + edit_names] + [self.pushButton]
        for current, following in zip(tab_chain, tab_chain[1:]):
            Dialog.setTabOrder(current, following)

    def retranslateUi(self, Dialog):
        """Apply the (translatable) window title and widget texts."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "INIT"))
        for name in ["lineEdit"] + ["lineEdit_%d" % i for i in range(2, 8)]:
            getattr(self, name).setText(_translate("Dialog", "0.00"))
        for name in ["label_%d" % i for i in range(8, 15)]:
            getattr(self, name).setText(_translate("Dialog", "0.0"))
        captions = ["Back 2", "Back 4", "Back 6", "Back 8", "Back 12", "Back 14", "Back 24"]
        for name, text in zip(["label"] + ["label_%d" % i for i in range(2, 8)], captions):
            getattr(self, name).setText(_translate("Dialog", text))
        self.pushButton_2.setText(_translate("Dialog", "InputVal"))
# Standalone entry point: show the dialog for manual testing.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    # Run the Qt event loop and propagate its exit code to the shell
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'd:\biologue\Automation\QTDialogVer2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(1117, 537)
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(860, 460, 121, 41))
self.pushButton.setObjectName("pushButton")
self.layoutWidget = QtWidgets.QWidget(Dialog)
self.layoutWidget.setGeometry(QtCore.QRect(121, 51, 731, 391))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalSlider_3 = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider_3.setMaximum(2999)
self.horizontalSlider_3.setProperty("value", 0)
self.horizontalSlider_3.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_3.setObjectName("horizontalSlider_3")
self.verticalLayout.addWidget(self.horizontalSlider_3)
self.horizontalSlider = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider.setMaximum(2999)
self.horizontalSlider.setProperty("value", 0)
self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider.setObjectName("horizontalSlider")
self.verticalLayout.addWidget(self.horizontalSlider)
self.horizontalSlider_6 = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider_6.setMaximum(2999)
self.horizontalSlider_6.setProperty("value", 0)
self.horizontalSlider_6.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_6.setObjectName("horizontalSlider_6")
self.verticalLayout.addWidget(self.horizontalSlider_6)
self.horizontalSlider_5 = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider_5.setMaximum(2999)
self.horizontalSlider_5.setProperty("value", 0)
self.horizontalSlider_5.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_5.setObjectName("horizontalSlider_5")
self.verticalLayout.addWidget(self.horizontalSlider_5)
self.horizontalSlider_4 = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider_4.setMaximum(2999)
self.horizontalSlider_4.setProperty("value", 0)
self.horizontalSlider_4.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_4.setObjectName("horizontalSlider_4")
self.verticalLayout.addWidget(self.horizontalSlider_4)
self.horizontalSlider_7 = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider_7.setMaximum(2999)
self.horizontalSlider_7.setProperty("value", 0)
self.horizontalSlider_7.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_7.setObjectName("horizontalSlider_7")
self.verticalLayout.addWidget(self.horizontalSlider_7)
self.horizontalSlider_2 = QtWidgets.QSlider(self.layoutWidget)
self.horizontalSlider_2.setMaximum(2999)
self.horizontalSlider_2.setProperty("value", 0)
self.horizontalSlider_2.setOrientation(QtCore.Qt.Horizontal)
self.horizontalSlider_2.setObjectName("horizontalSlider_2")
self.verticalLayout.addWidget(self.horizontalSlider_2)
self.layoutWidget1 = QtWidgets.QWidget(Dialog)
self.layoutWidget1.setGeometry(QtCore.QRect(860, 60, 121, 381))
self.layoutWidget1.setObjectName("layoutWidget1")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.lineEdit = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit.setFont(font)
self.lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit.setObjectName("lineEdit")
self.verticalLayout_2.addWidget(self.lineEdit)
self.lineEdit_2 = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_2.setObjectName("lineEdit_2")
self.verticalLayout_2.addWidget(self.lineEdit_2)
self.lineEdit_3 = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit_3.setFont(font)
self.lineEdit_3.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_3.setObjectName("lineEdit_3")
self.verticalLayout_2.addWidget(self.lineEdit_3)
self.lineEdit_4 = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit_4.setFont(font)
self.lineEdit_4.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_4.setObjectName("lineEdit_4")
self.verticalLayout_2.addWidget(self.lineEdit_4)
self.lineEdit_5 = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit_5.setFont(font)
self.lineEdit_5.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_5.setObjectName("lineEdit_5")
self.verticalLayout_2.addWidget(self.lineEdit_5)
self.lineEdit_6 = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit_6.setFont(font)
self.lineEdit_6.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_6.setObjectName("lineEdit_6")
self.verticalLayout_2.addWidget(self.lineEdit_6)
self.lineEdit_7 = QtWidgets.QLineEdit(self.layoutWidget1)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(16)
self.lineEdit_7.setFont(font)
self.lineEdit_7.setAlignment(QtCore.Qt.AlignCenter)
self.lineEdit_7.setObjectName("lineEdit_7")
self.verticalLayout_2.addWidget(self.lineEdit_7)
self.layoutWidget_2 = QtWidgets.QWidget(Dialog)
self.layoutWidget_2.setGeometry(QtCore.QRect(990, 60, 101, 371))
self.layoutWidget_2.setObjectName("layoutWidget_2")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.layoutWidget_2)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_8 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.verticalLayout_4.addWidget(self.label_8)
self.label_9 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_9.setFont(font)
self.label_9.setAlignment(QtCore.Qt.AlignCenter)
self.label_9.setObjectName("label_9")
self.verticalLayout_4.addWidget(self.label_9)
self.label_10 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_10.setFont(font)
self.label_10.setAlignment(QtCore.Qt.AlignCenter)
self.label_10.setObjectName("label_10")
self.verticalLayout_4.addWidget(self.label_10)
self.label_11 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_11.setFont(font)
self.label_11.setAlignment(QtCore.Qt.AlignCenter)
self.label_11.setObjectName("label_11")
self.verticalLayout_4.addWidget(self.label_11)
self.label_12 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_12.setFont(font)
self.label_12.setAlignment(QtCore.Qt.AlignCenter)
self.label_12.setObjectName("label_12")
self.verticalLayout_4.addWidget(self.label_12)
self.label_13 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_13.setFont(font)
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.verticalLayout_4.addWidget(self.label_13)
self.label_14 = QtWidgets.QLabel(self.layoutWidget_2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_14.setFont(font)
self.label_14.setAlignment(QtCore.Qt.AlignCenter)
self.label_14.setObjectName("label_14")
self.verticalLayout_4.addWidget(self.label_14)
self.layoutWidget2 = QtWidgets.QWidget(Dialog)
self.layoutWidget2.setGeometry(QtCore.QRect(50, 60, 63, 361))
self.layoutWidget2.setObjectName("layoutWidget2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.layoutWidget2)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.verticalLayout_3.addWidget(self.label)
self.label_2 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.verticalLayout_3.addWidget(self.label_2)
self.label_3 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.verticalLayout_3.addWidget(self.label_3)
self.label_4 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.verticalLayout_3.addWidget(self.label_4)
self.label_5 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.verticalLayout_3.addWidget(self.label_5)
self.label_6 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_6.setFont(font)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.verticalLayout_3.addWidget(self.label_6)
self.label_7 = QtWidgets.QLabel(self.layoutWidget2)
font = QtGui.QFont()
font.setPointSize(12)
self.label_7.setFont(font)
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.verticalLayout_3.addWidget(self.label_7)
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(710, 460, 101, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.retranslateUi(Dialog)
self.horizontalSlider_2.valueChanged['int'].connect(self.lineEdit_7.clear)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.horizontalSlider_3, self.horizontalSlider)
Dialog.setTabOrder(self.horizontalSlider, self.horizontalSlider_6)
Dialog.setTabOrder(self.horizontalSlider_6, self.horizontalSlider_5)
Dialog.setTabOrder(self.horizontalSlider_5, self.horizontalSlider_4)
Dialog.setTabOrder(self.horizontalSlider_4, self.horizontalSlider_7)
Dialog.setTabOrder(self.horizontalSlider_7, self.horizontalSlider_2)
Dialog.setTabOrder(self.horizontalSlider_2, self.lineEdit)
Dialog.setTabOrder(self.lineEdit, self.lineEdit_2)
Dialog.setTabOrder(self.lineEdit_2, self.lineEdit_3)
Dialog.setTabOrder(self.lineEdit_3, self.lineEdit_4)
Dialog.setTabOrder(self.lineEdit_4, self.lineEdit_5)
Dialog.setTabOrder(self.lineEdit_5, self.lineEdit_6)
Dialog.setTabOrder(self.lineEdit_6, self.lineEdit_7)
Dialog.setTabOrder(self.lineEdit_7, self.pushButton)
def retranslateUi(self, Dialog):
    """Install translatable UI strings (auto-generated by pyuic5; regenerated on each run)."""
    _translate = QtCore.QCoreApplication.translate
    Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
    self.pushButton.setText(_translate("Dialog", "INIT"))
    # Numeric entry fields, all initialised to "0.00".
    self.lineEdit.setText(_translate("Dialog", "0.00"))
    self.lineEdit_2.setText(_translate("Dialog", "0.00"))
    self.lineEdit_3.setText(_translate("Dialog", "0.00"))
    self.lineEdit_4.setText(_translate("Dialog", "0.00"))
    self.lineEdit_5.setText(_translate("Dialog", "0.00"))
    self.lineEdit_6.setText(_translate("Dialog", "0.00"))
    self.lineEdit_7.setText(_translate("Dialog", "0.00"))
    # Value-display labels, initialised to "0.0".
    self.label_8.setText(_translate("Dialog", "0.0"))
    self.label_9.setText(_translate("Dialog", "0.0"))
    self.label_10.setText(_translate("Dialog", "0.0"))
    self.label_11.setText(_translate("Dialog", "0.0"))
    self.label_12.setText(_translate("Dialog", "0.0"))
    self.label_13.setText(_translate("Dialog", "0.0"))
    self.label_14.setText(_translate("Dialog", "0.0"))
    # Row captions ("Back N" channel names).
    self.label.setText(_translate("Dialog", "Back 2"))
    self.label_2.setText(_translate("Dialog", "Back 4"))
    self.label_3.setText(_translate("Dialog", "Back 6"))
    self.label_4.setText(_translate("Dialog", "Back 8"))
    self.label_5.setText(_translate("Dialog", "Back 12"))
    self.label_6.setText(_translate("Dialog", "Back 14"))
    self.label_7.setText(_translate("Dialog", "Back 24"))
    self.pushButton_2.setText(_translate("Dialog", "InputVal"))
if __name__ == "__main__":
    # Standalone preview: build the dialog and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
en
| 0.840786
|
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'd:\biologue\Automation\QTDialogVer2.ui' # # Created by: PyQt5 UI code generator 5.15.4 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing.
| 2.168651
| 2
|
python/ua_gec/stats.py
|
kaidisn/ua-gec
| 0
|
6626017
|
<reponame>kaidisn/ua-gec<filename>python/ua_gec/stats.py
#!/usr/bin/env python3
import spacy
class CorpusStatistics:
    """Compute corpus statistics (document, sentence, token, user counts).

    Statistics are computed eagerly on construction and stored in
    ``self.stats`` as ``{section: {subset_name: {metric: value}}}``.
    """

    def __init__(self, corpus):
        # `corpus` must provide get_documents(); each document exposes
        # `.source` (the text) and `.meta` (annotator metadata).
        self.corpus = corpus
        self.stats = {}
        # Multilingual spaCy pipeline, used only for tokenization.
        self.nlp = spacy.load("xx_ent_wiki_sm")
        self.compute()

    def compute(self):
        """Populate ``self.stats`` with totals and per-attribute breakdowns."""
        # Bug fix: the original read the module-level global `corpus` instead
        # of the instance attribute, so compute() only worked when a global
        # with that exact name happened to exist (as in the __main__ block).
        docs = self.corpus.get_documents()
        self.stats["Total"] = {}
        self.stats["Total"]["All"] = self._subset_stats(docs)
        self.stats["By gender"] = self._breakdown(docs, "gender")
        self.stats["By region"] = self._breakdown(docs, "region")
        self.stats["By native"] = self._breakdown(docs, "is_native")
        self.stats["By occupation"] = self._breakdown(docs, "occupation")
        self.stats["By submission type"] = self._breakdown(docs, "submission_type")
        self.stats["By translation lang"] = self._breakdown(docs, "source_language")

    def _subset_stats(self, docs):
        """Return counts (documents, sentences, tokens, unique users) for `docs`."""
        stats = {}
        stats["Documents"] = len(docs)
        stats["Sentences"] = sum(self.count_sentences(doc.source) for doc in docs)
        stats["Tokens"] = sum(self.count_tokens(doc.source) for doc in docs)
        stats["Unique users"] = len({doc.meta.author_id for doc in docs})
        return stats

    def reset_stats(self):
        # Intentional no-op; kept for interface compatibility.
        pass

    def pretty_print(self):
        """Print all computed statistics, one heading per section."""
        for top_key, subset in sorted(self.stats.items()):
            print(f"# {top_key}")
            for key, value in subset.items():
                print(f"{key:<30} {value}")
            print()

    def count_sentences(self, s):
        """Heuristically count sentences: collapse ellipses, then count .?! marks."""
        # 20 passes suffice to collapse any realistic run of dots to one.
        for _ in range(20):
            s = s.replace("..", ".")
        return s.count(".") + s.count("?") + s.count("!")

    def count_tokens(self, s):
        """Count tokens in `s` using the spaCy tokenizer."""
        return len(self.nlp(s))

    def _breakdown(self, docs, field):
        """Compute statistics with breakdown by `field`.

        Returns:
            dict: field_class (str) => stats (dict[str, int])
        """
        result = {}
        values = sorted({getattr(doc.meta, field) for doc in docs})
        for value in values:
            subset = [doc for doc in docs if getattr(doc.meta, field) == value]
            result[value] = self._subset_stats(subset)
        return result
if __name__ == "__main__":
    # CLI entry point: compute and print statistics for the full corpus.
    from ua_gec import Corpus
    corpus = Corpus("all")
    stats = CorpusStatistics(corpus)
    stats.pretty_print()
|
#!/usr/bin/env python3
import spacy
class CorpusStatistics:
"""Compute corpus statistics. """
def __init__(self, corpus):
self.corpus = corpus
self.stats = {}
self.nlp = spacy.load("xx_ent_wiki_sm")
self.compute()
def compute(self):
docs = corpus.get_documents()
self.stats["Total"] = {}
self.stats["Total"]["All"] = self._subset_stats(docs)
self.stats["By gender"] = self._breakdown(docs, "gender")
self.stats["By region"] = self._breakdown(docs, "region")
self.stats["By native"] = self._breakdown(docs, "is_native")
self.stats["By occupation"] = self._breakdown(docs, "occupation")
self.stats["By submission type"] = self._breakdown(docs, "submission_type")
self.stats["By translation lang"] = self._breakdown(docs, "source_language")
def _subset_stats(self, docs):
stats = {}
stats["Documents"] = len(docs)
stats["Sentences"] = sum(self.count_sentences(doc.source) for doc in docs)
stats["Tokens"] = sum(self.count_tokens(doc.source) for doc in docs)
stats["Unique users"] = len(set(doc.meta.author_id for doc in docs))
return stats
def reset_stats(self):
pass
def pretty_print(self):
for top_key, subset in sorted(self.stats.items()):
print(f"# {top_key}")
for key, value in subset.items():
print(f"{key:<30} {value}")
print()
def count_sentences(self, s):
for _ in range(20):
s = s.replace("..", ".")
return s.count(".") + s.count("?") + s.count("!")
def count_tokens(self, s):
tokens = self.nlp(s)
return len(tokens)
def _breakdown(self, docs, field):
"""Compute statistics with breakdown by `field`.
Returns:
dict: field_class (str) => stats (dict[str, int])
"""
result = {}
values = sorted({getattr(doc.meta, field) for doc in docs})
for value in values:
subset = [doc for doc in docs if getattr(doc.meta, field) == value]
result[value] = self._subset_stats(subset)
return result
if __name__ == "__main__":
from ua_gec import Corpus
corpus = Corpus("all")
stats = CorpusStatistics(corpus)
stats.pretty_print()
|
en
| 0.607374
|
#!/usr/bin/env python3 Compute corpus statistics. Compute statistics with breakdown by `field`. Returns: dict: field_class (str) => stats (dict[str, int])
| 2.752972
| 3
|
liteasr/nets/feed_forward.py
|
Nazukixv/LiteASR
| 0
|
6626018
|
<gh_stars>0
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward block: Linear -> activation -> Dropout -> Linear.

    Expands the last dimension from ``i_dim`` to ``h_units`` and projects it
    back, applying the activation and dropout in between.
    """

    def __init__(
        self,
        i_dim: int,
        h_units: int,
        dropout_rate: float,
        activation: nn.Module = nn.ReLU(),
    ):
        super().__init__()
        # Expansion and projection layers of the feed-forward block.
        self.fc1 = nn.Linear(i_dim, h_units)
        self.fc2 = nn.Linear(h_units, i_dim)
        self.dropout = nn.Dropout(dropout_rate)
        self.activation = activation

    def forward(self, x):
        """Apply the block to ``x`` of shape (..., i_dim); output has the same shape."""
        hidden = self.fc1(x)
        hidden = self.activation(hidden)
        hidden = self.dropout(hidden)
        return self.fc2(hidden)
|
import torch.nn as nn
class PositionwiseFeedForward(nn.Module):
def __init__(
self,
i_dim: int,
h_units: int,
dropout_rate: float,
activation: nn.Module = nn.ReLU(),
):
super().__init__()
self.fc1 = nn.Linear(i_dim, h_units)
self.fc2 = nn.Linear(h_units, i_dim)
self.dropout = nn.Dropout(dropout_rate)
self.activation = activation
def forward(self, x):
return self.fc2(self.dropout(self.activation(self.fc1(x))))
|
none
| 1
| 2.921392
| 3
|
|
man_knife_ssl_check/source/conf.py
|
iennae/chef-docs
| 0
|
6626019
|
<filename>man_knife_ssl_check/source/conf.py
# -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE(review): sys and os appear unused while the sys.path tweak below
# stays commented out.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'knife ssl check'
# copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.0.0'
# The full version, including alpha/beta/rc tags.
#release = '0.0.0-0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# This is being used to define the version number for Chef, for now.
#
today = 'Chef 12.0'
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# highlight_language = 'ruby'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# A string of reStructuredText that will be included at the beginning of every source file that is read.
# NOTE(review): the relative ../../swaps paths are resolved by Sphinx at build
# time; they must exist two levels above the source dir.
rst_prolog = """
.. include:: ../../swaps/swap_descriptions.txt
.. include:: ../../swaps/swap_names.txt
.. include:: ../../swaps/swap_notes.txt
"""
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'knife-ssl-check', u'The man page for the knife ssl check subcommand.',
    [u'Chef'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
|
<filename>man_knife_ssl_check/source/conf.py
# -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'knife ssl check'
# copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.0.0'
# The full version, including alpha/beta/rc tags.
#release = '0.0.0-0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# This is being used to define the version number for Chef, for now.
#
today = 'Chef 12.0'
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# highlight_language = 'ruby'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# A string of reStructuredText that will be included at the beginning of every source file that is read.
rst_prolog = """
.. include:: ../../swaps/swap_descriptions.txt
.. include:: ../../swaps/swap_names.txt
.. include:: ../../swaps/swap_notes.txt
"""
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'knife-ssl-check', u'The man page for the knife ssl check subcommand.',
[u'Chef'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
|
en
| 0.72064
|
# -*- coding: utf-8 -*- # # Chef documentation build configuration file, created by # sphinx-quickstart on Wed Feb 22 13:50:49 2012. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # Add any paths that contain templates here, relative to this directory. # The suffix of source filenames. # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. # General information about the project. # copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. #version = '0.0.0' # The full version, including alpha/beta/rc tags. #release = '0.0.0-0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # This is being used to define the version number for Chef, for now. # # Else, today_fmt is used as the format for a strftime call. 
#today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. # highlight_language = 'ruby' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # A string of reStructuredText that will be included at the beginning of every source file that is read. .. include:: ../../swaps/swap_descriptions.txt .. include:: ../../swaps/swap_names.txt .. include:: ../../swaps/swap_notes.txt # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). # If true, show URL addresses after external links. #man_show_urls = False
| 1.532711
| 2
|
dimka/core/app.py
|
madmis/dimka-binance
| 0
|
6626020
|
import argparse
import time
import logging
import os
from binance.client import Client
from dimka.core import config, models
class Application:
    """ BitBot Application instance """

    def __init__(self, bot_name: str):
        # Name of the bot package under dimka.bot.<name>.bot loaded in run().
        self.bot_name = bot_name
        self.__init_default_arguments()
        self.config = config.Config()
        self.log = logging.getLogger()
        self.args = None        # populated by init() from the command line
        self.pair_info = None   # NOTE(review): not assigned anywhere in this class

    def init(self):
        # Parse CLI args, then wire up logging, config and the database,
        # in that order (logger level depends on args; DB path on config).
        self.args = self.__arg_parser.parse_args()
        self.__init_logger()
        self.__parse_config()
        self.__init_db_conn()
        self.config.params["bot_name"] = self.bot_name

    def run(self):
        # Exchange client built from the configured API credentials.
        client = Client(
            self.config.params.get("key"),
            self.config.params.get("secret"),
        )
        # Dynamically import dimka.bot.<bot_name>.bot and instantiate its Bot.
        name = "dimka.bot.{}.bot".format(self.bot_name.lower())
        mod = __import__(name, fromlist=[''])
        class_ = getattr(mod, "Bot")
        bot = class_(client, self.config, self.args)
        # Main loop: run the bot forever, pausing 15s between iterations.
        while True:
            try:
                bot.run()
                time.sleep(15)
            except RestartBotException as e:
                # Bot requested a restart: wait e.timeout seconds, then retry.
                self.log.warning(str(e))
                self.log.warning("Restart Bot")
                time.sleep(e.timeout)
                continue
            except NotImplementedError as e:
                # Unimplemented bot hook: abort the loop entirely.
                self.log.error("{}".format(e))
                break
            except Exception as e:
                # Any other failure: log with traceback and retry after 5s.
                self.log.exception("An error occurred: {}".format(e))
                time.sleep(5)

    def add_argument(self, *args, **kwargs):
        """
        Add application console argument.
        Can be used to add specific bot arguments.
        """
        self.__arg_parser.add_argument(*args, **kwargs)

    def __parse_config(self):
        """ Parse application config """
        self.config.parse_config(self.args)

    def __init_logger(self):
        """ Initialize application logger """
        # Default to WARNING; --debug raises verbosity to DEBUG.
        level = logging.WARNING
        if self.args.debug is True:
            level = logging.DEBUG
        self.log = self.config.init_logger(level, self.bot_name)

    def __init_db_conn(self):
        """ Initialize database """
        db_path = self.config.params.get("db_path")
        # Tables are created only when the SQLite file does not exist yet.
        create = not os.path.isfile(db_path)
        # NOTE(review): `notice` is not a stdlib logging level; presumably the
        # logger returned by config.init_logger() adds it -- confirm.
        self.log.notice("Initialize database:")
        self.log.notice("    DB Path: {}".format(db_path))
        db = models.database
        db.init(db_path)
        if create:
            self.log.notice("    Create tables")
            db.create_tables([
                models.OrderInfo,
                models.Ticker,
            ])

    def __init_default_arguments(self):
        """ Initialize ArgumentParser and set default arguments """
        self.__arg_parser = argparse.ArgumentParser(
            formatter_class=argparse.RawTextHelpFormatter
        )
        self.__arg_parser.add_argument(
            "config",
            type=str,
            help="Application config yaml file (full path): /var/www/config/config.yaml",
        )
        self.__arg_parser.add_argument(
            "--debug",
            action="store_true",
            help="Show debug info",
        )

    def __str__(self):
        return str(self.__class__) + ": " + str(self.__dict__)
class RestartBotException(RuntimeError):
    """Raised by a bot to ask the application's run loop to restart it.

    The run loop sleeps ``timeout`` seconds before restarting.
    """

    def __init__(self, *args, timeout: int = 2, **kwargs):
        # Seconds the run loop waits before restarting the bot.
        self.timeout = timeout
        super().__init__(*args, **kwargs)
|
import argparse
import time
import logging
import os
from binance.client import Client
from dimka.core import config, models
class Application:
""" BitBot Application instance """
def __init__(self, bot_name: str):
self.bot_name = bot_name
self.__init_default_arguments()
self.config = config.Config()
self.log = logging.getLogger()
self.args = None
self.pair_info = None
def init(self):
self.args = self.__arg_parser.parse_args()
self.__init_logger()
self.__parse_config()
self.__init_db_conn()
self.config.params["bot_name"] = self.bot_name
def run(self):
client = Client(
self.config.params.get("key"),
self.config.params.get("secret"),
)
name = "dimka.bot.{}.bot".format(self.bot_name.lower())
mod = __import__(name, fromlist=[''])
class_ = getattr(mod, "Bot")
bot = class_(client, self.config, self.args)
while True:
try:
bot.run()
time.sleep(15)
except RestartBotException as e:
self.log.warning(str(e))
self.log.warning("Restart Bot")
time.sleep(e.timeout)
continue
except NotImplementedError as e:
self.log.error("{}".format(e))
break
except Exception as e:
self.log.exception("An error occurred: {}".format(e))
time.sleep(5)
def add_argument(self, *args, **kwargs):
"""
Add application console argument.
Can be used to add specific bot arguments.
"""
self.__arg_parser.add_argument(*args, **kwargs)
def __parse_config(self):
""" Parse application config """
self.config.parse_config(self.args)
def __init_logger(self):
""" Initialize application logger """
level = logging.WARNING
if self.args.debug is True:
level = logging.DEBUG
self.log = self.config.init_logger(level, self.bot_name)
def __init_db_conn(self):
""" Initialize database """
db_path = self.config.params.get("db_path")
create = not os.path.isfile(db_path)
self.log.notice("Initialize database:")
self.log.notice(" DB Path: {}".format(db_path))
db = models.database
db.init(db_path)
if create:
self.log.notice(" Create tables")
db.create_tables([
models.OrderInfo,
models.Ticker,
])
def __init_default_arguments(self):
""" Initialize ArgumentParser and set default arguments """
self.__arg_parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter
)
self.__arg_parser.add_argument(
"config",
type=str,
help="Application config yaml file (full path): /var/www/config/config.yaml",
)
self.__arg_parser.add_argument(
"--debug",
action="store_true",
help="Show debug info",
)
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
class RestartBotException(RuntimeError):
""" Exception to restart loop with bot.run """
def __init__(self, *args, timeout: int = 2, **kwargs):
super().__init__(*args, **kwargs)
self.timeout = timeout
|
en
| 0.416428
|
BitBot Application instance Add application console argument. Can be used to add specific bot arguments. Parse application config Initialize application logger Initialize database Initialize ArgumentParser and set default arguments Exception to restart loop with bot.run
| 2.265196
| 2
|
blog/forms.py
|
arminadm/django_projects
| 0
|
6626021
|
<filename>blog/forms.py
from django import forms
from blog.models import Comment
from captcha.fields import CaptchaField
class commentForm(forms.ModelForm):
    """Blog comment form with a CAPTCHA field to deter spam."""

    # Extra non-model field, validated by django-simple-captcha.
    captcha = CaptchaField()

    class Meta:
        # Remaining fields are generated from the Comment model.
        model = Comment
        fields = ['post', 'author', 'email', 'message']
|
<filename>blog/forms.py
from django import forms
from blog.models import Comment
from captcha.fields import CaptchaField
class commentForm(forms.ModelForm):
captcha = CaptchaField()
class Meta:
model = Comment
fields = ['post', 'author', 'email', 'message']
|
none
| 1
| 2.139858
| 2
|
|
tests/acquisition/covid_hosp/state_timeseries/test_network.py
|
chinandrew/delphi-epidata
| 1
|
6626022
|
"""Unit tests for network.py."""
# standard library
import unittest
from unittest.mock import patch
from unittest.mock import sentinel
from delphi.epidata.acquisition.covid_hosp.state_timeseries.network import Network
# py3tester coverage target
__test_target__ = \
'delphi.epidata.acquisition.covid_hosp.state_timeseries.network'
class NetworkTests(unittest.TestCase):
    """Tests for the state_timeseries Network wrapper."""

    def test_fetch_metadata(self):
        """Fetch metadata as JSON."""
        # fetch_metadata must delegate to the generic
        # fetch_metadata_for_dataset with this dataset's ID and return the
        # payload unchanged (the sentinel proves a pure pass-through).
        with patch.object(Network, 'fetch_metadata_for_dataset') as func:
            func.return_value = sentinel.json
            result = Network.fetch_metadata()
            self.assertEqual(result, sentinel.json)
            func.assert_called_once_with(dataset_id=Network.DATASET_ID)
|
"""Unit tests for network.py."""
# standard library
import unittest
from unittest.mock import patch
from unittest.mock import sentinel
from delphi.epidata.acquisition.covid_hosp.state_timeseries.network import Network
# py3tester coverage target
__test_target__ = \
'delphi.epidata.acquisition.covid_hosp.state_timeseries.network'
class NetworkTests(unittest.TestCase):
def test_fetch_metadata(self):
"""Fetch metadata as JSON."""
with patch.object(Network, 'fetch_metadata_for_dataset') as func:
func.return_value = sentinel.json
result = Network.fetch_metadata()
self.assertEqual(result, sentinel.json)
func.assert_called_once_with(dataset_id=Network.DATASET_ID)
|
en
| 0.869806
|
Unit tests for network.py. # standard library # py3tester coverage target Fetch metadata as JSON.
| 2.438478
| 2
|
lncrawl/sources/wattpad.py
|
betabeast12/lightnovel-crawler
| 1
|
6626023
|
<reponame>betabeast12/lightnovel-crawler<gh_stars>1-10
# -*- coding: utf-8 -*-
from time import time
import logging
import re
from urllib.parse import urlparse
from ..utils.crawler import Crawler
logger = logging.getLogger(__name__)
chapter_info_url = 'https://www.wattpad.com/v4/parts/%s?fields=id,title,pages,text_url&_=%d'
class WattpadCrawler(Crawler):
    """Crawler for novels hosted on Wattpad."""

    base_url = [
        'https://www.wattpad.com/',
        'https://my.w.tt/',
    ]

    def initialize(self):
        # Canonical host used when resolving relative links.
        self.home_url = self.base_url[0]
    # end def

    def read_novel_info(self):
        '''Get novel title, author, cover etc'''
        logger.debug('Visiting %s', self.novel_url)
        soup = self.get_soup(self.novel_url)

        self.novel_title = soup.select('h1')[0].get_text().strip()
        logger.info('Novel title: %s', self.novel_title)

        self.novel_cover = self.absolute_url(
            soup.select('div.cover.cover-lg img')[0]['src'])
        logger.info('Novel cover: %s', self.novel_cover)

        self.novel_author = soup.select(
            'div.author-info strong a')[0].get_text()
        logger.info('Novel author: %s', self.novel_author)

        chapters = soup.select('ul.table-of-contents a')

        # Group chapters into synthetic volumes of 100 chapters each.
        vols = set()
        for a in chapters:
            chap_id = len(self.chapters) + 1
            vol_id = len(self.chapters) // 100 + 1
            vols.add(vol_id)
            self.chapters.append({
                'id': chap_id,
                'volume': vol_id,
                'url': self.absolute_url(a['href']),
                'title': a.text.strip() or ('Chapter %d' % chap_id),
            })
        # end for
        self.volumes = [{'id': i} for i in vols]
    # end def

    def download_chapter_body(self, chapter):
        '''Download body of a single chapter and return as clean html format.'''
        # Wattpad's v4 API returns chapter metadata (title + text URL); the
        # numeric chapter id is the first path segment of the chapter URL.
        chapter_id = urlparse(chapter['url']).path.split('-')[0].strip('/')
        info_url = chapter_info_url % (chapter_id, int(time() * 1000))
        logger.info('Getting info %s', info_url)
        data = self.get_json(info_url)
        chapter['title'] = data['title']
        text_url = data['text_url']['text']
        logger.info('Getting text %s', text_url)
        text = self.get_response(text_url).content.decode('utf-8')
        # Strip Wattpad's per-paragraph tracking ids:
        # <p data-p-id="abc123"> -> <p>.
        # Bug fix: the original pattern put the closing quote after '>'
        # (r'<p data-p-id="[a-f0-9]+>"'), so it could never match the real
        # markup and the ids leaked into the output.
        text = re.sub(r'<p data-p-id="[a-f0-9]+">', '<p>', text)
        return text
    # end def
# end class
|
# -*- coding: utf-8 -*-
from time import time
import logging
import re
from urllib.parse import urlparse
from ..utils.crawler import Crawler
logger = logging.getLogger(__name__)
chapter_info_url = 'https://www.wattpad.com/v4/parts/%s?fields=id,title,pages,text_url&_=%d'
class WattpadCrawler(Crawler):
base_url = [
'https://www.wattpad.com/',
'https://my.w.tt/',
]
def initialize(self):
self.home_url = self.base_url[0]
def read_novel_info(self):
'''Get novel title, autor, cover etc'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.select('h1')[0].get_text().strip()
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = self.absolute_url(
soup.select('div.cover.cover-lg img')[0]['src'])
logger.info('Novel cover: %s', self.novel_cover)
self.novel_author = soup.select(
'div.author-info strong a')[0].get_text()
logger.info('Novel author: %s', self.novel_author)
#description = soup.select('h2.description')[0].get_text()
chapters = soup.select('ul.table-of-contents a')
# chapters.reverse()
vols = set([])
for a in chapters:
chap_id = len(self.chapters) + 1
vol_id = len(self.chapters) // 100 + 1
vols.add(vol_id)
self.chapters.append({
'id': chap_id,
'volume': vol_id,
'url': self.absolute_url(a['href']),
'title': a.text.strip() or ('Chapter %d' % chap_id),
})
# end for
self.volumes = [{'id': i} for i in vols]
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format.'''
# soup = self.get_soup(chapter['url'])
# pages = int(re.search(
# '[1-9]', re.search('("pages":)([1-9])', str(soup)).group(0)).group(0))
# #chapter['title'] = soup.select('h2')[0].get_text().strip()
# contents = []
# for i in range(1, pages+1):
# page_url = chapter['url'] + "/page/" + str(i)
# logger.info('Get body text from %s', page_url)
# soup_page = self.get_soup(page_url)
# for p in soup_page.select('pre p'):
# contents.append(p.text)
# return '<p>' + '</p><p>'.join(contents) + '</p>'
chapter_id = urlparse(chapter['url']).path.split('-')[0].strip('/')
info_url = chapter_info_url % (chapter_id, int(time() * 1000))
logger.info('Getting info %s', info_url)
data = self.get_json(info_url)
chapter['title'] = data['title']
text_url = data['text_url']['text']
logger.info('Getting text %s', text_url)
text = self.get_response(text_url).content.decode('utf-8')
text = re.sub(r'<p data-p-id="[a-f0-9]+>"', '<p>', text)
return text
# end def
# end class
|
en
| 0.374887
|
# -*- coding: utf-8 -*- Get novel title, autor, cover etc #description = soup.select('h2.description')[0].get_text() # chapters.reverse() # end for # end def Download body of a single chapter and return as clean html format. # soup = self.get_soup(chapter['url']) # pages = int(re.search( # '[1-9]', re.search('("pages":)([1-9])', str(soup)).group(0)).group(0)) # #chapter['title'] = soup.select('h2')[0].get_text().strip() # contents = [] # for i in range(1, pages+1): # page_url = chapter['url'] + "/page/" + str(i) # logger.info('Get body text from %s', page_url) # soup_page = self.get_soup(page_url) # for p in soup_page.select('pre p'): # contents.append(p.text) # return '<p>' + '</p><p>'.join(contents) + '</p>' # end def # end class
| 3.053099
| 3
|
MultiPManager/__init__.py
|
sebastiantrianac/SoftTLON
| 0
|
6626024
|
# __init__.py
import sys
import stomp
import dill as pickle
import time
|
# __init__.py
import sys
import stomp
import dill as pickle
import time
|
ar
| 0.447093
|
# __init__.py
| 1.005283
| 1
|
xlsxwriter/test/comparison/test_header_image01.py
|
haiyangd/XlsxWriter
| 3
|
6626025
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, <NAME>, <EMAIL>
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
from ...compatibility import BytesIO
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'header_image01.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.image_dir = test_dir + 'images/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        # Page margin/setup elements vary between writers; exclude them.
        self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}

    def _read_image_data(self):
        """Read the test image into a BytesIO, closing the file promptly.

        Fix: the original tests used open()/close() without try/finally,
        leaking the handle if read() raised; a context manager always closes.
        """
        with open(self.image_dir + 'red.jpg', 'rb') as image_file:
            return BytesIO(image_file.read())

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.set_header('&L&G',
                             {'image_left': self.image_dir + 'red.jpg'})

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_in_memory(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename, {'in_memory': True})
        worksheet = workbook.add_worksheet()

        worksheet.set_header('&L&G',
                             {'image_left': self.image_dir + 'red.jpg'})

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_from_bytesio(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        image_data = self._read_image_data()

        worksheet.set_header('&L&G',
                             {'image_left': 'red.jpg',
                              'image_data_left': image_data})

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_from_bytesio_in_memory(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename, {'in_memory': True})
        worksheet = workbook.add_worksheet()

        image_data = self._read_image_data()

        worksheet.set_header('&L&G',
                             {'image_left': 'red.jpg',
                              'image_data_left': image_data})

        workbook.close()

        self.assertExcelEqual()
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, <NAME>, <EMAIL>
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
from ...compatibility import BytesIO
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'header_image01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.image_dir = test_dir + 'images/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {'xl/worksheets/sheet1.xml': ['<pageMargins', '<pageSetup']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header('&L&G',
{'image_left': self.image_dir + 'red.jpg'})
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {'in_memory': True})
worksheet = workbook.add_worksheet()
worksheet.set_header('&L&G',
{'image_left': self.image_dir + 'red.jpg'})
workbook.close()
self.assertExcelEqual()
def test_create_file_from_bytesio(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
image_file = open(self.image_dir + 'red.jpg', 'rb')
image_data = BytesIO(image_file.read())
image_file.close()
worksheet.set_header('&L&G',
{'image_left': 'red.jpg',
'image_data_left': image_data})
workbook.close()
self.assertExcelEqual()
def test_create_file_from_bytesio_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {'in_memory': True})
worksheet = workbook.add_worksheet()
image_file = open(self.image_dir + 'red.jpg', 'rb')
image_data = BytesIO(image_file.read())
image_file.close()
worksheet.set_header('&L&G',
{'image_left': 'red.jpg',
'image_data_left': image_data})
workbook.close()
self.assertExcelEqual()
|
en
| 0.64347
|
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2017, <NAME>, <EMAIL> # Test file created by XlsxWriter against a file created by Excel. Test the creation of a simple XlsxWriter file with image(s). Test the creation of a simple XlsxWriter file with image(s). Test the creation of a simple XlsxWriter file with image(s). Test the creation of a simple XlsxWriter file with image(s).
| 2.745639
| 3
|
test_liteproto.py
|
lybicat/lite-protobuf
| 1
|
6626026
|
<gh_stars>1-10
from unittest import TestCase
from liteproto import load
from liteproto import loads
class TestLiteProto(TestCase):
    """Smoke tests for liteproto's proto-definition loaders."""

    def test_load_proto_file(self):
        # Loading the 'Pair' message from a .proto file should not raise.
        load('ut.proto', 'Pair')

    def test_load_proto_string(self):
        # Parsing an inline proto2 definition should not raise.
        # The proto text below is runtime data and is left byte-identical.
        loads('''syntax = "proto2";
message Ack{
enum ConfirmationStatus{
ACK = 1;
NACK = 2;
}
required ConfirmationStatus confirmation = 1;
required uint32 transactionId = 2;
optional string reason = 3;
}''')
|
from unittest import TestCase
from liteproto import load
from liteproto import loads
class TestLiteProto(TestCase):
def test_load_proto_file(self):
load('ut.proto', 'Pair')
def test_load_proto_string(self):
loads('''syntax = "proto2";
message Ack{
enum ConfirmationStatus{
ACK = 1;
NACK = 2;
}
required ConfirmationStatus confirmation = 1;
required uint32 transactionId = 2;
optional string reason = 3;
}''')
|
en
| 0.289122
|
syntax = "proto2"; message Ack{ enum ConfirmationStatus{ ACK = 1; NACK = 2; } required ConfirmationStatus confirmation = 1; required uint32 transactionId = 2; optional string reason = 3; }
| 2.463058
| 2
|
authentication/configuration.py
|
anae09/electionWebService
| 0
|
6626027
|
from datetime import timedelta
import os
# Database host, injected via the environment (raises KeyError if unset,
# which fails fast at import time).
db = os.environ["DATABASE_URL"]


class Configuration:
    """Flask-SQLAlchemy and flask-jwt-extended settings for the auth service."""

    # NOTE(review): hard-coded DB credentials (root:root) and JWT secret are
    # a security risk -- these should come from the environment or a secret
    # store, not source control.
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@{}/authentication".format(db)
    JWT_SECRET_KEY = "ANA_ANA_ANA"
    # Access tokens are short-lived; refresh tokens last a month.
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)
    JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=30)
|
from datetime import timedelta
import os
db = os.environ["DATABASE_URL"];
class Configuration:
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@{}/authentication".format(db);
JWT_SECRET_KEY = "ANA_ANA_ANA";
JWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1);
JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=30);
|
none
| 1
| 2.449218
| 2
|
|
Imaging/sensor.py
|
CHEN-yongquan/Asteroid_CPO_seeker
| 2
|
6626028
|
<filename>Imaging/sensor.py
import numpy as np
import attitude_utils as attu
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import env_utils as envu
class Sensor(object):
    """
    Seeker-based imaging sensor model.

    Wraps a seeker object and converts raw seeker measurements (angles to
    tracked objects, pixel intensity) into the observation vector consumed
    by the agent.  The observation layout is selected via ``state_type``;
    the provided static methods are the available layouts.
    """

    def __init__(self, seeker, max_range_intensity=0.0, attitude_parameterization=attu.Quaternion_attitude, use_range=True,
                 pool_type='max', offset=np.asarray([0,0]), state_type=None, optflow_scale=1.0 ,
                 apf_tau1=300, apf_v0=0.5, use_dp=True, landing_site_range=0.0, debug=False):
        self.debug = debug
        self.ap = attitude_parameterization
        self.use_range = use_range
        # When stabilized, get_seeker_angles uses the initial attitude
        # captured in reset() instead of the current one.
        self.stabilized = True
        self.landing_site_range = landing_site_range
        self.use_dp = use_dp
        self.seeker = seeker
        print(seeker.get_optical_axis(np.identity(3)))
        self.max_range_intensity = max_range_intensity
        self.seeker_angles = None
        self.pixel_int = None
        self.optflow_scale = optflow_scale
        # Attractive-potential-field reference-velocity parameters
        # (see track_func0/track_func1).
        self.apf_v0 = apf_v0
        self.apf_tau1 = apf_tau1
        self.track_func = self.track_func1
        # NOTE(review): offset is a shared default ndarray; it is only read
        # (never mutated in place), so the shared default is safe as-is.
        self.offset = offset
        if pool_type == 'ave':
            self.pool_func = self.ave_pool_forward_reshape
            print('using average pooling')
        else:
            self.pool_func = self.max_pool_forward_reshape
            print('using max pooling')
        if state_type is None:
            # NOTE(review): ``Range_sensor`` is not defined in this module;
            # this default raises NameError unless it is supplied elsewhere
            # in the package -- TODO confirm.
            self.state_type = Range_sensor.simple_state
        else:
            self.state_type = state_type
        print('V4: Output State type: ', state_type)

    def reset(self, lander_state):
        """Reset seeker and clear per-trajectory state; snapshot the attitude."""
        self.seeker.reset()
        self.initial_attitude = lander_state['attitude'].copy()
        self.seeker_angles = None
        self.cs_angles = None
        self.pixel_int = None
        self.image_f = None
        self.image_c = None
        self.full_image = None
        # Cleared so the first step after reset reports zero optical flow.
        self.last_seeker_angles = None
        self.last_pixel_int = None

    def get_seeker_angles(self, agent_state, object_locations=np.zeros(3), render=False ):
        """
        Query the seeker for angles/intensity to the tracked objects.

        Returns (seeker_angles, pixel_int, pixel_vc) and sets
        ``self.fov_violation`` when the seeker reports no object in view;
        in that case the angles are replaced by an out-of-FOV placeholder
        (1.1 * half-FOV in both axes).
        """
        agent_location = agent_state['position']
        agent_velocity = agent_state['velocity']
        # Promote a single object location to a (1, 3) batch.
        if len(object_locations.shape) < 2:
            object_locations = np.expand_dims(object_locations, axis=0)
        # Intensity is modeled as the range to each object.
        object_intensities = np.linalg.norm(agent_location - object_locations, axis=1)
        if self.stabilized:
            agent_q = self.initial_attitude
        else:
            agent_q = agent_state['attitude']
        self.agent_q = agent_q
        seeker_angles, pixel_int = self.seeker.get_seeker_angles(agent_location, agent_q, object_locations, object_intensities)
        if render:
            self.render(seeker_angles, pixel_int)
        self.fov_violation = seeker_angles.shape[0] < 1
        if seeker_angles.shape[0] < 1:
            # Placeholder just outside the field of view.
            seeker_angles = 1.0 * np.expand_dims(1.1 * self.seeker.fov / 2 * np.ones(2), axis=0)
        pixel_vc = envu.get_vc(agent_location, agent_velocity)
        return seeker_angles, pixel_int, pixel_vc

    def get_image_state(self, agent_state, object_locations ):
        """
        Build the observation vector for the current step.

        Computes optical flow (du, dv), intensity change (dp), and the
        tracking error/time-to-go from ``track_func``, then assembles them
        via ``self.state_type``.  All deltas are zeroed on an FOV violation
        or on the first step after reset.
        """
        agent_location = agent_state['position']
        agent_velocity = agent_state['velocity']
        seeker_angles, pixel_int, pixel_vc = self.get_seeker_angles(agent_state, object_locations=object_locations)
        seeker_angles = np.squeeze(seeker_angles)
        self.traj_seeker_angles = seeker_angles.copy()
        pixel_int = np.squeeze(pixel_int)
        self.pixel_int = pixel_int
        # Optical flow: frame-to-frame change in seeker angles.
        if self.fov_violation:
            du = 0.0
            dv = 0.0
        elif self.last_seeker_angles is not None:
            du = 1.0 * (seeker_angles[0] - self.last_seeker_angles[0])
            dv = 1.0 * (seeker_angles[1] - self.last_seeker_angles[1])
        else:
            du = 0.0
            dv = 0.0
        du *= self.optflow_scale
        dv *= self.optflow_scale
        self.du = du
        self.dv = dv
        if self.fov_violation:
            pixel_int = 0.0
        # Intensity (range) rate.
        if self.fov_violation:
            dp = 0.0
        elif self.last_seeker_angles is not None:
            dp = pixel_int - self.last_pixel_int
        else:
            dp = 0.0
        self.last_seeker_angles = seeker_angles.copy()
        self.last_pixel_int = pixel_int
        self.cs_angles = seeker_angles - self.offset
        self.seeker_angles = seeker_angles.copy()
        # dp is the measured range rate; pixel_vc is the closing velocity.
        if self.use_dp:
            verr, t_go = self.track_func(pixel_int, dp)
        else:
            verr, t_go = self.track_func(pixel_int, pixel_vc)
        state = self.state_type(self.cs_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go)
        self.verr = verr
        # Debug print intentionally disabled; flip `False` to re-enable.
        if self.debug and False:
            print('2:', seeker_angles, state, self.cs_angles * (self.seeker.p_y // 2))
        return state

    # ------------------------------------------------------------------
    # Observation layouts.  All share the signature
    # (seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go).
    # ------------------------------------------------------------------

    @staticmethod
    def optflow_state(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((seeker_angles, du, dv))
        return state

    @staticmethod
    def range_dp_state0(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = verr
        return state

    @staticmethod
    def range_dp_state1(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((pixel_int, dp))
        return state

    @staticmethod
    def range_dp_state2(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((pixel_int, dp, t_go))
        return state

    @staticmethod
    def optflow_state_range_dp00(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((seeker_angles, du, dv, pixel_int, dp, t_go))
        return state

    @staticmethod
    def optflow_state_range_dp0(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((seeker_angles, du, dv, pixel_int, dp))
        return state

    @staticmethod
    def optflow_state_range_dp1(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        # Fix: removed a duplicated, unreachable copy of these two lines
        # that followed the return in the original.
        state = np.hstack((seeker_angles, du, dv, verr, t_go))
        return state

    @staticmethod
    def optflow_state_range_dp2(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((seeker_angles, du, dv, verr))
        return state

    @staticmethod
    def optflow_state_range_dp3(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
        state = np.hstack((seeker_angles, du, dv, verr))
        return state

    def check_for_vio(self):
        """Return True if the last seeker query had no object in the FOV."""
        return self.fov_violation

    def render(self, pixels, intensities):
        """Render the given pixel hits as a grayscale image (for debugging)."""
        u = pixels[:, 0]
        v = pixels[:, 1]
        image = self.max_range_intensity * np.ones((self.seeker.p_x, self.seeker.p_y))
        image[v, u] = intensities
        plt.figure()
        plt.imshow(image, interpolation='nearest', cmap='gray')
        plt.grid(True)

    def max_pool_forward_reshape(self, x, stride, pool_height, pool_width):
        """
        A fast implementation of the forward pass for the max pooling layer that uses
        some clever reshaping.

        This can only be used for square pooling regions that tile the input.
        """
        H, W = x.shape
        assert pool_height == pool_width == stride, 'Invalid pool params'
        assert H % pool_height == 0
        assert W % pool_height == 0
        x_reshaped = x.reshape(H // pool_height, pool_height,
                               W // pool_width, pool_width)
        out = x_reshaped.max(axis=1).max(axis=2)
        return out

    def ave_pool_forward_reshape(self, x, stride, pool_height, pool_width):
        """
        A fast implementation of the forward pass for the average pooling layer
        that uses some clever reshaping.

        This can only be used for square pooling regions that tile the input.
        """
        H, W = x.shape
        assert pool_height == pool_width == stride, 'Invalid pool params'
        assert H % pool_height == 0
        assert W % pool_height == 0
        x_reshaped = x.reshape(H // pool_height, pool_height,
                               W // pool_width, pool_width)
        out = x_reshaped.mean(axis=1).mean(axis=2)
        return out

    def track_func0(self, r, dr):
        """
        Velocity-error / time-to-go from range r and range rate dr.

        t_go = |r / dr| (capped sentinel 9999 when dr == 0); the reference
        velocity follows an exponential APF profile in t_go.
        """
        if np.abs(dr) > 0:
            t_go = np.abs(r / dr)
        else:
            t_go = 9999
        vref = self.apf_v0 * (1. - np.exp(-t_go / self.apf_tau1))
        verr = dr - vref
        return verr, t_go

    def track_func1(self, r, dr):
        """
        Like track_func0, but measures range relative to the landing site
        and clamps t_go to zero once the site has been passed (r < 0).
        """
        r -= self.landing_site_range
        if np.abs(dr) > 0:
            t_go = np.abs(r / dr)
        else:
            t_go = 9999
        if r < 0:
            t_go = 0.0
        vref = self.apf_v0 * (1. - np.exp(-t_go / self.apf_tau1))
        verr = dr - vref
        return verr, t_go
|
<filename>Imaging/sensor.py
import numpy as np
import attitude_utils as attu
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import env_utils as envu
class Sensor(object):
def __init__(self, seeker, max_range_intensity=0.0, attitude_parameterization=attu.Quaternion_attitude, use_range=True,
pool_type='max', offset=np.asarray([0,0]), state_type=None, optflow_scale=1.0 ,
apf_tau1=300, apf_v0=0.5, use_dp=True, landing_site_range=0.0, debug=False):
self.debug = debug
self.ap = attitude_parameterization
self.use_range = use_range
self.stabilized = True
self.landing_site_range = landing_site_range
self.use_dp = use_dp
self.seeker = seeker
print(seeker.get_optical_axis(np.identity(3)))
self.max_range_intensity = max_range_intensity
self.seeker_angles = None
self.pixel_int = None
self.optflow_scale = optflow_scale
self.apf_v0 = apf_v0
self.apf_tau1 = apf_tau1
self.track_func = self.track_func1
#self.c_dvec = None
self.offset = offset
if pool_type == 'ave':
self.pool_func = self.ave_pool_forward_reshape
print('using average pooling')
else:
self.pool_func = self.max_pool_forward_reshape
print('using max pooling')
if state_type is None:
self.state_type=Range_sensor.simple_state
else:
self.state_type = state_type
print('V4: Output State type: ', state_type)
def reset(self, lander_state):
self.seeker.reset()
self.initial_attitude = lander_state['attitude'].copy()
self.seeker_angles = None
self.cs_angles = None
self.pixel_int = None
self.image_f = None
self.image_c = None
self.full_image = None
self.last_seeker_angles = None
self.last_pixel_int = None
def get_seeker_angles(self, agent_state, object_locations=np.zeros(3), render=False ):
agent_location = agent_state['position']
agent_velocity = agent_state['velocity']
out_of_fov = False
if len(object_locations.shape) < 2:
object_locations = np.expand_dims(object_locations,axis=0)
object_intensities = np.linalg.norm(agent_location-object_locations,axis=1)
if self.stabilized:
agent_q = self.initial_attitude
else:
agent_q = agent_state['attitude']
self.agent_q = agent_q
seeker_angles, pixel_int = self.seeker.get_seeker_angles(agent_location, agent_q, object_locations, object_intensities)
if render:
self.render(seeker_angles, pixel_int)
#pixel_int = np.squeeze(pixel_int)
#print('sensor: ', pixel_int, np.linalg.norm(agent_location))
self.fov_violation = seeker_angles.shape[0] < 1
if seeker_angles.shape[0] < 1:
seeker_angles = 1.0*np.expand_dims(1.1*self.seeker.fov/2*np.ones(2), axis=0)
else:
seeker_angles = seeker_angles
pixel_vc = envu.get_vc(agent_location, agent_velocity)
return seeker_angles, pixel_int, pixel_vc
def get_image_state(self, agent_state, object_locations ):
agent_location = agent_state['position']
agent_velocity = agent_state['velocity']
seeker_angles, pixel_int , pixel_vc = self.get_seeker_angles( agent_state, object_locations=object_locations )
seeker_angles = np.squeeze(seeker_angles)
self.traj_seeker_angles = seeker_angles.copy()
pixel_int = np.squeeze(pixel_int)
self.pixel_int = pixel_int
if self.fov_violation:
du = 0.0
dv = 0.0
elif self.last_seeker_angles is not None:
#print('PC2: ', seeker_angles, self.last_seeker_angles)
du = 1.0*(seeker_angles[0] - self.last_seeker_angles[0])
dv = 1.0*(seeker_angles[1] - self.last_seeker_angles[1])
else:
du = 0.0
dv = 0.0
du *= self.optflow_scale
dv *= self.optflow_scale
self.du = du
self.dv = dv
if self.fov_violation :
pixel_int = 0.0
if self.fov_violation :
dp = 0.0
elif self.last_seeker_angles is not None:
dp = pixel_int - self.last_pixel_int
else:
dp = 0.0
self.last_seeker_angles = seeker_angles.copy()
self.last_pixel_int = pixel_int
self.cs_angles = seeker_angles - self.offset
self.seeker_angles = seeker_angles.copy()
if self.use_dp:
verr, t_go = self.track_func(pixel_int, dp)
else:
verr, t_go = self.track_func(pixel_int, pixel_vc)
state = self.state_type( self.cs_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go)
#if self.fov_violation:
# print(state)
# assert False
self.verr = verr
if self.debug and False:
print('2:',seeker_angles, state, self.cs_angles * (self.seeker.p_y//2))
return state
@staticmethod
def optflow_state(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((seeker_angles,du,dv))
return state
@staticmethod
def range_dp_state0(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = verr
#print(state)
return state
@staticmethod
def range_dp_state1(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((pixel_int, dp))
#print(state)
return state
@staticmethod
def range_dp_state2(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((pixel_int, dp, t_go))
#print(state)
return state
@staticmethod
def optflow_state_range_dp00(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((seeker_angles,du,dv,pixel_int, dp, t_go))
return state
@staticmethod
def optflow_state_range_dp0(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((seeker_angles,du,dv,pixel_int, dp))
return state
@staticmethod
def optflow_state_range_dp1(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((seeker_angles,du,dv,verr, t_go))
return state
state = np.hstack((seeker_angles,du,dv,verr, t_go))
return state
@staticmethod
def optflow_state_range_dp2(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((seeker_angles,du,dv,verr))
return state
@staticmethod
def optflow_state_range_dp3(seeker_angles, pixel_int, pixel_vc, du, dv, dp, verr, t_go):
state = np.hstack((seeker_angles,du,dv,verr))
return state
def check_for_vio(self):
return self.fov_violation
def render(self, pixels, intensities):
u = pixels[:,0]
v = pixels[:,1]
image = self.max_range_intensity*np.ones((self.seeker.p_x,self.seeker.p_y))
image[v,u] = intensities
plt.figure()
plt.imshow(image, interpolation='nearest',cmap='gray')
plt.grid(True)
def max_pool_forward_reshape(self, x, stride, pool_height, pool_width):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
H, W = x.shape
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
x_reshaped = x.reshape(H // pool_height, pool_height,
W // pool_width, pool_width)
out = x_reshaped.max(axis=1).max(axis=2)
return out
def ave_pool_forward_reshape(self, x, stride, pool_height, pool_width):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
H, W = x.shape
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
x_reshaped = x.reshape(H // pool_height, pool_height,
W // pool_width, pool_width)
out = x_reshaped.mean(axis=1).mean(axis=2)
return out
def track_func0(self, r, dr):
if np.abs(dr) > 0:
t_go = np.abs(r / dr)
else:
t_go = 9999
vref = self.apf_v0 * (1. - np.exp(-t_go / self.apf_tau1))
verr = dr - vref
return verr, t_go
def track_func1(self, r, dr):
r -= self.landing_site_range
if np.abs(dr) > 0:
t_go = np.abs(r / dr)
else:
t_go = 9999
if r < 0:
t_go = 0.0
vref = self.apf_v0 * (1. - np.exp(-t_go / self.apf_tau1))
verr = dr - vref
#print('track: ',vref, r, dr, t_go)
return verr, t_go
|
en
| 0.614396
|
#self.c_dvec = None #pixel_int = np.squeeze(pixel_int) #print('sensor: ', pixel_int, np.linalg.norm(agent_location)) #print('PC2: ', seeker_angles, self.last_seeker_angles) #if self.fov_violation: # print(state) # assert False #print(state) #print(state) #print(state) A fast implementation of the forward pass for the max pooling layer that uses some clever reshaping. This can only be used for square pooling regions that tile the input. A fast implementation of the forward pass for the max pooling layer that uses some clever reshaping. This can only be used for square pooling regions that tile the input. #print('track: ',vref, r, dr, t_go)
| 2.565557
| 3
|
openmdao/components/eq_constraint_comp.py
|
sebasgo/OpenMDAO
| 0
|
6626029
|
"""Define the EQConstraintComp class."""
from numbers import Number
import numpy as np
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.utils import cs_safe
class EQConstraintComp(ExplicitComponent):
"""
A component that computes the difference between two inputs to test for equality.
Attributes
----------
_output_vars : dict
Cache the data provided during `add_eq_output`
so everything can be saved until setup is called.
"""
def __init__(self, name=None, eq_units=None, lhs_name=None, rhs_name=None, rhs_val=0.0,
use_mult=False, mult_name=None, mult_val=1.0, normalize=True, add_constraint=False,
ref=None, ref0=None, adder=None, scaler=None, **kwargs):
r"""
Initialize an EQConstraintComp, optionally add an output constraint to the model.
The EQConstraintComp allows for the creation of one or more output variables and
computes the values for those variables based on the following equation:
.. math::
name_{output} = \frac{name_{mult} \times name_{lhs} - name_{rhs} }{f_{norm}(name_{rhs})}
Where :math:`name_{lhs}` represents the left-hand-side of the equality,
:math:`name_{rhs}` represents the right-hand-side, and :math:`name_{mult}`
is an optional multiplier on the left hand side. If use_mult is True then
the default value of the multiplier is 1. The optional normalization function
:math:`f_{norm}` is computed as:
.. math::
f_{norm}(name_{rhs}) =
\begin{cases}
\left| name_{rhs} \right|, & \text{if normalize and } \left| name_{rhs} \right| \geq 2 \\
0.25 name_{rhs}^2 + 1, & \text{if normalize and } \left| name_{rhs} \right| < 2 \\
1, & \text{if not normalize}
\end{cases}
New output variables are created by calling `add_eq_output`.
Parameters
----------
name : str
The name of the output variable to be created.
eq_units : str or None
Units for the left-hand-side and right-hand-side of the difference equation.
lhs_name : str or None
Optional name for the LHS variable associated with the difference equation.
If None, the default will be used: 'lhs:{name}'.
rhs_name : str or None
Optional name for the RHS variable associated with the difference equation.
If None, the default will be used: 'rhs:{name}'.
rhs_val : int, float, or np.array
Default value for the RHS of the given output. Must be compatible
with the shape (optionally) given by the val or shape option in kwargs.
use_mult : bool
Specifies whether the LHS multiplier is to be used. If True, then an additional
input `mult_name` is created, with the default value given by `mult_val`, that
multiplies lhs. Default is False.
mult_name : str or None
Optional name for the LHS multiplier variable associated with the output
variable. If None, the default will be used: 'mult:{name}'.
mult_val : int, float, or np.array
Default value for the LHS multiplier of the given output. Must be compatible
with the shape (optionally) given by the val or shape option in kwargs.
normalize : bool
Specifies whether or not the resulting output should be normalized by the RHS. When
the RHS value is between [-2, 2], the normalization value is a quadratic function that
is close to one but still provides a C1 continuous function. When this option is True,
the user-provided ref/ref0 scaler/adder options below are typically unnecessary.
add_constraint : bool
Specifies whether to add an equality constraint.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver. This option is only
meaningful when add_constraint=True.
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver. This option is only
meaningful when add_constraint=True.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value for the driver. adder
is first in precedence. This option is only meaningful when add_constraint=True.
scaler : float or ndarray, optional
value to multiply the model value to get the scaled value for the driver. scaler
is second in precedence. This option is only meaningful when add_constraint=True.
**kwargs : dict
Additional arguments to be passed for the creation of the output variable.
(see `add_output` method).
"""
super().__init__()
self._output_vars = {}
if name is not None:
self.add_eq_output(name, eq_units, lhs_name, rhs_name, rhs_val,
use_mult, mult_name, mult_val, normalize, add_constraint, ref, ref0,
adder, scaler, **kwargs)
self._no_check_partials = True
def compute(self, inputs, outputs):
"""
Calculate the output for each equality constraint.
Parameters
----------
inputs : Vector
unscaled, dimensional input variables read via inputs[key]
outputs : Vector
unscaled, dimensional output variables read via outputs[key]
"""
if inputs._under_complex_step:
self._scale_factor = self._scale_factor.astype(np.complex)
else:
self._scale_factor = self._scale_factor.real
for name, options in self._output_vars.items():
lhs = inputs[options['lhs_name']]
rhs = inputs[options['rhs_name']]
# Compute scaling factors
# scale factor that normalizes by the rhs, except near 0
if options['normalize']:
# Indices where the rhs is near zero or not near zero
idxs_nz = np.where(cs_safe.abs(rhs) < 2)[0]
idxs_nnz = np.where(cs_safe.abs(rhs) >= 2)[0]
self._scale_factor[idxs_nnz] = 1.0 / cs_safe.abs(rhs[idxs_nnz])
self._scale_factor[idxs_nz] = 1.0 / (.25 * rhs[idxs_nz] ** 2 + 1)
else:
self._scale_factor[:] = 1.0
if options['use_mult']:
outputs[name] = (inputs[options['mult_name']] * lhs - rhs) * self._scale_factor
else:
outputs[name] = (lhs - rhs) * self._scale_factor
def compute_partials(self, inputs, partials):
"""
Compute sub-jacobian parts. The model is assumed to be in an unscaled state.
Parameters
----------
inputs : Vector
unscaled, dimensional input variables read via inputs[key]
partials : Jacobian
sub-jac components written to partials[output_name, input_name]
"""
if inputs._under_complex_step:
self._dscale_drhs = self._dscale_drhs.astype(np.complex)
else:
self._dscale_drhs = self._dscale_drhs.real
for name, options in self._output_vars.items():
lhs_name = options['lhs_name']
rhs_name = options['rhs_name']
lhs = inputs[lhs_name]
rhs = inputs[rhs_name]
if options['normalize']:
# Indices where the rhs is near zero or not near zero
idxs_nz = np.where(cs_safe.abs(rhs) < 2)[0]
idxs_nnz = np.where(cs_safe.abs(rhs) >= 2)[0]
# scale factor that normalizes by the rhs, except near 0
self._scale_factor[idxs_nnz] = 1.0 / cs_safe.abs(rhs[idxs_nnz])
self._scale_factor[idxs_nz] = 1.0 / (.25 * rhs[idxs_nz] ** 2 + 1)
self._dscale_drhs[idxs_nnz] = -np.sign(rhs[idxs_nnz]) / rhs[idxs_nnz]**2
self._dscale_drhs[idxs_nz] = -.5 * rhs[idxs_nz] / (.25 * rhs[idxs_nz] ** 2 + 1) ** 2
else:
self._scale_factor[:] = 1.0
self._dscale_drhs[:] = 0.0
if options['use_mult']:
mult_name = options['mult_name']
mult = inputs[mult_name]
# Partials of output wrt mult
deriv = lhs * self._scale_factor
partials[name, mult_name] = deriv.flatten()
else:
mult = 1.0
# Partials of output wrt rhs
deriv = (mult * lhs - rhs) * self._dscale_drhs - self._scale_factor
partials[name, rhs_name] = deriv.flatten()
# Partials of output wrt lhs
deriv = mult * self._scale_factor
partials[name, lhs_name] = deriv.flatten()
def add_eq_output(self, name, eq_units=None, lhs_name=None, rhs_name=None, rhs_val=0.0,
use_mult=False, mult_name=None, mult_val=1.0, normalize=True,
add_constraint=False, ref=None, ref0=None, adder=None, scaler=None, **kwargs):
"""
Add a new output variable computed via the difference equation.
This will create new inputs `lhs:name`, `rhs:name`, and `mult:name` that will
define the left and right sides of the difference equation, and a
multiplier for the left-hand-side.
Parameters
----------
name : str
The name of the output variable to be created.
eq_units : str or None
Units for the left-hand-side and right-hand-side of the difference equation.
lhs_name : str or None
Optional name for the LHS variable associated with the difference equation. If
None, the default will be used: 'lhs:{name}'.
rhs_name : str or None
Optional name for the RHS variable associated with the difference equation. If
None, the default will be used: 'rhs:{name}'.
rhs_val : int, float, or np.array
Default value for the RHS. Must be compatible with the shape (optionally)
given by the val or shape option in kwargs.
use_mult : bool
Specifies whether the LHS multiplier is to be used. If True, then an additional
input `mult_name` is created, with the default value given by `mult_val`, that
multiplies lhs. Default is False.
mult_name : str or None
Optional name for the LHS multiplier variable associated with the output
variable. If None, the default will be used: 'mult:{name}'.
mult_val : int, float, or np.array
Default value for the LHS multiplier. Must be compatible with the shape (optionally)
given by the val or shape option in kwargs.
normalize : bool
Specifies whether or not the resulting output should be normalized by a quadratic
function of the RHS. When this option is True, the user-provided ref/ref0 scaler/adder
options below are typically unnecessary.
add_constraint : bool
Specifies whether to add an equality constraint.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver. This option is only
meaningful when add_constraint=True.
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver. This option is only
meaningful when add_constraint=True.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value for the driver. adder
is first in precedence. This option is only meaningful when add_constraint=True.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value for the driver. scaler
is second in precedence. This option is only meaningful when add_constraint=True.
**kwargs : dict
Additional arguments to be passed for the creation of the output variable.
(see `add_output` method).
"""
self._output_vars[name] = options = {'kwargs': kwargs,
'eq_units': eq_units,
'lhs_name': lhs_name,
'rhs_name': rhs_name,
'rhs_val': rhs_val,
'use_mult': use_mult,
'mult_name': mult_name,
'mult_val': mult_val,
'normalize': normalize,
'add_constraint': add_constraint,
'ref': ref,
'ref0': ref0,
'adder': adder,
'scaler': scaler}
meta = self.add_output(name, **options['kwargs'])
shape = meta['shape']
for s in ('lhs', 'rhs', 'mult'):
if options['{0}_name'.format(s)] is None:
options['{0}_name'.format(s)] = '{0}:{1}'.format(s, name)
self.add_input(options['lhs_name'],
val=np.ones(shape),
units=options['eq_units'])
self.add_input(options['rhs_name'],
val=options['rhs_val'] * np.ones(shape),
units=options['eq_units'])
if options['use_mult']:
self.add_input(options['mult_name'],
val=options['mult_val'] * np.ones(shape),
units=None)
self._scale_factor = np.ones(shape)
self._dscale_drhs = np.ones(shape)
ar = np.arange(np.prod(shape))
self.declare_partials(of=name, wrt=options['lhs_name'], rows=ar, cols=ar, val=1.0)
self.declare_partials(of=name, wrt=options['rhs_name'], rows=ar, cols=ar, val=1.0)
if options['use_mult']:
self.declare_partials(of=name, wrt=options['mult_name'], rows=ar, cols=ar, val=1.0)
if options['add_constraint']:
self.add_constraint(name, equals=0., ref0=options['ref0'], ref=options['ref'],
adder=options['adder'], scaler=options['scaler'])
|
"""Define the EQConstraintComp class."""
from numbers import Number
import numpy as np
from openmdao.core.explicitcomponent import ExplicitComponent
from openmdao.utils import cs_safe
class EQConstraintComp(ExplicitComponent):
    r"""Compute the difference between two inputs to test for equality.

    For every output created with `add_eq_output` the component evaluates

    .. math::
        name_{output} = \frac{name_{mult} \times name_{lhs} - name_{rhs}}{f_{norm}(name_{rhs})}

    where the optional normalization :math:`f_{norm}(rhs)` is
    :math:`|rhs|` when :math:`|rhs| \ge 2`, the quadratic
    :math:`0.25\,rhs^2 + 1` when :math:`|rhs| < 2` (keeping the function
    C1 continuous through zero), and 1 when ``normalize`` is False.

    Attributes
    ----------
    _output_vars : dict
        Cache of the options passed to `add_eq_output`, keyed by output name.
    _scale_factor : ndarray
        1 / f_norm(rhs), recomputed at every evaluation.
    _dscale_drhs : ndarray
        Derivative of the scale factor w.r.t. the RHS.

    NOTE(review): ``_scale_factor`` / ``_dscale_drhs`` are single arrays
    shared by every output, so they are clobbered when several outputs of
    different shapes exist — confirm single-output usage or switch to
    per-name dicts.
    """

    def __init__(self, name=None, eq_units=None, lhs_name=None, rhs_name=None, rhs_val=0.0,
                 use_mult=False, mult_name=None, mult_val=1.0, normalize=True, add_constraint=False,
                 ref=None, ref0=None, adder=None, scaler=None, **kwargs):
        """Initialize the component, optionally creating a first output.

        Parameters
        ----------
        name : str or None
            When given, an output of this name is created immediately and
            every remaining argument is forwarded to `add_eq_output`; see
            that method for their full documentation.
        eq_units, lhs_name, rhs_name, rhs_val : see `add_eq_output`
        use_mult, mult_name, mult_val, normalize : see `add_eq_output`
        add_constraint, ref, ref0, adder, scaler : see `add_eq_output`
        **kwargs : dict
            Extra arguments forwarded to `add_output`.
        """
        super().__init__()
        self._output_vars = {}
        if name is not None:
            self.add_eq_output(name, eq_units, lhs_name, rhs_name, rhs_val,
                               use_mult, mult_name, mult_val, normalize, add_constraint, ref, ref0,
                               adder, scaler, **kwargs)
        # Partials are supplied analytically in compute_partials.
        self._no_check_partials = True

    def compute(self, inputs, outputs):
        """Calculate the (normalized) difference for each registered output.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        outputs : Vector
            Unscaled, dimensional output variables written via outputs[key].
        """
        if inputs._under_complex_step:
            # BUG FIX: `np.complex` was deprecated in NumPy 1.20 and removed
            # in 1.24; the builtin `complex` is the documented replacement.
            self._scale_factor = self._scale_factor.astype(complex)
        else:
            self._scale_factor = self._scale_factor.real
        for name, options in self._output_vars.items():
            lhs = inputs[options['lhs_name']]
            rhs = inputs[options['rhs_name']]
            if options['normalize']:
                # Normalize by |rhs| away from zero; near zero use a
                # quadratic so the scale factor stays C1 continuous.
                idxs_nz = np.where(cs_safe.abs(rhs) < 2)[0]
                idxs_nnz = np.where(cs_safe.abs(rhs) >= 2)[0]
                self._scale_factor[idxs_nnz] = 1.0 / cs_safe.abs(rhs[idxs_nnz])
                self._scale_factor[idxs_nz] = 1.0 / (.25 * rhs[idxs_nz] ** 2 + 1)
            else:
                self._scale_factor[:] = 1.0
            if options['use_mult']:
                outputs[name] = (inputs[options['mult_name']] * lhs - rhs) * self._scale_factor
            else:
                outputs[name] = (lhs - rhs) * self._scale_factor

    def compute_partials(self, inputs, partials):
        """Compute sub-jacobians; the model is assumed to be unscaled.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        partials : Jacobian
            Sub-jac components written to partials[output_name, input_name].
        """
        if inputs._under_complex_step:
            # BUG FIX: `np.complex` removed in NumPy 1.24; use builtin complex.
            self._dscale_drhs = self._dscale_drhs.astype(complex)
        else:
            self._dscale_drhs = self._dscale_drhs.real
        for name, options in self._output_vars.items():
            lhs_name = options['lhs_name']
            rhs_name = options['rhs_name']
            lhs = inputs[lhs_name]
            rhs = inputs[rhs_name]
            if options['normalize']:
                # Same piecewise scale factor as compute(), plus its
                # derivative with respect to rhs.
                idxs_nz = np.where(cs_safe.abs(rhs) < 2)[0]
                idxs_nnz = np.where(cs_safe.abs(rhs) >= 2)[0]
                self._scale_factor[idxs_nnz] = 1.0 / cs_safe.abs(rhs[idxs_nnz])
                self._scale_factor[idxs_nz] = 1.0 / (.25 * rhs[idxs_nz] ** 2 + 1)
                self._dscale_drhs[idxs_nnz] = -np.sign(rhs[idxs_nnz]) / rhs[idxs_nnz] ** 2
                self._dscale_drhs[idxs_nz] = -.5 * rhs[idxs_nz] / (.25 * rhs[idxs_nz] ** 2 + 1) ** 2
            else:
                self._scale_factor[:] = 1.0
                self._dscale_drhs[:] = 0.0
            if options['use_mult']:
                mult_name = options['mult_name']
                mult = inputs[mult_name]
                # d(out)/d(mult) = lhs * scale
                partials[name, mult_name] = (lhs * self._scale_factor).flatten()
            else:
                mult = 1.0
            # d(out)/d(rhs): product rule over (mult*lhs - rhs) * scale(rhs)
            deriv = (mult * lhs - rhs) * self._dscale_drhs - self._scale_factor
            partials[name, rhs_name] = deriv.flatten()
            # d(out)/d(lhs) = mult * scale
            partials[name, lhs_name] = (mult * self._scale_factor).flatten()

    def add_eq_output(self, name, eq_units=None, lhs_name=None, rhs_name=None, rhs_val=0.0,
                      use_mult=False, mult_name=None, mult_val=1.0, normalize=True,
                      add_constraint=False, ref=None, ref0=None, adder=None, scaler=None, **kwargs):
        """Add an output computed as the difference of two new inputs.

        Creates the inputs 'lhs:<name>', 'rhs:<name>' (and 'mult:<name>'
        when `use_mult` is True), declares the diagonal partials, and
        optionally attaches an equality constraint on the output.

        Parameters
        ----------
        name : str
            Name of the output variable to be created.
        eq_units : str or None
            Units for both sides of the difference equation.
        lhs_name, rhs_name : str or None
            Override names for the LHS/RHS inputs; defaults are
            'lhs:{name}' and 'rhs:{name}'.
        rhs_val : int, float, or np.array
            Default RHS value, compatible with the output shape.
        use_mult : bool
            When True, also create an LHS multiplier input (default False).
        mult_name : str or None
            Override name for the multiplier input; default 'mult:{name}'.
        mult_val : int, float, or np.array
            Default multiplier value, compatible with the output shape.
        normalize : bool
            Normalize the output by a quadratic function of the RHS; usually
            makes the driver-scaling options below unnecessary.
        add_constraint : bool
            When True, add an equality constraint on the output.
        ref, ref0, adder, scaler : float or ndarray, optional
            Driver scaling for the constraint; only meaningful when
            `add_constraint` is True.
        **kwargs : dict
            Extra arguments forwarded to `add_output`.
        """
        self._output_vars[name] = options = {'kwargs': kwargs,
                                             'eq_units': eq_units,
                                             'lhs_name': lhs_name,
                                             'rhs_name': rhs_name,
                                             'rhs_val': rhs_val,
                                             'use_mult': use_mult,
                                             'mult_name': mult_name,
                                             'mult_val': mult_val,
                                             'normalize': normalize,
                                             'add_constraint': add_constraint,
                                             'ref': ref,
                                             'ref0': ref0,
                                             'adder': adder,
                                             'scaler': scaler}
        meta = self.add_output(name, **options['kwargs'])
        shape = meta['shape']
        # Default any unset input name to '<side>:<output name>'.
        for s in ('lhs', 'rhs', 'mult'):
            if options['{0}_name'.format(s)] is None:
                options['{0}_name'.format(s)] = '{0}:{1}'.format(s, name)
        self.add_input(options['lhs_name'],
                       val=np.ones(shape),
                       units=options['eq_units'])
        self.add_input(options['rhs_name'],
                       val=options['rhs_val'] * np.ones(shape),
                       units=options['eq_units'])
        if options['use_mult']:
            self.add_input(options['mult_name'],
                           val=options['mult_val'] * np.ones(shape),
                           units=None)
        # Work arrays reused by compute / compute_partials.
        self._scale_factor = np.ones(shape)
        self._dscale_drhs = np.ones(shape)
        ar = np.arange(np.prod(shape))
        self.declare_partials(of=name, wrt=options['lhs_name'], rows=ar, cols=ar, val=1.0)
        self.declare_partials(of=name, wrt=options['rhs_name'], rows=ar, cols=ar, val=1.0)
        if options['use_mult']:
            self.declare_partials(of=name, wrt=options['mult_name'], rows=ar, cols=ar, val=1.0)
        if options['add_constraint']:
            self.add_constraint(name, equals=0., ref0=options['ref0'], ref=options['ref'],
                                adder=options['adder'], scaler=options['scaler'])
|
en
| 0.67768
|
Define the EQConstraintComp class. A component that computes the difference between two inputs to test for equality. Attributes ---------- _output_vars : dict Cache the data provided during `add_eq_output` so everything can be saved until setup is called. Initialize an EQConstraintComp, optionally add an output constraint to the model. The EQConstraintComp allows for the creation of one or more output variables and computes the values for those variables based on the following equation: .. math:: name_{output} = \frac{name_{mult} \times name_{lhs} - name_{rhs} }{f_{norm}(name_{rhs})} Where :math:`name_{lhs}` represents the left-hand-side of the equality, :math:`name_{rhs}` represents the right-hand-side, and :math:`name_{mult}` is an optional multiplier on the left hand side. If use_mult is True then the default value of the multiplier is 1. The optional normalization function :math:`f_{norm}` is computed as: .. math:: f_{norm}(name_{rhs}) = \begin{cases} \left| name_{rhs} \right|, & \text{if normalize and } \left| name_{rhs} \right| \geq 2 \\ 0.25 name_{rhs}^2 + 1, & \text{if normalize and } \left| name_{rhs} \right| < 2 \\ 1, & \text{if not normalize} \end{cases} New output variables are created by calling `add_eq_output`. Parameters ---------- name : str The name of the output variable to be created. eq_units : str or None Units for the left-hand-side and right-hand-side of the difference equation. lhs_name : str or None Optional name for the LHS variable associated with the difference equation. If None, the default will be used: 'lhs:{name}'. rhs_name : str or None Optional name for the RHS variable associated with the difference equation. If None, the default will be used: 'rhs:{name}'. rhs_val : int, float, or np.array Default value for the RHS of the given output. Must be compatible with the shape (optionally) given by the val or shape option in kwargs. use_mult : bool Specifies whether the LHS multiplier is to be used. 
If True, then an additional input `mult_name` is created, with the default value given by `mult_val`, that multiplies lhs. Default is False. mult_name : str or None Optional name for the LHS multiplier variable associated with the output variable. If None, the default will be used: 'mult:{name}'. mult_val : int, float, or np.array Default value for the LHS multiplier of the given output. Must be compatible with the shape (optionally) given by the val or shape option in kwargs. normalize : bool Specifies whether or not the resulting output should be normalized by the RHS. When the RHS value is between [-2, 2], the normalization value is a quadratic function that is close to one but still provides a C1 continuous function. When this option is True, the user-provided ref/ref0 scaler/adder options below are typically unnecessary. add_constraint : bool Specifies whether to add an equality constraint. ref : float or ndarray, optional Value of response variable that scales to 1.0 in the driver. This option is only meaningful when add_constraint=True. ref0 : float or ndarray, optional Value of response variable that scales to 0.0 in the driver. This option is only meaningful when add_constraint=True. adder : float or ndarray, optional Value to add to the model value to get the scaled value for the driver. adder is first in precedence. This option is only meaningful when add_constraint=True. scaler : float or ndarray, optional value to multiply the model value to get the scaled value for the driver. scaler is second in precedence. This option is only meaningful when add_constraint=True. **kwargs : dict Additional arguments to be passed for the creation of the output variable. (see `add_output` method). Calculate the output for each equality constraint. 
Parameters ---------- inputs : Vector unscaled, dimensional input variables read via inputs[key] outputs : Vector unscaled, dimensional output variables read via outputs[key] # Compute scaling factors # scale factor that normalizes by the rhs, except near 0 # Indices where the rhs is near zero or not near zero Compute sub-jacobian parts. The model is assumed to be in an unscaled state. Parameters ---------- inputs : Vector unscaled, dimensional input variables read via inputs[key] partials : Jacobian sub-jac components written to partials[output_name, input_name] # Indices where the rhs is near zero or not near zero # scale factor that normalizes by the rhs, except near 0 # Partials of output wrt mult # Partials of output wrt rhs # Partials of output wrt lhs Add a new output variable computed via the difference equation. This will create new inputs `lhs:name`, `rhs:name`, and `mult:name` that will define the left and right sides of the difference equation, and a multiplier for the left-hand-side. Parameters ---------- name : str The name of the output variable to be created. eq_units : str or None Units for the left-hand-side and right-hand-side of the difference equation. lhs_name : str or None Optional name for the LHS variable associated with the difference equation. If None, the default will be used: 'lhs:{name}'. rhs_name : str or None Optional name for the RHS variable associated with the difference equation. If None, the default will be used: 'rhs:{name}'. rhs_val : int, float, or np.array Default value for the RHS. Must be compatible with the shape (optionally) given by the val or shape option in kwargs. use_mult : bool Specifies whether the LHS multiplier is to be used. If True, then an additional input `mult_name` is created, with the default value given by `mult_val`, that multiplies lhs. Default is False. mult_name : str or None Optional name for the LHS multiplier variable associated with the output variable. 
If None, the default will be used: 'mult:{name}'. mult_val : int, float, or np.array Default value for the LHS multiplier. Must be compatible with the shape (optionally) given by the val or shape option in kwargs. normalize : bool Specifies whether or not the resulting output should be normalized by a quadratic function of the RHS. When this option is True, the user-provided ref/ref0 scaler/adder options below are typically unnecessary. add_constraint : bool Specifies whether to add an equality constraint. ref : float or ndarray, optional Value of response variable that scales to 1.0 in the driver. This option is only meaningful when add_constraint=True. ref0 : float or ndarray, optional Value of response variable that scales to 0.0 in the driver. This option is only meaningful when add_constraint=True. adder : float or ndarray, optional Value to add to the model value to get the scaled value for the driver. adder is first in precedence. This option is only meaningful when add_constraint=True. scaler : float or ndarray, optional Value to multiply the model value to get the scaled value for the driver. scaler is second in precedence. This option is only meaningful when add_constraint=True. **kwargs : dict Additional arguments to be passed for the creation of the output variable. (see `add_output` method).
| 2.970779
| 3
|
exercicios/Lista5/Q7.py
|
AlexandrePeBrito/CursoUdemyPython
| 0
|
6626030
|
<gh_stars>0
# 7. Write a function that receives a temperature in degrees Celsius and
# returns it converted to degrees Fahrenheit.  The conversion formula is
# F = C * (9.0/5.0) + 32.0, where F is the temperature in Fahrenheit and
# C is the temperature in Celsius.
def convertCF(temp):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    # BUG FIX: the original computed temp + 9/5 + 32 (addition instead of
    # multiplication); the Celsius-to-Fahrenheit formula is C * 9/5 + 32.
    return temp * (9 / 5) + 32
# Read a Celsius temperature from the user, convert it and report the result.
valor = float(input("Informe a temperatura em Celsius: "))
fahrenheit = convertCF(valor)
print(f"A temperatura em Fahrenheit eh {fahrenheit}")
|
# 7. Write a function that receives a temperature in degrees Celsius and
# returns it converted to degrees Fahrenheit.  The conversion formula is
# F = C * (9.0/5.0) + 32.0, where F is the temperature in Fahrenheit and
# C is the temperature in Celsius.
def convertCF(temp):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    # BUG FIX: the original computed temp + 9/5 + 32 (addition instead of
    # multiplication); the Celsius-to-Fahrenheit formula is C * 9/5 + 32.
    return temp * (9 / 5) + 32
# Read a Celsius temperature from the user, convert it and report the result.
valor = float(input("Informe a temperatura em Celsius: "))
fahrenheit = convertCF(valor)
print(f"A temperatura em Fahrenheit eh {fahrenheit}")
|
pt
| 0.974808
|
#7. Faça uma função que receba uma temperatura em graus Celsius e retorne-a convertida #em graus Fahrenheit. A fórmula de conversão é: F = C * (9.0/5.0) + 32.0, sendo F a #temperatura em Fahrenheit e C a temperatura em Celsius.
| 4.10781
| 4
|
cssTkinter/html_processor.py
|
rug-gui/cssTk
| 4
|
6626031
|
def fail():
    """Abort processing of the current document."""
    raise RuntimeError()
def parse_html(html):
    """Parse an HTML string and return the BeautifulSoup document.

    Fails (raising RuntimeError via fail()) when parsing yields a falsy
    soup or the document does not have exactly one <html> root element.
    """
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html, "html.parser")
    if not soup or len(soup.select("html")) != 1:
        fail()
    return soup
|
def fail():
    """Abort processing of the current document."""
    raise RuntimeError()
def parse_html(html):
    """Parse an HTML string and return the BeautifulSoup document.

    Calls fail() (which raises RuntimeError) when parsing yields a falsy
    soup or the document does not contain exactly one <html> root element.
    """
    from bs4 import BeautifulSoup
    b=BeautifulSoup(html, "html.parser")
    if not b:
        fail()
    # require exactly one top-level <html> element
    if not len(b.select("html"))==1:
        fail()
    return b
|
none
| 1
| 3.120721
| 3
|
|
tools/get_pr_ut.py
|
a6802739/Paddle
| 2
|
6626032
|
<filename>tools/get_pr_ut.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" For the PR that only modified the unit test, get cases in pull request. """
import os
import json
import re
import sys
import time
import subprocess
import requests
from github import Github
# Root of the Paddle checkout; normalized so it ends with exactly one '/'.
PADDLE_ROOT = os.getenv('PADDLE_ROOT', '/paddle/')
PADDLE_ROOT += '/'
PADDLE_ROOT = PADDLE_ROOT.replace('//', '/')
class PRChecker(object):
    """Select the unit tests that must run for the current pull request."""

    def __init__(self):
        self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60)
        self.repo = self.github.get_repo('PaddlePaddle/Paddle')
        # The patterns below match "<lineno>|<text>" lines produced by
        # get_comment_of_file / get_pr_diff_lines.
        self.py_prog_oneline = re.compile('\d+\|\s*#.*')
        self.py_prog_multiline_a = re.compile('\d+\|\s*r?""".*?"""', re.DOTALL)
        self.py_prog_multiline_b = re.compile("\d+\|\s*r?'''.*?'''", re.DOTALL)
        # BUG FIX: this attribute was named `cc_prog_online`, while
        # __get_comment_by_filetype reads `cc_prog_oneline`, so every
        # C/C++ file raised AttributeError.
        self.cc_prog_oneline = re.compile('\d+\|\s*//.*')
        self.cc_prog_multiline = re.compile('\d+\|\s*/\*.*?\*/', re.DOTALL)
        self.lineno_prog = re.compile('@@ \-\d+,\d+ \+(\d+),(\d+) @@')
        self.pr = None            # github PullRequest, set by init()
        self.suffix = ''          # optional suffix for downloaded data files
        self.full_case = False    # True when the PR requests all test cases

    def init(self):
        """Fetch the pull request and honor a `test=allcase` commit message."""
        pr_id = os.getenv('GIT_PR_ID')
        if not pr_id:
            print('PREC No PR ID')
            exit(0)
        suffix = os.getenv('PREC_SUFFIX')
        if suffix:
            self.suffix = suffix
        self.pr = self.repo.get_pull(int(pr_id))
        # Walk every page of commits to find the last one.
        # BUG FIX: the original `for ...: ... else: break` broke out of the
        # while-loop unconditionally after page 0 (a for-else runs whenever
        # the for finishes without `break`), so later pages were ignored.
        last_commit = None
        ix = 0
        while True:
            commits = self.pr.get_commits().get_page(ix)
            if not commits:
                break
            last_commit = commits[-1].commit
            ix = ix + 1
        if last_commit.message.find('test=allcase') != -1:
            print('PREC test=allcase is set')
            self.full_case = True

    #todo: exception
    def __wget_with_retry(self, url):
        """Download `url` with wget, retrying up to five times.

        Alternates the proxy option between attempts and backs off
        10 * attempt seconds after each failure.  Returns True on success,
        False after all retries fail.
        """
        ix = 1
        proxy = '--no-proxy'
        while ix < 6:
            # BUG FIX: `ix // 2 == 0` only held for the first attempt, so
            # the proxy option never actually alternated; use parity.
            if ix % 2 == 0:
                proxy = ''
            else:
                proxy = '--no-proxy'
            code = subprocess.call(
                'wget -q {} --no-check-certificate {}'.format(proxy, url),
                shell=True)
            if code == 0:
                return True
            print(
                'PREC download {} error, retry {} time(s) after {} secs.[proxy_option={}]'.
                format(url, ix, ix * 10, proxy))
            time.sleep(ix * 10)
            ix += 1
        return False

    def get_pr_files(self):
        """Return the absolute paths of all files touched by the PR."""
        page = 0
        file_list = []
        while True:
            files = self.pr.get_files().get_page(page)
            if not files:
                break
            for f in files:
                file_list.append(PADDLE_ROOT + f.filename)
            page += 1
        return file_list

    def __get_comment_by_filetype(self, content, filetype):
        """Extract the comment lines of numbered `content` for `filetype`."""
        result = []
        if filetype == 'py':
            result = self.__get_comment_by_prog(content, self.py_prog_oneline)
            result.extend(
                self.__get_comment_by_prog(content, self.py_prog_multiline_a))
            result.extend(
                self.__get_comment_by_prog(content, self.py_prog_multiline_b))
        if filetype == 'cc':
            result = self.__get_comment_by_prog(content, self.cc_prog_oneline)
            result.extend(
                self.__get_comment_by_prog(content, self.cc_prog_multiline))
        return result

    def __get_comment_by_prog(self, content, prog):
        """Return all lines of `content` matched by `prog`, split per line."""
        result_list = prog.findall(content)
        if not result_list:
            return []
        result = []
        for u in result_list:
            result.extend(u.split('\n'))
        return result

    def get_comment_of_file(self, f):
        """Return the comment lines of file `f` as '<lineno>|<text>' strings."""
        #todo: get file from github
        with open(f) as fd:
            lines = fd.readlines()
        lineno = 1
        inputs = ''
        for line in lines:
            inputs += str(lineno) + '|' + line
            lineno += 1
        # BUG FIX: the original assigned a typo variable (`fietype`) and used
        # two independent `if` statements, so .h/.cc/.cu files fell through
        # to the `else` branch and always returned [].
        if f.endswith('.h') or f.endswith('.cc') or f.endswith('.cu'):
            filetype = 'cc'
        elif f.endswith('.py'):
            filetype = 'py'
        else:
            return []
        return self.__get_comment_by_filetype(inputs, filetype)

    def get_pr_diff_lines(self):
        """Map each changed filename to its added lines, '<lineno>|<text>'."""
        file_to_diff_lines = {}
        r = requests.get(self.pr.diff_url)
        data = r.text
        data = data.split('\n')
        ix = 0
        while ix < len(data):
            if data[ix].startswith('+++'):
                if data[ix].rstrip('\r\n') == '+++ /dev/null':
                    # deleted file: nothing was added
                    ix += 1
                    continue
                filename = data[ix][6:]
                ix += 1
                while ix < len(data):
                    result = self.lineno_prog.match(data[ix])
                    if not result:
                        break
                    lineno = int(result.group(1))
                    length = int(result.group(2))
                    ix += 1
                    end = ix + length
                    while ix < end:
                        if data[ix][0] == '-':
                            # removed lines do not consume new-file numbering
                            end += 1
                        if data[ix][0] == '+':
                            line_list = file_to_diff_lines.get(filename)
                            line = '{}{}'.format(lineno,
                                                 data[ix].replace('+', '|', 1))
                            if line_list:
                                line_list.append(line)
                            else:
                                file_to_diff_lines[filename] = [line, ]
                        if data[ix][0] != '-':
                            lineno += 1
                        ix += 1
            ix += 1
        return file_to_diff_lines

    def is_only_comment(self, f):
        """Return True when every added line of `f` in this PR is a comment."""
        file_to_diff_lines = self.get_pr_diff_lines()
        comment_lines = self.get_comment_of_file(f)
        diff_lines = file_to_diff_lines.get(f.replace(PADDLE_ROOT, '', 1))
        if not diff_lines:
            return False
        for l in diff_lines:
            if l not in comment_lines:
                return False
        print('PREC {} is only comment'.format(f))
        return True

    def get_pr_ut(self):
        """Return the newline-joined unit tests to run; '' means run all."""
        if self.full_case:
            return ''
        check_added_ut = False
        ut_list = []
        file_ut_map = None
        ret = self.__wget_with_retry(
            'https://sys-p0.bj.bcebos.com/prec/file_ut.json{}'.format(
                self.suffix))
        if not ret:
            print('PREC download file_ut.json failed')
            exit(1)
        with open('file_ut.json' + self.suffix) as jsonfile:
            file_ut_map = json.load(jsonfile)
        for f in self.get_pr_files():
            if f not in file_ut_map:
                if f.endswith('.md'):
                    ut_list.append('md_placeholder')
                elif f.endswith('.h') or f.endswith('.cu'):
                    if self.is_only_comment(f):
                        ut_list.append('h_cu_comment_placeholder')
                    else:
                        print(
                            'PREC dismatch: {} not in file ut map and not md or comment'.
                            format(f))
                        return ''
                elif f.endswith('.cc') or f.endswith('.py') or f.endswith(
                        '.cu'):
                    if f.find('test_') != -1 or f.find('_test') != -1:
                        print('PREC {} need check new ut'.format(f))
                        check_added_ut = True
                    elif self.is_only_comment(f):
                        ut_list.append('nomap_comment_placeholder')
                    else:
                        print(
                            'PREC dismatch: {} not in file ut map and not new ut or comment'.
                            format(f))
                        return ''
                else:
                    print('PREC dismatch: {} not in file ut map'.format(f))
                    return ''
            else:
                if self.is_only_comment(f):
                    ut_list.append('map_comment_placeholder')
                else:
                    ut_list.extend(file_ut_map.get(f))
        ut_list = list(set(ut_list))
        if check_added_ut:
            # a CI step records freshly added tests in $PADDLE_ROOT/added_ut
            with open('{}/added_ut'.format(PADDLE_ROOT)) as utfile:
                for ut in utfile:
                    print('PREC NEW UT: {}'.format(ut.rstrip('\r\n')))
                    ut_list.append(ut.rstrip('\r\n'))
        if ut_list:
            # prec_delta lists tests that must always run
            ret = self.__wget_with_retry(
                'https://sys-p0.bj.bcebos.com/prec/prec_delta{}'.format(
                    self.suffix))
            if ret:
                with open('prec_delta' + self.suffix) as delta:
                    for ut in delta:
                        ut_list.append(ut.rstrip('\r\n'))
            else:
                print('PREC download prec_delta failed')
                exit(1)
        return '\n'.join(ut_list)
if __name__ == '__main__':
    pr_checker = PRChecker()
    pr_checker.init()
    #print(pr_checker.get_pr_ut())
    # Write the selected unit-test list for downstream CI scripts to consume.
    with open('ut_list', 'w') as f:
        f.write(pr_checker.get_pr_ut())
|
<filename>tools/get_pr_ut.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" For the PR that only modified the unit test, get cases in pull request. """
import os
import json
import re
import sys
import time
import subprocess
import requests
from github import Github
# Root of the Paddle checkout; normalized so it ends with exactly one '/'.
PADDLE_ROOT = os.getenv('PADDLE_ROOT', '/paddle/')
PADDLE_ROOT += '/'
PADDLE_ROOT = PADDLE_ROOT.replace('//', '/')
class PRChecker(object):
    """For a PR that only modifies unit tests, collect the cases to run.

    Inspects the PR's changed files and diff, filters out comment-only
    changes, and maps the remaining files to unit tests through a
    downloaded file->ut mapping (file_ut.json).
    """

    def __init__(self):
        self.github = Github(os.getenv('GITHUB_API_TOKEN'), timeout=60)
        self.repo = self.github.get_repo('PaddlePaddle/Paddle')
        # Diff/comment lines are rendered as "<lineno>|<text>"; the
        # patterns below recognize comment-only lines in that format.
        self.py_prog_oneline = re.compile(r'\d+\|\s*#.*')
        self.py_prog_multiline_a = re.compile(r'\d+\|\s*r?""".*?"""', re.DOTALL)
        self.py_prog_multiline_b = re.compile(r"\d+\|\s*r?'''.*?'''", re.DOTALL)
        # BUGFIX: this attribute was defined as `cc_prog_online` but
        # referenced as `cc_prog_oneline`, raising AttributeError for
        # every C/C++ file; name it consistently.
        self.cc_prog_oneline = re.compile(r'\d+\|\s*//.*')
        self.cc_prog_multiline = re.compile(r'\d+\|\s*/\*.*?\*/', re.DOTALL)
        self.lineno_prog = re.compile(r'@@ \-\d+,\d+ \+(\d+),(\d+) @@')
        self.pr = None
        self.suffix = ''
        self.full_case = False

    def init(self):
        """Get the pull request and detect the ``test=allcase`` flag."""
        pr_id = os.getenv('GIT_PR_ID')
        if not pr_id:
            print('PREC No PR ID')
            exit(0)
        suffix = os.getenv('PREC_SUFFIX')
        if suffix:
            self.suffix = suffix
        self.pr = self.repo.get_pull(int(pr_id))
        last_commit = None
        ix = 0
        # BUGFIX: the original `for ... else: break` always broke out of
        # the while after page 0, so later pages were never read; walk
        # pages until an empty one is returned.
        while True:
            commits = self.pr.get_commits().get_page(ix)
            if not commits:
                break
            last_commit = commits[-1].commit
            ix += 1
        if last_commit is not None and \
                last_commit.message.find('test=allcase') != -1:
            print('PREC test=allcase is set')
            self.full_case = True

    #todo: exception
    def __wget_with_retry(self, url):
        """Download *url* via wget, retrying up to 5 times.

        Alternates between the proxy and --no-proxy on successive
        attempts; returns True on success, False when retries run out.
        """
        ix = 1
        proxy = '--no-proxy'
        while ix < 6:
            # BUGFIX: was `ix // 2 == 0`, which is only true for ix < 2
            # and therefore never alternated; use parity instead.
            if ix % 2 == 0:
                proxy = ''
            else:
                proxy = '--no-proxy'
            code = subprocess.call(
                'wget -q {} --no-check-certificate {}'.format(proxy, url),
                shell=True)
            if code == 0:
                return True
            print(
                'PREC download {} error, retry {} time(s) after {} secs.[proxy_option={}]'.
                format(url, ix, ix * 10, proxy))
            time.sleep(ix * 10)
            ix += 1
        return False

    def get_pr_files(self):
        """Get absolute paths of all files changed in the pull request."""
        page = 0
        file_list = []
        while True:
            files = self.pr.get_files().get_page(page)
            if not files:
                break
            for f in files:
                file_list.append(PADDLE_ROOT + f.filename)
            page += 1
        return file_list

    def __get_comment_by_filetype(self, content, filetype):
        """Return all comment lines of *content* for 'py' or 'cc' files."""
        result = []
        if filetype == 'py':
            result = self.__get_comment_by_prog(content, self.py_prog_oneline)
            result.extend(
                self.__get_comment_by_prog(content, self.py_prog_multiline_a))
            result.extend(
                self.__get_comment_by_prog(content, self.py_prog_multiline_b))
        if filetype == 'cc':
            result = self.__get_comment_by_prog(content, self.cc_prog_oneline)
            result.extend(
                self.__get_comment_by_prog(content, self.cc_prog_multiline))
        return result

    def __get_comment_by_prog(self, content, prog):
        """Return the individual lines of every match of regex *prog*."""
        result_list = prog.findall(content)
        if not result_list:
            return []
        result = []
        for u in result_list:
            # Multi-line matches are split back into component lines so
            # they compare against per-line diff entries.
            result.extend(u.split('\n'))
        return result

    def get_comment_of_file(self, f):
        """Return comment lines of local file *f* as "<lineno>|<text>"."""
        #content = self.repo.get_contents(f.replace(PADDLE_ROOT, ''), 'pull/').decoded_content
        #todo: get file from github
        with open(f) as fd:
            lines = fd.readlines()
        lineno = 1
        inputs = ''
        for line in lines:
            #for line in content.split('\n'):
            #input += str(lineno) + '|' + line + '\n'
            inputs += str(lineno) + '|' + line
            lineno += 1
        # BUGFIX: the original assigned `fietype` (typo, never read) and
        # used `if ... / if ... / else`, so .h/.cc/.cu files fell into
        # the final `else` and were reported as having no comments.
        if f.endswith('.h') or f.endswith('.cc') or f.endswith('.cu'):
            filetype = 'cc'
        elif f.endswith('.py'):
            filetype = 'py'
        else:
            return []
        return self.__get_comment_by_filetype(inputs, filetype)

    def get_pr_diff_lines(self):
        """Map each changed filename to its added lines ("<lineno>|<text>")."""
        file_to_diff_lines = {}
        r = requests.get(self.pr.diff_url)
        data = r.text
        data = data.split('\n')
        ix = 0
        while ix < len(data):
            if data[ix].startswith('+++'):
                if data[ix].rstrip('\r\n') == '+++ /dev/null':
                    # Deleted file: nothing was added.
                    ix += 1
                    continue
                filename = data[ix][6:]
                ix += 1
                while ix < len(data):
                    result = self.lineno_prog.match(data[ix])
                    if not result:
                        break
                    lineno = int(result.group(1))
                    length = int(result.group(2))
                    ix += 1
                    end = ix + length
                    while ix < end:
                        if data[ix][0] == '-':
                            # Removed lines do not count toward the hunk
                            # length of the new file; extend the window.
                            end += 1
                        if data[ix][0] == '+':
                            line_list = file_to_diff_lines.get(filename)
                            line = '{}{}'.format(lineno,
                                                 data[ix].replace('+', '|', 1))
                            if line_list:
                                line_list.append(line)
                            else:
                                file_to_diff_lines[filename] = [line, ]
                        if data[ix][0] != '-':
                            lineno += 1
                        ix += 1
            ix += 1
        return file_to_diff_lines

    def is_only_comment(self, f):
        """Return True if every added line of *f* in the diff is a comment."""
        file_to_diff_lines = self.get_pr_diff_lines()
        comment_lines = self.get_comment_of_file(f)
        diff_lines = file_to_diff_lines.get(f.replace(PADDLE_ROOT, '', 1))
        if not diff_lines:
            return False
        for l in diff_lines:
            if l not in comment_lines:
                return False
        print('PREC {} is only comment'.format(f))
        return True

    def get_pr_ut(self):
        """Get unit tests in pull request; '' means run the full suite."""
        if self.full_case:
            return ''
        check_added_ut = False
        ut_list = []
        file_ut_map = None
        ret = self.__wget_with_retry(
            'https://sys-p0.bj.bcebos.com/prec/file_ut.json{}'.format(
                self.suffix))
        if not ret:
            print('PREC download file_ut.json failed')
            exit(1)
        with open('file_ut.json' + self.suffix) as jsonfile:
            file_ut_map = json.load(jsonfile)
        for f in self.get_pr_files():
            if f not in file_ut_map:
                if f.endswith('.md'):
                    ut_list.append('md_placeholder')
                elif f.endswith('.h') or f.endswith('.cu'):
                    if self.is_only_comment(f):
                        ut_list.append('h_cu_comment_placeholder')
                    else:
                        print(
                            'PREC dismatch: {} not in file ut map and not md or comment'.
                            format(f))
                        return ''
                elif f.endswith('.cc') or f.endswith('.py') or f.endswith(
                        '.cu'):
                    # NOTE: '.cu' is unreachable here (handled by the
                    # branch above); kept for parity with the original.
                    if f.find('test_') != -1 or f.find('_test') != -1:
                        print('PREC {} need check new ut'.format(f))
                        check_added_ut = True
                    elif self.is_only_comment(f):
                        ut_list.append('nomap_comment_placeholder')
                    else:
                        print(
                            'PREC dismatch: {} not in file ut map and not new ut or comment'.
                            format(f))
                        return ''
                else:
                    print('PREC dismatch: {} not in file ut map'.format(f))
                    return ''
            else:
                if self.is_only_comment(f):
                    ut_list.append('map_comment_placeholder')
                else:
                    ut_list.extend(file_ut_map.get(f))
        ut_list = list(set(ut_list))
        if check_added_ut:
            with open('{}/added_ut'.format(PADDLE_ROOT)) as utfile:
                for ut in utfile:
                    print('PREC NEW UT: {}'.format(ut.rstrip('\r\n')))
                    ut_list.append(ut.rstrip('\r\n'))
        if ut_list:
            ret = self.__wget_with_retry(
                'https://sys-p0.bj.bcebos.com/prec/prec_delta{}'.format(
                    self.suffix))
            if ret:
                with open('prec_delta' + self.suffix) as delta:
                    for ut in delta:
                        ut_list.append(ut.rstrip('\r\n'))
            else:
                print('PREC download prec_delta failed')
                exit(1)
        return '\n'.join(ut_list)
if __name__ == '__main__':
    # Entry point for CI: resolve the PR, compute the unit-test list,
    # and persist it to 'ut_list' for downstream scripts to consume.
    pr_checker = PRChecker()
    pr_checker.init()
    #print(pr_checker.get_pr_ut())
    with open('ut_list', 'w') as f:
        f.write(pr_checker.get_pr_ut())
|
en
| 0.714033
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. For the PR that only modified the unit test, get cases in pull request. PR Checker. #.*') .*? .*? Get pull request. #todo: exception Get files in pull request. #content = self.repo.get_contents(f.replace(PADDLE_ROOT, ''), 'pull/').decoded_content #todo: get file from github #for line in content.split('\n'): #input += str(lineno) + '|' + line + '\n' Get unit tests in pull request. #print(pr_checker.get_pr_ut())
| 2.395192
| 2
|
CircuitPython_Made_Easy_On_CPX/cpx_play_file_buttons/code.py
|
albinger/Adafruit_Learning_System_Guides
| 0
|
6626033
|
# SPDX-FileCopyrightText: 2017 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
from adafruit_circuitplayground.express import cpx
# Poll the Circuit Playground Express pushbuttons forever; each press
# plays the matching WAV clip (play_file blocks until playback ends).
while True:
    if cpx.button_a:
        cpx.play_file("Wild_Eep.wav")
    if cpx.button_b:
        cpx.play_file("Coin.wav")
|
# SPDX-FileCopyrightText: 2017 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
from adafruit_circuitplayground.express import cpx
while True:
if cpx.button_a:
cpx.play_file("Wild_Eep.wav")
if cpx.button_b:
cpx.play_file("Coin.wav")
|
en
| 0.293691
|
# SPDX-FileCopyrightText: 2017 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT
| 2.225828
| 2
|
test.py
|
ACkuku/mx-rcnn
| 0
|
6626034
|
import argparse
import ast
import pprint
import mxnet as mx
from mxnet.module import Module
import numpy as np
from tqdm import tqdm
from symdata.bbox import im_detect
from symdata.loader import TestLoader
from symnet.logger import logger
from symnet.model import load_param, check_shape
def test_net(sym, imdb, args):
    """Run Faster R-CNN detection over *imdb* with symbol *sym* and evaluate.

    Loads trained params from args.params, binds an MXNet Module on GPU
    args.gpu with batch size 1, detects every image, collects per-class
    boxes, then calls imdb.evaluate_detections on the result.
    """
    # print config
    logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))
    # setup context
    ctx = mx.gpu(args.gpu)
    # load testing data
    test_data = TestLoader(imdb.roidb, batch_size=1, short=args.img_short_side, max_size=args.img_long_side,
                           mean=args.img_pixel_means, std=args.img_pixel_stds)
    # load params
    arg_params, aux_params = load_param(args.params, ctx=ctx)
    # produce shape max possible (square of the long side bounds any input)
    data_names = ['data', 'im_info']
    label_names = None
    data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)), ('im_info', (1, 3))]
    label_shapes = None
    # check shapes
    check_shape(sym, data_shapes, arg_params, aux_params)
    # create and bind module
    mod = Module(sym, data_names, label_names, context=ctx)
    mod.bind(data_shapes, label_shapes, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)
    # all detections are collected into:
    # all_boxes[cls][image] = N x 5 array of detections in
    # (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(imdb.num_images)]
                 for _ in range(imdb.num_classes)]
    # start detection
    with tqdm(total=imdb.num_images) as pbar:
        for i, data_batch in enumerate(test_data):
            # forward
            im_info = data_batch.data[1][0]
            mod.forward(data_batch)
            rois, scores, bbox_deltas = mod.get_outputs()
            # strip the batch-index column from the ROIs
            rois = rois[:, 1:]
            scores = scores[0]
            bbox_deltas = bbox_deltas[0]
            det = im_detect(rois, scores, bbox_deltas, im_info,
                            bbox_stds=args.rcnn_bbox_stds, nms_thresh=args.rcnn_nms_thresh,
                            conf_thresh=args.rcnn_conf_thresh, use_soft_nms=args.use_soft_nms,
                            soft_nms_thresh=args.soft_nms_thresh, max_per_image=args.max_per_image)
            # split detections per class (class id is in column 0,
            # score in column 1, box coords in the last four columns)
            for j in range(1, imdb.num_classes):
                indexes = np.where(det[:, 0] == j)[0]
                all_boxes[j][i] = np.concatenate((det[:, -4:], det[:, [1]]), axis=-1)[indexes, :]
            pbar.update(data_batch.data[0].shape[0])
    # evaluate model
    imdb.evaluate_detections(all_boxes)
def parse_args():
    """Parse command-line arguments for testing a Faster R-CNN network.

    Tuple-valued options are passed as literal strings (e.g. '(8, 16, 32)')
    and converted with ast.literal_eval after parsing.

    Returns:
        argparse.Namespace with the converted values.
    """

    def _str2bool(v):
        # argparse's `type=bool` treats ANY non-empty string as True
        # (bool('False') is True), so parse common spellings explicitly.
        if isinstance(v, bool):
            return v
        return v.strip().lower() in ('1', 'true', 't', 'yes', 'y')

    parser = argparse.ArgumentParser(description='Test a Faster R-CNN network',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--network', type=str, default='vgg16', help='base network')
    parser.add_argument('--params', type=str, default='', help='path to trained model')
    parser.add_argument('--dataset', type=str, default='voc', help='training dataset')
    parser.add_argument('--imageset', type=str, default='', help='imageset splits')
    parser.add_argument('--gpu', type=int, default=0, help='gpu device eg. 0')
    # faster rcnn params
    parser.add_argument('--img-short-side', type=int, default=600)
    parser.add_argument('--img-long-side', type=int, default=1000)
    parser.add_argument('--img-pixel-means', type=str, default='(0.0, 0.0, 0.0)')
    parser.add_argument('--img-pixel-stds', type=str, default='(1.0, 1.0, 1.0)')
    parser.add_argument('--rpn-feat-stride', type=int, default=16)
    parser.add_argument('--rpn-anchor-scales', type=str, default='(8, 16, 32)')
    parser.add_argument('--rpn-anchor-ratios', type=str, default='(0.5, 1, 2)')
    parser.add_argument('--rpn-pre-nms-topk', type=int, default=6000)
    parser.add_argument('--rpn-post-nms-topk', type=int, default=300)
    parser.add_argument('--rpn-nms-thresh', type=float, default=0.7)
    parser.add_argument('--rpn-min-size', type=int, default=16)
    parser.add_argument('--rcnn-num-classes', type=int, default=21)
    parser.add_argument('--rcnn-feat-stride', type=int, default=16)
    parser.add_argument('--rcnn-pooled-size', type=str, default='(14, 14)')
    parser.add_argument('--rcnn-batch-size', type=int, default=1)
    parser.add_argument('--rcnn-bbox-stds', type=str, default='(0.1, 0.1, 0.2, 0.2)')
    parser.add_argument('--rcnn-nms-thresh', type=float, default=0.3)
    parser.add_argument('--rcnn-conf-thresh', type=float, default=1e-3)
    # Add soft nms by liusm 20180929
    # BUGFIX: was `type=bool`, which mapped '--use-soft-nms False' to True.
    parser.add_argument('--use-soft-nms', type=_str2bool, default=True)
    parser.add_argument('--soft-nms-thresh', type=float, default=0.6)
    parser.add_argument('--max-per-image', type=int, default=100)
    # if use deformable conv add by liusm 20181009
    parser.add_argument('--use-deformable-conv', action='store_true')
    args = parser.parse_args()
    # Convert tuple-literal strings into real tuples.
    args.img_pixel_means = ast.literal_eval(args.img_pixel_means)
    args.img_pixel_stds = ast.literal_eval(args.img_pixel_stds)
    args.rpn_anchor_scales = ast.literal_eval(args.rpn_anchor_scales)
    args.rpn_anchor_ratios = ast.literal_eval(args.rpn_anchor_ratios)
    args.rcnn_pooled_size = ast.literal_eval(args.rcnn_pooled_size)
    args.rcnn_bbox_stds = ast.literal_eval(args.rcnn_bbox_stds)
    return args
def get_voc(args):
    """Build the PASCAL VOC test imdb, defaulting imageset to '2007_test'.

    Also sets args.rcnn_num_classes from the VOC class list.
    """
    from symimdb.pascal_voc import PascalVOC
    args.imageset = args.imageset or '2007_test'
    args.rcnn_num_classes = len(PascalVOC.classes)
    return PascalVOC(args.imageset, 'data', 'data/VOCdevkit')
def get_coco(args):
    """Build the COCO test imdb, defaulting imageset to 'val2017'.

    Also sets args.rcnn_num_classes from the COCO class list.
    """
    from symimdb.coco import coco
    args.imageset = args.imageset or 'val2017'
    args.rcnn_num_classes = len(coco.classes)
    return coco(args.imageset, 'data', 'data/coco')
def get_vgg16_test(args):
    """Build the VGG16 Faster R-CNN test symbol.

    Fills VGG-specific defaults into *args* (params path, ImageNet pixel
    means, stride 16, 7x7 ROI pooling) before constructing the symbol.
    """
    from symnet.symbol_vgg import get_vgg_test
    if not args.params:
        args.params = 'model/vgg16-0010.params'
    args.img_pixel_means = (123.68, 116.779, 103.939)
    args.img_pixel_stds = (1.0, 1.0, 1.0)
    args.net_fixed_params = ['conv1', 'conv2']
    args.rpn_feat_stride = 16
    args.rcnn_feat_stride = 16
    args.rcnn_pooled_size = (7, 7)
    return get_vgg_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
                        rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
                        rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
                        rpn_min_size=args.rpn_min_size,
                        num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
                        rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size)
def get_resnet50_test(args):
    """Build the ResNet-50 Faster R-CNN test symbol.

    Fills ResNet-specific defaults into *args*; units=(3, 4, 6, 3) is
    the ResNet-50 stage configuration.
    """
    from symnet.symbol_resnet import get_resnet_test
    if not args.params:
        args.params = 'model/resnet50-0010.params'
    args.img_pixel_means = (0.0, 0.0, 0.0)
    args.img_pixel_stds = (1.0, 1.0, 1.0)
    args.rpn_feat_stride = 16
    args.rcnn_feat_stride = 16
    args.rcnn_pooled_size = (14, 14)
    return get_resnet_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
                           rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
                           rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
                           rpn_min_size=args.rpn_min_size,
                           num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
                           rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size,
                           units=(3, 4, 6, 3), filter_list=(256, 512, 1024, 2048))
def get_resnet101_test(args):
    """Build the ResNet-101 Faster R-CNN test symbol.

    Same as get_resnet50_test except for the params path and the
    ResNet-101 stage configuration units=(3, 4, 23, 3).
    """
    from symnet.symbol_resnet import get_resnet_test
    if not args.params:
        args.params = 'model/resnet101-0010.params'
    args.img_pixel_means = (0.0, 0.0, 0.0)
    args.img_pixel_stds = (1.0, 1.0, 1.0)
    args.rpn_feat_stride = 16
    args.rcnn_feat_stride = 16
    args.rcnn_pooled_size = (14, 14)
    return get_resnet_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
                           rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
                           rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
                           rpn_min_size=args.rpn_min_size,
                           num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
                           rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size,
                           units=(3, 4, 23, 3), filter_list=(256, 512, 1024, 2048))
def get_dataset(dataset, args):
    """Build the imdb named *dataset*, configuring *args* in the process.

    Raises:
        ValueError: if *dataset* is not a supported name.
    """
    factories = {'voc': get_voc, 'coco': get_coco}
    factory = factories.get(dataset)
    if factory is None:
        raise ValueError("dataset {} not supported".format(dataset))
    return factory(args)
def get_network(network, args):
    """Build the test symbol for *network*, configuring *args* en route.

    Raises:
        ValueError: if *network* is not a supported name.
    """
    builders = {
        'vgg16': get_vgg16_test,
        'resnet50': get_resnet50_test,
        'resnet101': get_resnet101_test
    }
    builder = builders.get(network)
    if builder is None:
        raise ValueError("network {} not supported".format(network))
    return builder(args)
def main():
    """Entry point: parse args, build dataset and network, run the test."""
    args = parse_args()
    imdb = get_dataset(args.dataset, args)
    sym = get_network(args.network, args)
    test_net(sym, imdb, args)
if __name__ == '__main__':
main()
|
import argparse
import ast
import pprint
import mxnet as mx
from mxnet.module import Module
import numpy as np
from tqdm import tqdm
from symdata.bbox import im_detect
from symdata.loader import TestLoader
from symnet.logger import logger
from symnet.model import load_param, check_shape
def test_net(sym, imdb, args):
# print config
logger.info('called with args\n{}'.format(pprint.pformat(vars(args))))
# setup context
ctx = mx.gpu(args.gpu)
# load testing data
test_data = TestLoader(imdb.roidb, batch_size=1, short=args.img_short_side, max_size=args.img_long_side,
mean=args.img_pixel_means, std=args.img_pixel_stds)
# load params
arg_params, aux_params = load_param(args.params, ctx=ctx)
# produce shape max possible
data_names = ['data', 'im_info']
label_names = None
data_shapes = [('data', (1, 3, args.img_long_side, args.img_long_side)), ('im_info', (1, 3))]
label_shapes = None
# check shapes
check_shape(sym, data_shapes, arg_params, aux_params)
# create and bind module
mod = Module(sym, data_names, label_names, context=ctx)
mod.bind(data_shapes, label_shapes, for_training=False)
mod.init_params(arg_params=arg_params, aux_params=aux_params)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(imdb.num_images)]
for _ in range(imdb.num_classes)]
# start detection
with tqdm(total=imdb.num_images) as pbar:
for i, data_batch in enumerate(test_data):
# forward
im_info = data_batch.data[1][0]
mod.forward(data_batch)
rois, scores, bbox_deltas = mod.get_outputs()
rois = rois[:, 1:]
scores = scores[0]
bbox_deltas = bbox_deltas[0]
det = im_detect(rois, scores, bbox_deltas, im_info,
bbox_stds=args.rcnn_bbox_stds, nms_thresh=args.rcnn_nms_thresh,
conf_thresh=args.rcnn_conf_thresh, use_soft_nms=args.use_soft_nms,
soft_nms_thresh=args.soft_nms_thresh, max_per_image=args.max_per_image)
for j in range(1, imdb.num_classes):
indexes = np.where(det[:, 0] == j)[0]
all_boxes[j][i] = np.concatenate((det[:, -4:], det[:, [1]]), axis=-1)[indexes, :]
pbar.update(data_batch.data[0].shape[0])
# evaluate model
imdb.evaluate_detections(all_boxes)
def parse_args():
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--network', type=str, default='vgg16', help='base network')
parser.add_argument('--params', type=str, default='', help='path to trained model')
parser.add_argument('--dataset', type=str, default='voc', help='training dataset')
parser.add_argument('--imageset', type=str, default='', help='imageset splits')
parser.add_argument('--gpu', type=int, default=0, help='gpu device eg. 0')
# faster rcnn params
parser.add_argument('--img-short-side', type=int, default=600)
parser.add_argument('--img-long-side', type=int, default=1000)
parser.add_argument('--img-pixel-means', type=str, default='(0.0, 0.0, 0.0)')
parser.add_argument('--img-pixel-stds', type=str, default='(1.0, 1.0, 1.0)')
parser.add_argument('--rpn-feat-stride', type=int, default=16)
parser.add_argument('--rpn-anchor-scales', type=str, default='(8, 16, 32)')
parser.add_argument('--rpn-anchor-ratios', type=str, default='(0.5, 1, 2)')
parser.add_argument('--rpn-pre-nms-topk', type=int, default=6000)
parser.add_argument('--rpn-post-nms-topk', type=int, default=300)
parser.add_argument('--rpn-nms-thresh', type=float, default=0.7)
parser.add_argument('--rpn-min-size', type=int, default=16)
parser.add_argument('--rcnn-num-classes', type=int, default=21)
parser.add_argument('--rcnn-feat-stride', type=int, default=16)
parser.add_argument('--rcnn-pooled-size', type=str, default='(14, 14)')
parser.add_argument('--rcnn-batch-size', type=int, default=1)
parser.add_argument('--rcnn-bbox-stds', type=str, default='(0.1, 0.1, 0.2, 0.2)')
parser.add_argument('--rcnn-nms-thresh', type=float, default=0.3)
parser.add_argument('--rcnn-conf-thresh', type=float, default=1e-3)
# Add soft nms by liusm 20180929
parser.add_argument('--use-soft-nms', type=bool, default=True)
parser.add_argument('--soft-nms-thresh', type=float, default=0.6)
parser.add_argument('--max-per-image', type=int, default=100)
# if use deformable conv add by liusm 20181009
parser.add_argument('--use-deformable-conv', action='store_true')
args = parser.parse_args()
args.img_pixel_means = ast.literal_eval(args.img_pixel_means)
args.img_pixel_stds = ast.literal_eval(args.img_pixel_stds)
args.rpn_anchor_scales = ast.literal_eval(args.rpn_anchor_scales)
args.rpn_anchor_ratios = ast.literal_eval(args.rpn_anchor_ratios)
args.rcnn_pooled_size = ast.literal_eval(args.rcnn_pooled_size)
args.rcnn_bbox_stds = ast.literal_eval(args.rcnn_bbox_stds)
return args
def get_voc(args):
from symimdb.pascal_voc import PascalVOC
if not args.imageset:
args.imageset = '2007_test'
args.rcnn_num_classes = len(PascalVOC.classes)
return PascalVOC(args.imageset, 'data', 'data/VOCdevkit')
def get_coco(args):
from symimdb.coco import coco
if not args.imageset:
args.imageset = 'val2017'
args.rcnn_num_classes = len(coco.classes)
return coco(args.imageset, 'data', 'data/coco')
def get_vgg16_test(args):
from symnet.symbol_vgg import get_vgg_test
if not args.params:
args.params = 'model/vgg16-0010.params'
args.img_pixel_means = (123.68, 116.779, 103.939)
args.img_pixel_stds = (1.0, 1.0, 1.0)
args.net_fixed_params = ['conv1', 'conv2']
args.rpn_feat_stride = 16
args.rcnn_feat_stride = 16
args.rcnn_pooled_size = (7, 7)
return get_vgg_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
rpn_min_size=args.rpn_min_size,
num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size)
def get_resnet50_test(args):
from symnet.symbol_resnet import get_resnet_test
if not args.params:
args.params = 'model/resnet50-0010.params'
args.img_pixel_means = (0.0, 0.0, 0.0)
args.img_pixel_stds = (1.0, 1.0, 1.0)
args.rpn_feat_stride = 16
args.rcnn_feat_stride = 16
args.rcnn_pooled_size = (14, 14)
return get_resnet_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
rpn_min_size=args.rpn_min_size,
num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size,
units=(3, 4, 6, 3), filter_list=(256, 512, 1024, 2048))
def get_resnet101_test(args):
from symnet.symbol_resnet import get_resnet_test
if not args.params:
args.params = 'model/resnet101-0010.params'
args.img_pixel_means = (0.0, 0.0, 0.0)
args.img_pixel_stds = (1.0, 1.0, 1.0)
args.rpn_feat_stride = 16
args.rcnn_feat_stride = 16
args.rcnn_pooled_size = (14, 14)
return get_resnet_test(anchor_scales=args.rpn_anchor_scales, anchor_ratios=args.rpn_anchor_ratios,
rpn_feature_stride=args.rpn_feat_stride, rpn_pre_topk=args.rpn_pre_nms_topk,
rpn_post_topk=args.rpn_post_nms_topk, rpn_nms_thresh=args.rpn_nms_thresh,
rpn_min_size=args.rpn_min_size,
num_classes=args.rcnn_num_classes, rcnn_feature_stride=args.rcnn_feat_stride,
rcnn_pooled_size=args.rcnn_pooled_size, rcnn_batch_size=args.rcnn_batch_size,
units=(3, 4, 23, 3), filter_list=(256, 512, 1024, 2048))
def get_dataset(dataset, args):
datasets = {
'voc': get_voc,
'coco': get_coco
}
if dataset not in datasets:
raise ValueError("dataset {} not supported".format(dataset))
return datasets[dataset](args)
def get_network(network, args):
networks = {
'vgg16': get_vgg16_test,
'resnet50': get_resnet50_test,
'resnet101': get_resnet101_test
}
if network not in networks:
raise ValueError("network {} not supported".format(network))
return networks[network](args)
def main():
args = parse_args()
imdb = get_dataset(args.dataset, args)
sym = get_network(args.network, args)
test_net(sym, imdb, args)
if __name__ == '__main__':
main()
|
en
| 0.596236
|
# print config # setup context # load testing data # load params # produce shape max possible # check shapes # create and bind module # all detections are collected into: # all_boxes[cls][image] = N x 5 array of detections in # (x1, y1, x2, y2, score) # start detection # forward # evaluate model # faster rcnn params # Add soft nms by liusm 20180929 # if use deformable conv add by liusm 20181009
| 2.06621
| 2
|
SOLID LAB/02_OCP/animal.py
|
borko81/SU_OOP_2021
| 0
|
6626035
|
<reponame>borko81/SU_OOP_2021
from abc import ABC, abstractmethod
class SomeAnimal(ABC):
    """Abstract animal interface.

    Concrete animals override __repr__ to return the sound they make.
    """

    @abstractmethod
    def __repr__(self):
        pass
class Cat(SomeAnimal):
    """A cat; its repr is the sound it makes."""

    def __repr__(self):
        return "meow"
class Dog(SomeAnimal):
    """A dog; its repr is the sound it makes."""

    def __repr__(self):
        return "wolf-wolf"
class Animal:
    """Wraps a species object so callers query it uniformly (OCP demo)."""

    def __init__(self, species):
        # The wrapped species object; its repr is the animal's sound.
        self.species = species

    def get_species(self):
        """Return the wrapped species object."""
        return self.species
def animal_sound(animals: list):
    """Print the species of every animal in *animals*, one per line."""
    for creature in animals:
        print(creature.get_species())
# Demo: wrap a Cat and a Dog and print each animal's sound.
animals = [Animal(Cat()), Animal(Dog())]
animal_sound(animals)
|
from abc import ABC, abstractmethod
class SomeAnimal(ABC):
@abstractmethod
def __repr__(self):
pass
class Cat(SomeAnimal):
def __repr__(self):
return "meow"
class Dog(SomeAnimal):
def __repr__(self):
return "wolf-wolf"
class Animal:
def __init__(self, species):
self.species = species
def get_species(self):
return self.species
def animal_sound(animals: list):
for animal in animals:
print(animal.get_species())
animals = [Animal(Cat()), Animal(Dog())]
animal_sound(animals)
|
none
| 1
| 3.918344
| 4
|
|
qa/L0_lifecycle/lifecycle_test.py
|
kpedro88/triton-inference-server
| 0
|
6626036
|
<filename>qa/L0_lifecycle/lifecycle_test.py<gh_stars>0
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import os
import shutil
import time
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import tritongrpcclient as grpcclient
import tritonhttpclient as httpclient
from tritonclientutils import InferenceServerException
class LifeCycleTest(tu.TestResultCollector):
    def _infer_success_models(self,
                              model_base_names,
                              versions,
                              tensor_shape,
                              swap=False):
        """Assert every (base_name, version) is ready over both HTTP and
        gRPC and that exact inference succeeds for each version.

        Version 3 always infers with swapped outputs; otherwise *swap*
        decides. Any exception is converted into a test failure.
        """
        for base_name in model_base_names:
            try:
                model_name = tu.get_model_name(base_name, np.float32,
                                               np.float32, np.float32)
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    # FIXME is_server_ready should be true here DLIS-1296
                    # self.assertTrue(triton_client.is_server_ready())
                    for v in versions:
                        self.assertTrue(
                            triton_client.is_model_ready(model_name, str(v)))
                for v in versions:
                    iu.infer_exact(self,
                                   base_name,
                                   tensor_shape,
                                   1,
                                   np.float32,
                                   np.float32,
                                   np.float32,
                                   model_version=v,
                                   swap=(swap or (v == 3)))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_noexit(self):
        """Server started with invalid args and --exit-on-error=false:
        it must keep running (serving metadata) but report not live and
        not ready over both protocols."""
        # Server was started with invalid args and
        # --exit-on-error=false so expect it to be running with
        # SERVER_FAILED_TO_INITIALIZE status.
        # Server is not live and not ready regardless of --strict-readiness
        try:
            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            self.assertFalse(triton_client.is_server_live())
            self.assertFalse(triton_client.is_server_ready())
            md = triton_client.get_server_metadata()
            # gRPC metadata is an object with attribute access.
            self.assertEqual(os.environ["TRITON_SERVER_VERSION"], md.version)
            self.assertEqual("triton", md.name)
        except InferenceServerException as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            self.assertFalse(triton_client.is_server_live())
            self.assertFalse(triton_client.is_server_ready())
            md = triton_client.get_server_metadata()
            # HTTP metadata is a plain dict.
            self.assertEqual(os.environ["TRITON_SERVER_VERSION"], md['version'])
            self.assertEqual("triton", md['name'])
        except InferenceServerException as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_modelfail(self):
        """With --strict-readiness=true and one model failing to load:
        server is live but NOT ready, inference on the bad model errors,
        and the remaining models still serve."""
        # --strict-readiness=true so server is live but not ready
        tensor_shape = (1, 16)
        # Server was started but with a model that fails to load
        try:
            model_name = tu.get_model_name('graphdef', np.float32, np.float32,
                                           np.float32)
            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertFalse(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertFalse(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Inferencing with the missing model should fail.
        try:
            iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
                           np.float32, np.float32)
            self.assertTrue(
                False, "expected error for unavailable model " + model_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
            ))
        # And other models should be loaded successfully
        try:
            for base_name in ["savedmodel", 'netdef']:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(
                        triton_client.is_model_ready(model_name, "1"))
                    iu.infer_exact(self,
                                   base_name,
                                   tensor_shape,
                                   1,
                                   np.float32,
                                   np.float32,
                                   np.float32,
                                   model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_modelfail_nostrict(self):
        """Same as test_parse_error_modelfail but started with
        --strict-readiness=false: the server reports BOTH live and ready
        despite the failed model."""
        # --strict-readiness=false so server is live and ready
        tensor_shape = (1, 16)
        # Server was started but with a model that fails to load
        try:
            model_name = tu.get_model_name('graphdef', np.float32, np.float32,
                                           np.float32)
            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertTrue(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertTrue(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Inferencing with the missing model should fail.
        try:
            iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
                           np.float32, np.float32)
            self.assertTrue(
                False, "expected error for unavailable model " + model_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
            ))
        # And other models should be loaded successfully
        try:
            for base_name in ["savedmodel", 'netdef']:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(
                        triton_client.is_model_ready(model_name, "1"))
                    iu.infer_exact(self,
                                   base_name,
                                   tensor_shape,
                                   1,
                                   np.float32,
                                   np.float32,
                                   np.float32,
                                   model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_no_model_config(self):
tensor_shape = (1, 16)
# Server was started but with a model that fails to be polled
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
model_name = tu.get_model_name('graphdef', np.float32,
np.float32, np.float32)
# expecting ready because not strict readiness
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
md = triton_client.get_model_metadata(model_name, "1")
self.assertTrue(
False, "expected model '" + model_name +
"' to be ignored due to polling failure")
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_float32_float32_float32' is not found"
))
# And other models should be loaded successfully
try:
for base_name in ["savedmodel", 'netdef']:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name, "1"))
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=1)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
    def test_init_error_modelfail(self):
        """Server started with --strict-readiness=true and two custom models
        (one sequence-batched, one dynamic-batched) that fail to load.

        Expectations: server is live but NOT ready, the two failed models
        are not ready, and the framework models still load and infer.
        """
        # --strict-readiness=true so server is live but not ready

        # Server was started but with models that fail to load
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertFalse(triton_client.is_server_ready())

                # one model uses sequence batcher while the other uses dynamic batcher
                model_names = [
                    "custom_sequence_int32", "custom_int32_int32_int32"
                ]
                for model_name in model_names:
                    self.assertFalse(triton_client.is_model_ready(model_name))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

            # And other models should be loaded successfully
            try:
                for base_name in ["graphdef", "savedmodel", 'netdef']:
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(triton_client.is_model_ready(model_name))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Inference on the healthy models must succeed (version 1 explicitly).
        try:
            tensor_shape = (1, 16)
            for base_name in ["graphdef", "savedmodel", 'netdef']:
                iu.infer_exact(self,
                               base_name,
                               tensor_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_model_no_version(self):
# --strict-readiness=true so server is live but not ready
tensor_shape = (1, 16)
# Server was started but with a model that fails to load
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
model_name = tu.get_model_name('graphdef', np.float32,
np.float32, np.float32)
self.assertFalse(triton_client.is_model_ready(model_name))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Sanity check that other models are loaded properly
try:
for base_name in ["savedmodel", "netdef"]:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name))
for version in ["1", "3"]:
model_name = tu.get_model_name("plan", np.float32,
np.float32, np.float32)
self.assertTrue(
triton_client.is_model_ready(model_name, version))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
for base_name in ["savedmodel", "netdef"]:
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True)
for version in [1, 3]:
iu.infer_exact(self,
'plan',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
np.float32, np.float32)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(ex.message().startswith(
"Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
))
def test_parse_ignore_zero_prefixed_version(self):
tensor_shape = (1, 16)
# Server was started but only version 1 is loaded
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
model_name = tu.get_model_name('savedmodel', np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
# swap=False for version 1
iu.infer_exact(self,
'savedmodel',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
    def test_dynamic_model_load_unload(self):
        """Polling mode: adding/removing a model directory under models/
        must load/unload that model at runtime.

        Sequence: savedmodel absent -> copy in, verify ready + infer +
        per-version stats -> remove, verify unready + inference fails ->
        copy back, verify stats were reset -> remove netdef, verify it
        becomes unavailable. Each step sleeps 5s for the poller to react.
        """
        tensor_shape = (1, 16)
        savedmodel_name = tu.get_model_name('savedmodel', np.float32,
                                            np.float32, np.float32)
        netdef_name = tu.get_model_name('netdef', np.float32, np.float32,
                                        np.float32)

        # Make sure savedmodel model is not in the status (because
        # initially it is not in the model repository)
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Add savedmodel model to the model repository and give it time to
        # load. Make sure that it has a status and is ready.
        try:
            shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Run inference on the just loaded model
        try:
            iu.infer_exact(self,
                           'savedmodel',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Make sure savedmodel has execution stats.  swap=True above routed
        # the request to the non-"1" version, so only that version should
        # show a nonzero success count.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats["model_stats"]), 2)
            for idx in range(len(stats["model_stats"])):
                self.assertEqual(stats["model_stats"][idx]["name"],
                                 savedmodel_name)
                if stats["model_stats"][idx]["version"] == "1":
                    self.assertEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)
                else:
                    self.assertNotEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats.model_stats), 2)
            for idx in range(len(stats.model_stats)):
                self.assertEqual(stats.model_stats[idx].name, savedmodel_name)
                if stats.model_stats[idx].version == "1":
                    self.assertEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
                else:
                    self.assertNotEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Remove savedmodel model from the model repository and give it
        # time to unload. Make sure that it is no longer available.
        try:
            shutil.rmtree("models/" + savedmodel_name)
            time.sleep(5)  # wait for model to unload
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Model is removed so inference should fail
        try:
            iu.infer_exact(self,
                           'savedmodel',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
            self.assertTrue(
                False,
                "expected error for unavailable model " + savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'savedmodel_float32_float32_float32' has no available versions"
            ))

        # Add back the same model. The status/stats should be reset.
        try:
            shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))

            # Reload resets stats: both versions must report zero successes.
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats["model_stats"]), 2)
            self.assertEqual(stats["model_stats"][0]["name"], savedmodel_name)
            self.assertEqual(stats["model_stats"][1]["name"], savedmodel_name)
            self.assertEqual(
                stats["model_stats"][0]["inference_stats"]["success"]["count"],
                0)
            self.assertEqual(
                stats["model_stats"][1]["inference_stats"]["success"]["count"],
                0)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats.model_stats), 2)
            self.assertEqual(stats.model_stats[0].name, savedmodel_name)
            self.assertEqual(stats.model_stats[1].name, savedmodel_name)
            self.assertEqual(stats.model_stats[0].inference_stats.success.count,
                             0)
            self.assertEqual(stats.model_stats[1].inference_stats.success.count,
                             0)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Remove netdef model from the model repository and give it
        # time to unload. Make sure that it is unavailable.
        try:
            shutil.rmtree("models/" + netdef_name)
            time.sleep(5)  # wait for model to unload
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertFalse(triton_client.is_model_ready(netdef_name, "1"))
                self.assertFalse(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Model is removed so inference should fail
        try:
            iu.infer_exact(self,
                           'netdef',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
            self.assertTrue(
                False, "expected error for unavailable model " + netdef_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'netdef_float32_float32_float32' has no available versions"
            ))
    def test_dynamic_model_load_unload_disabled(self):
        """With dynamic load/unload disabled, repository changes must NOT
        alter the serving state.

        Adding savedmodel to models/ leaves it unready; removing netdef
        leaves it ready and still serving.
        """
        tensor_shape = (1, 16)
        savedmodel_name = tu.get_model_name('savedmodel', np.float32,
                                            np.float32, np.float32)
        netdef_name = tu.get_model_name('netdef', np.float32, np.float32,
                                        np.float32)

        # Make sure savedmodel model is not in the status (because
        # initially it is not in the model repository)
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Add savedmodel model to the model repository and give it time to
        # load. But it shouldn't load because dynamic loading is disabled.
        try:
            shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Run inference which should fail because the model isn't there
        try:
            iu.infer_exact(self,
                           'savedmodel',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
            self.assertTrue(
                False,
                "expected error for unavailable model " + savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'savedmodel_float32_float32_float32' is not found"
            ))

        # Remove one of the original models from the model repository.
        # Unloading is disabled so it should remain available in the status.
        try:
            shutil.rmtree("models/" + netdef_name)
            time.sleep(5)  # wait for model to unload (but it shouldn't)
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Run inference to make sure model still being served even
        # though deleted from model repository
        try:
            iu.infer_exact(self,
                           'netdef',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_dynamic_version_load_unload(self):
        """Polling mode: adding/removing individual *version* directories of
        a model must load/unload those versions at runtime.

        Sequence: versions 1-3 ready -> infer on version 1 and check only it
        has stats -> delete version 1, verify unready and inference on it
        fails -> copy version 2 to a new version 7 and verify it loads.
        """
        tensor_shape = (1, 16)
        graphdef_name = tu.get_model_name('graphdef', np.int32, np.int32,
                                          np.int32)

        # There are 3 versions. Make sure that all have status and are
        # ready.
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Run inference on version 1 to make sure it is available
        try:
            iu.infer_exact(self,
                           'graphdef',
                           tensor_shape,
                           1,
                           np.int32,
                           np.int32,
                           np.int32,
                           swap=False,
                           model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Make sure only version 1 has execution stats in the status.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(graphdef_name)
            self.assertEqual(len(stats["model_stats"]), 3)
            for idx in range(len(stats["model_stats"])):
                self.assertEqual(stats["model_stats"][idx]["name"],
                                 graphdef_name)
                if stats["model_stats"][idx]["version"] == "1":
                    self.assertNotEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)
                else:
                    self.assertEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(graphdef_name)
            self.assertEqual(len(stats.model_stats), 3)
            for idx in range(len(stats.model_stats)):
                self.assertEqual(stats.model_stats[idx].name, graphdef_name)
                if stats.model_stats[idx].version == "1":
                    self.assertNotEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
                else:
                    self.assertEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Remove version 1 from the model repository and give it time to
        # unload. Make sure that it is unavailable.
        try:
            shutil.rmtree("models/" + graphdef_name + "/1")
            time.sleep(5)  # wait for version to unload
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Version is removed so inference should fail
        try:
            iu.infer_exact(self,
                           'graphdef',
                           tensor_shape,
                           1,
                           np.int32,
                           np.int32,
                           np.int32,
                           swap=False,
                           model_version=1)
            self.assertTrue(
                False, "expected error for unavailable model " + graphdef_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_int32_int32_int32' version 1 is not at ready state"
            ))

        # Add another version to the model repository.
        try:
            shutil.copytree("models/" + graphdef_name + "/2",
                            "models/" + graphdef_name + "/7")
            time.sleep(5)  # wait for version to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "7"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_dynamic_version_load_unload_disabled(self):
        """With dynamic load/unload disabled, version-directory changes must
        NOT alter serving state: a newly copied version 7 stays unready and
        a deleted version 1 stays ready and continues to serve.
        """
        tensor_shape = (1, 16)
        graphdef_name = tu.get_model_name('graphdef', np.int32, np.int32,
                                          np.int32)

        # Add a new version to the model repository and give it time to
        # load. But it shouldn't load because dynamic loading is
        # disabled.
        try:
            shutil.copytree("models/" + graphdef_name + "/2",
                            "models/" + graphdef_name + "/7")
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "7"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Remove one of the original versions from the model repository.
        # Unloading is disabled so it should remain available
        # in the status.
        try:
            shutil.rmtree("models/" + graphdef_name + "/1")
            time.sleep(5)  # wait for version to unload (but it shouldn't)
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "7"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Run inference to make sure model still being served even
        # though version deleted from model repository
        try:
            iu.infer_exact(self,
                           'graphdef',
                           tensor_shape,
                           1,
                           np.int32,
                           np.int32,
                           np.int32,
                           swap=False,
                           model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_model_modify(self):
models_base = ('savedmodel', 'plan')
models_shape = ((1, 16), (1, 16))
models = list()
for m in models_base:
models.append(
tu.get_model_name(m, np.float32, np.float32, np.float32))
# Make sure savedmodel and plan are in the status
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the model, both versions 1 and 3
for version in (1, 3):
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Change the model configuration to use wrong label file
for base_name, model_name in zip(models_base, models):
shutil.copyfile("config.pbtxt.wrong." + base_name,
"models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version,
output0_raw=False)
self.assertTrue(
False,
"expected error for wrong label for " + model_name)
except AssertionError as ex:
self.assertTrue("'label9" in str(ex) and "!=" in str(ex),
str(ex))
# Change the model configuration to use correct label file and to have
# the default version policy (so that only version 3) is available.
for base_name, model_name in zip(models_base, models):
shutil.copyfile("config.pbtxt." + base_name,
"models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Attempt inferencing using version 1, should fail since
# change in model policy makes that no longer available.
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False,
model_version=1)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
# Version 3 should continue to work...
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True,
model_version=3)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_file_delete(self):
models_base = ('savedmodel', 'plan')
models_shape = ((1, 16), (1, 16))
models = list()
for m in models_base:
models.append(
tu.get_model_name(m, np.float32, np.float32, np.float32))
# Make sure savedmodel and plan are in the status
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the model, both versions 1 and 3
for version in (1, 3):
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Delete model configuration, which cause model to be
# re-loaded and use autofilled config, which means that
# version policy will be latest and so only version 3 will be
# available
for model_name in models:
os.remove("models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Only version 3 (latest) should work...
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True,
model_version=3)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False,
model_version=1)
self.assertTrue(
False,
"expected error for unavailable model " + graphdef_name)
except Exception as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
    def test_multiple_model_repository_polling(self):
        """Polling mode with two model repositories (models/ and models_0/).

        A model present in both repositories is a duplicate and must be
        unloaded; removing it from one repository resolves the conflict and
        the copy in the other repository (with versions 1 and 3) loads.
        """
        model_shape = (1, 16)
        savedmodel_name = tu.get_model_name('savedmodel', np.float32,
                                            np.float32, np.float32)

        # Models should be loaded successfully and infer
        # successfully. Initially savedmodel only has version 1.
        self._infer_success_models([
            "savedmodel",
        ], (1,), model_shape)
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)

        # Add the savedmodel to the second model repository, should cause
        # it to be unloaded due to duplication
        shutil.copytree(savedmodel_name, "models_0/" + savedmodel_name)
        time.sleep(5)  # wait for models to reload
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)

        # Remove the savedmodel from the first model repository, the
        # model from the second model repository should be loaded
        # properly. In the second model repository savedmodel should
        # have versions 1 and 3.
        shutil.rmtree("models/" + savedmodel_name)
        time.sleep(5)  # wait for model to unload
        self._infer_success_models(["savedmodel", "graphdef", 'netdef'], (1, 3),
                                   model_shape)
    def test_multiple_model_repository_control(self):
        """Explicit model-control mode with two model repositories.

        Like test_multiple_model_repository_polling, but load/unload is
        driven through the load_model API: duplicated models only become
        unavailable when an explicit (re)load observes the conflict.
        """
        # similar to test_multiple_model_repository_polling, but the
        # model load/unload is controlled by the API
        model_shape = (1, 16)
        savedmodel_name = tu.get_model_name("savedmodel", np.float32,
                                            np.float32, np.float32)
        model_bases = ['savedmodel', "graphdef", 'netdef']

        # Initially models are not loaded
        for base in model_bases:
            try:
                model_name = tu.get_model_name(base, np.float32, np.float32,
                                               np.float32)
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Load all models, here we use GRPC
        for base in model_bases:
            try:
                model_name = tu.get_model_name(base, np.float32, np.float32,
                                               np.float32)
                triton_client = grpcclient.InferenceServerClient(
                    "localhost:8001", verbose=True)
                triton_client.load_model(model_name)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Models should be loaded successfully and infer
        # successfully. Initially savedmodel only has version 1.
        self._infer_success_models([
            "savedmodel",
        ], (1,), model_shape)
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)

        # Add the savedmodel to the second model repository. Because
        # not polling this doesn't change any model state, all models
        # are still loaded and available.
        shutil.copytree(savedmodel_name, "models_0/" + savedmodel_name)
        self._infer_success_models([
            "savedmodel",
        ], (1,), model_shape)
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)

        # Reload savedmodel which will cause it to unload because it
        # is in 2 model repositories. Use HTTP here.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "failed to load '{}'".format(savedmodel_name)))

        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)

        # Remove the savedmodel from the first model repository and
        # explicitly load savedmodel. The savedmodel from the second
        # model repository should be loaded properly. In the second
        # model repository savedmodel should have versions 1 and 3.
        shutil.rmtree("models/" + savedmodel_name)
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "failed to load '{}'".format(savedmodel_name)))
        self._infer_success_models(["savedmodel", "graphdef", 'netdef'], (1, 3),
                                   model_shape)
    def test_model_control(self):
        """Exercise explicit model control (load/unload) for an ONNX model
        and the 'simple_' ensemble that depends on it.

        Walks the server through: nothing loaded -> load ensemble (pulls in
        dependency) -> config removal + reload (latest-version policy) ->
        unload dependency (force-unloads ensemble) -> explicit ensemble
        unload is sticky across a later dependency load.
        Statement order matters: each step assumes the server state left by
        the previous one.
        """
        model_shape = (1, 16)
        onnx_name = tu.get_model_name('onnx', np.float32,
                                      np.float32, np.float32)

        ensemble_prefix = "simple_"
        ensemble_name = ensemble_prefix + onnx_name

        # Make sure no models are loaded
        for model_name in (onnx_name, ensemble_name):
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Load non-existent model: the request itself must fail.
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.load_model("unknown_model")
                self.assertTrue(False, "expected unknown model failure")
            except Exception as ex:
                self.assertTrue(ex.message().startswith(
                    "failed to load 'unknown_model', no version is available"))

        # Load ensemble model, the dependent model should be polled and loaded
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(ensemble_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        self._infer_success_models([
            "onnx",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        # Delete model configuration for onnx, which will cause
        # the autofiller to use the latest version policy so that only
        # version 3 will be available if the models are re-loaded
        for model_name in (onnx_name,):
            os.remove("models/" + model_name + "/config.pbtxt")

        # Not reloaded yet, so both versions must still be served.
        self._infer_success_models([
            "onnx",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        # Reload models, only version 3 should be available for onnx
        for model_name in (onnx_name, ensemble_name):
            try:
                triton_client = grpcclient.InferenceServerClient(
                    "localhost:8001", verbose=True)
                triton_client.load_model(model_name)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        # Version 1 of onnx must be gone after the reload above.
        for model_name in (onnx_name,):
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Unload non-existing model, nothing should happen
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.unload_model("unknown_model")
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Unload the depending model, as side effect, the ensemble model will be
        # forced to be unloaded
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        for model_name in (onnx_name, ensemble_name):
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Explicitly unload the ensemble and load the depending
        # model. The ensemble model should not be reloaded because it
        # was explicitly unloaded.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(ensemble_name)
            triton_client.load_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)

        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(ensemble_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(ensemble_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_multiple_model_repository_control_startup_models(self):
        """Explicit model control across multiple model repositories where
        some models ('onnx', 'plan' per the comments below) were loaded at
        startup via command line.

        Sequence mirrors test_model_control but with two ensembles; each
        step depends on the server state produced by the previous step.
        """
        model_shape = (1, 16)
        onnx_name = tu.get_model_name('onnx', np.float32,
                                      np.float32, np.float32)
        plan_name = tu.get_model_name('plan', np.float32, np.float32,
                                      np.float32)

        ensemble_prefix = "simple_"
        onnx_ensemble_name = ensemble_prefix + onnx_name
        plan_ensemble_name = ensemble_prefix + plan_name

        # Make sure unloaded models are not in the status
        for base in ("netdef",):
            model_name = tu.get_model_name(base, np.float32, np.float32,
                                           np.float32)
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # And loaded models work properly
        self._infer_success_models([
            "onnx",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)

        # Load non-existing model
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.load_model("unknown_model")
                self.assertTrue(False, "expected unknown model failure")
            except Exception as ex:
                self.assertTrue(ex.message().startswith(
                    "failed to load 'unknown_model', no version is available"))

        # Load plan ensemble model, the dependent model is already
        # loaded via command-line
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(plan_ensemble_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_plan",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        # Delete model configuration, which will cause the autofiller
        # to use the latest version policy so that only version 3 will
        # be available if the models are re-loaded
        os.remove("models/" + onnx_name + "/config.pbtxt")

        # Not reloaded yet, so both plan versions must still work.
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_plan",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        # Reload onnx, only version 3 should be available
        try:
            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            triton_client.load_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(onnx_name, "1"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Unload non-existing model, nothing should happen
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.unload_model("unknown_model")
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Unload the onnx, as side effect, the ensemble model
        # will be forced to be unloaded
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        for model_name in [onnx_name, onnx_ensemble_name]:
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Explicitly unload the onnx ensemble and load the
        # depending model. The ensemble model should not be reloaded
        # because it was explicitly unloaded.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(onnx_ensemble_name)
            triton_client.load_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_plan",
        ], (1, 3),
                                   model_shape,
                                   swap=True)

        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(onnx_ensemble_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(onnx_ensemble_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
def test_model_repository_index(self):
# use model control EXPLIT and --load-model to load a subset of models
# in model repository
tensor_shape = (1, 16)
model_bases = ["graphdef", "savedmodel", "simple_savedmodel"]
# Sanity check on loaded models
# 3 models should be loaded:
# simple_savedmodel_float32_float32_float32
# savedmodel_float32_float32_float32
# graphdef_float32_float32_float32
for model_base in model_bases:
try:
model_name = tu.get_model_name(model_base, np.float32,
np.float32, np.float32)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(triton_client.is_model_ready(model_name))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Check model repository index
# All models should be in ready state except netdef_float32_float32_float32
# which appears in two repositories.
model_bases.append("simple_graphdef")
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
index = triton_client.get_model_repository_index()
indexed = list()
self.assertEqual(len(index), 8)
for i in index:
indexed.append(i["name"])
if i["name"] == "netdef_float32_float32_float32":
self.assertEqual(i["state"], "UNAVAILABLE")
self.assertEqual(
i["reason"],
"model appears in two or more repositories")
for model_base in model_bases:
model_name = tu.get_model_name(model_base, np.float32,
np.float32, np.float32)
self.assertTrue(model_name in indexed)
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
index = triton_client.get_model_repository_index()
indexed = list()
self.assertEqual(len(index.models), 8)
for i in index.models:
indexed.append(i.name)
if i.name == "netdef_float32_float32_float32":
self.assertEqual(i.state, "UNAVAILABLE")
self.assertEqual(
i.reason, "model appears in two or more repositories")
for model_base in model_bases:
model_name = tu.get_model_name(model_base, np.float32,
np.float32, np.float32)
self.assertTrue(model_name in indexed)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run the lifecycle test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
# qa/L0_lifecycle/lifecycle_test.py
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import os
import shutil
import time
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import tritongrpcclient as grpcclient
import tritonhttpclient as httpclient
from tritonclientutils import InferenceServerException
class LifeCycleTest(tu.TestResultCollector):
def _infer_success_models(self,
model_base_names,
versions,
tensor_shape,
swap=False):
for base_name in model_base_names:
try:
model_name = tu.get_model_name(base_name, np.float32,
np.float32, np.float32)
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
# FIXME is_server_ready should be true here DLIS-1296
# self.assertTrue(triton_client.is_server_ready())
for v in versions:
self.assertTrue(
triton_client.is_model_ready(model_name, str(v)))
for v in versions:
iu.infer_exact(self,
base_name,
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
model_version=v,
swap=(swap or (v == 3)))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_parse_error_noexit(self):
# Server was started with invalid args and
# --exit-on-error=false so expect it to be running with
# SERVER_FAILED_TO_INITIALIZE status.
# Server is not live and not ready regardless of --strict-readiness
try:
triton_client = grpcclient.InferenceServerClient("localhost:8001",
verbose=True)
self.assertFalse(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
md = triton_client.get_server_metadata()
self.assertEqual(os.environ["TRITON_SERVER_VERSION"], md.version)
self.assertEqual("triton", md.name)
except InferenceServerException as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
triton_client = httpclient.InferenceServerClient("localhost:8000",
verbose=True)
self.assertFalse(triton_client.is_server_live())
self.assertFalse(triton_client.is_server_ready())
md = triton_client.get_server_metadata()
self.assertEqual(os.environ["TRITON_SERVER_VERSION"], md['version'])
self.assertEqual("triton", md['name'])
except InferenceServerException as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_modelfail(self):
        """One model ('graphdef') fails to load at startup.

        With --strict-readiness=true the server is live but NOT ready; the
        failed model is unavailable for inference while the remaining
        models still load and serve correctly.
        """
        # --strict-readiness=true so server is live but not ready
        tensor_shape = (1, 16)

        # Server was started but with a model that fails to load
        try:
            model_name = tu.get_model_name('graphdef', np.float32, np.float32,
                                           np.float32)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertFalse(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))

            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertFalse(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Inferencing with the missing model should fail.
        try:
            iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
                           np.float32, np.float32)
            self.assertTrue(
                False, "expected error for unavailable model " + model_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
            ))

        # And other models should be loaded successfully
        try:
            for base_name in ["savedmodel", 'netdef']:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(
                        triton_client.is_model_ready(model_name, "1"))
                # NOTE(review): infer_exact placed at base_name level (it does
                # not use triton_client) -- indentation reconstructed.
                iu.infer_exact(self,
                               base_name,
                               tensor_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_modelfail_nostrict(self):
        """Same scenario as test_parse_error_modelfail but with
        --strict-readiness=false: the server reports live AND ready even
        though one model ('graphdef') failed to load.
        """
        # --strict-readiness=false so server is live and ready
        tensor_shape = (1, 16)

        # Server was started but with a model that fails to load
        try:
            model_name = tu.get_model_name('graphdef', np.float32, np.float32,
                                           np.float32)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertTrue(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))

            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            self.assertTrue(triton_client.is_server_live())
            self.assertTrue(triton_client.is_server_ready())
            self.assertFalse(triton_client.is_model_ready(model_name, "1"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Inferencing with the missing model should fail.
        try:
            iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
                           np.float32, np.float32)
            self.assertTrue(
                False, "expected error for unavailable model " + model_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
            ))

        # And other models should be loaded successfully
        try:
            for base_name in ["savedmodel", 'netdef']:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(
                        triton_client.is_model_ready(model_name, "1"))
                # NOTE(review): infer_exact placed at base_name level (it does
                # not use triton_client) -- indentation reconstructed.
                iu.infer_exact(self,
                               base_name,
                               tensor_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_no_model_config(self):
        """A model that fails repository polling ('graphdef') must be
        ignored: metadata requests for it fail with "not found", while the
        server stays live/ready (non-strict readiness) and other models
        keep serving.
        """
        tensor_shape = (1, 16)

        # Server was started but with a model that fails to be polled
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                model_name = tu.get_model_name('graphdef', np.float32,
                                               np.float32, np.float32)

                # expecting ready because not strict readiness
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())

                # Metadata request must raise: the model was never registered.
                md = triton_client.get_model_metadata(model_name, "1")
                self.assertTrue(
                    False, "expected model '" + model_name +
                    "' to be ignored due to polling failure")
            except Exception as ex:
                self.assertTrue(ex.message().startswith(
                    "Request for unknown model: 'graphdef_float32_float32_float32' is not found"
                ))

            # And other models should be loaded successfully
            try:
                for base_name in ["savedmodel", 'netdef']:
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(triton_client.is_model_ready(model_name, "1"))
                    iu.infer_exact(self,
                                   base_name,
                                   tensor_shape,
                                   1,
                                   np.float32,
                                   np.float32,
                                   np.float32,
                                   model_version=1)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
    def test_init_error_modelfail(self):
        """Custom models that fail at initialization must be not-ready while
        the rest of the repository loads and serves normally.

        With --strict-readiness=true the server is live but not ready.
        """
        # --strict-readiness=true so server is live but not ready
        # Server was started but with models that fail to load
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertFalse(triton_client.is_server_ready())

                # one model uses sequence batcher while the other uses dynamic batcher
                model_names = [
                    "custom_sequence_int32", "custom_int32_int32_int32"
                ]
                for model_name in model_names:
                    self.assertFalse(triton_client.is_model_ready(model_name))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

            # And other models should be loaded successfully
            try:
                for base_name in ["graphdef", "savedmodel", 'netdef']:
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(triton_client.is_model_ready(model_name))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Healthy models must also produce correct inference results.
        try:
            tensor_shape = (1, 16)
            for base_name in ["graphdef", "savedmodel", 'netdef']:
                iu.infer_exact(self,
                               base_name,
                               tensor_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_parse_error_model_no_version(self):
        """A model with no usable version directory ('graphdef') must be
        not-ready and reject inference with "has no available versions",
        while other models (and both 'plan' versions) work.

        With --strict-readiness=true the server is live but not ready.
        """
        # --strict-readiness=true so server is live but not ready
        tensor_shape = (1, 16)

        # Server was started but with a model that fails to load
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertFalse(triton_client.is_server_ready())

                model_name = tu.get_model_name('graphdef', np.float32,
                                               np.float32, np.float32)
                self.assertFalse(triton_client.is_model_ready(model_name))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

            # Sanity check that other models are loaded properly
            try:
                for base_name in ["savedmodel", "netdef"]:
                    model_name = tu.get_model_name(base_name, np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(triton_client.is_model_ready(model_name))
                for version in ["1", "3"]:
                    model_name = tu.get_model_name("plan", np.float32,
                                                   np.float32, np.float32)
                    self.assertTrue(
                        triton_client.is_model_ready(model_name, version))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Healthy models must also infer correctly on every version.
        try:
            for base_name in ["savedmodel", "netdef"]:
                iu.infer_exact(self,
                               base_name,
                               tensor_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               swap=True)
            for version in [1, 3]:
                iu.infer_exact(self,
                               'plan',
                               tensor_shape,
                               1,
                               np.float32,
                               np.float32,
                               np.float32,
                               swap=(version == 3),
                               model_version=version)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # The version-less model must reject inference outright.
        try:
            iu.infer_exact(self, 'graphdef', tensor_shape, 1, np.float32,
                           np.float32, np.float32)
            self.assertTrue(
                False, "expected error for unavailable model " + model_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_float32_float32_float32' has no available versions"
            ))
def test_parse_ignore_zero_prefixed_version(self):
tensor_shape = (1, 16)
# Server was started but only version 1 is loaded
for triton_client in (httpclient.InferenceServerClient("localhost:8000",
verbose=True),
grpcclient.InferenceServerClient("localhost:8001",
verbose=True)):
try:
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
model_name = tu.get_model_name('savedmodel', np.float32,
np.float32, np.float32)
self.assertTrue(triton_client.is_model_ready(model_name, "1"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
# swap=False for version 1
iu.infer_exact(self,
'savedmodel',
tensor_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
    def test_dynamic_model_load_unload(self):
        """Poll-mode lifecycle: add/remove model directories on disk and
        verify the server picks the changes up.

        Sequence: savedmodel absent -> copied in (loads) -> inference ->
        stats accumulate -> removed (unloads, inference fails) -> re-added
        (stats reset to zero) -> netdef removed (unloads, inference fails).
        The 5-second sleeps give the repository poller time to react;
        every step depends on the state left by the previous one.
        """
        tensor_shape = (1, 16)
        savedmodel_name = tu.get_model_name('savedmodel', np.float32,
                                            np.float32, np.float32)
        netdef_name = tu.get_model_name('netdef', np.float32, np.float32,
                                        np.float32)

        # Make sure savedmodel model is not in the status (because
        # initially it is not in the model repository)
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))

        # Add savedmodel model to the model repository and give it time to
        # load. Make sure that it has a status and is ready.
        try:
            shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Run inference on the just loaded model
        try:
            iu.infer_exact(self,
                           'savedmodel',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Make sure savedmodel has execution stats: the inference above
        # ran against version 3 (default), so version 1 stays at zero.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats["model_stats"]), 2)
            for idx in range(len(stats["model_stats"])):
                self.assertEqual(stats["model_stats"][idx]["name"],
                                 savedmodel_name)
                if stats["model_stats"][idx]["version"] == "1":
                    self.assertEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)
                else:
                    self.assertNotEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats.model_stats), 2)
            for idx in range(len(stats.model_stats)):
                self.assertEqual(stats.model_stats[idx].name, savedmodel_name)
                if stats.model_stats[idx].version == "1":
                    self.assertEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
                else:
                    self.assertNotEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Remove savedmodel model from the model repository and give it
        # time to unload. Make sure that it is no longer available.
        try:
            shutil.rmtree("models/" + savedmodel_name)
            time.sleep(5)  # wait for model to unload
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Model is removed so inference should fail
        try:
            iu.infer_exact(self,
                           'savedmodel',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
            self.assertTrue(
                False,
                "expected error for unavailable model " + savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'savedmodel_float32_float32_float32' has no available versions"
            ))

        # Add back the same model. The status/stats should be reset.
        try:
            shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))

            # Reloading must reset success counts to zero on both versions.
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats["model_stats"]), 2)
            self.assertEqual(stats["model_stats"][0]["name"], savedmodel_name)
            self.assertEqual(stats["model_stats"][1]["name"], savedmodel_name)
            self.assertEqual(
                stats["model_stats"][0]["inference_stats"]["success"]["count"],
                0)
            self.assertEqual(
                stats["model_stats"][1]["inference_stats"]["success"]["count"],
                0)

            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(savedmodel_name)
            self.assertEqual(len(stats.model_stats), 2)
            self.assertEqual(stats.model_stats[0].name, savedmodel_name)
            self.assertEqual(stats.model_stats[1].name, savedmodel_name)
            self.assertEqual(stats.model_stats[0].inference_stats.success.count,
                             0)
            self.assertEqual(stats.model_stats[1].inference_stats.success.count,
                             0)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Remove netdef model from the model repository and give it
        # time to unload. Make sure that it is unavailable.
        try:
            shutil.rmtree("models/" + netdef_name)
            time.sleep(5)  # wait for model to unload
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertTrue(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertFalse(triton_client.is_model_ready(netdef_name, "1"))
                self.assertFalse(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

        # Model is removed so inference should fail
        try:
            iu.infer_exact(self,
                           'netdef',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
            self.assertTrue(
                False, "expected error for unavailable model " + netdef_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'netdef_float32_float32_float32' has no available versions"
            ))
    def test_dynamic_model_load_unload_disabled(self):
        """Verify that when dynamic load/unload is disabled, repository
        changes have no effect: a newly added model does not become
        servable, and a deleted model keeps serving.

        Integration test: requires a live Triton server on
        localhost:8000 (HTTP) / localhost:8001 (GRPC) and a writable
        'models/' repository directory in the working directory.
        """
        tensor_shape = (1, 16)
        savedmodel_name = tu.get_model_name('savedmodel', np.float32,
                                            np.float32, np.float32)
        netdef_name = tu.get_model_name('netdef', np.float32, np.float32,
                                        np.float32)
        # Make sure savedmodel model is not in the status (because
        # initially it is not in the model repository)
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Add savedmodel model to the model repository and give it time to
        # load. But it shouldn't load because dynamic loading is disabled.
        try:
            shutil.copytree(savedmodel_name, "models/" + savedmodel_name)
            time.sleep(5)  # wait for model to load (5s assumed to exceed poll interval)
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Run inference which should fail because the model isn't there
        try:
            iu.infer_exact(self,
                           'savedmodel',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
            self.assertTrue(
                False,
                "expected error for unavailable model " + savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'savedmodel_float32_float32_float32' is not found"
            ))
        # Remove one of the original models from the model repository.
        # Unloading is disabled so it should remain available in the status.
        try:
            shutil.rmtree("models/" + netdef_name)
            time.sleep(5)  # wait for model to unload (but it shouldn't)
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(netdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Run inference to make sure model still being served even
        # though deleted from model repository
        try:
            iu.infer_exact(self,
                           'netdef',
                           tensor_shape,
                           1,
                           np.float32,
                           np.float32,
                           np.float32,
                           swap=True)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_dynamic_version_load_unload(self):
        """Verify dynamic load/unload at *version* granularity: deleting a
        version directory makes that version unavailable, and copying in a
        new version directory makes it servable.

        Integration test: requires a live Triton server polling 'models/'.
        """
        tensor_shape = (1, 16)
        graphdef_name = tu.get_model_name('graphdef', np.int32, np.int32,
                                          np.int32)
        # There are 3 versions. Make sure that all have status and are
        # ready.
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Run inference on version 1 to make sure it is available
        try:
            iu.infer_exact(self,
                           'graphdef',
                           tensor_shape,
                           1,
                           np.int32,
                           np.int32,
                           np.int32,
                           swap=False,
                           model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Make sure only version 1 has execution stats in the status.
        # Checked via both HTTP (dict results) and GRPC (protobuf results).
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(graphdef_name)
            self.assertEqual(len(stats["model_stats"]), 3)
            for idx in range(len(stats["model_stats"])):
                self.assertEqual(stats["model_stats"][idx]["name"],
                                 graphdef_name)
                if stats["model_stats"][idx]["version"] == "1":
                    self.assertNotEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)
                else:
                    self.assertEqual(
                        stats["model_stats"][idx]["inference_stats"]["success"]
                        ["count"], 0)
            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            stats = triton_client.get_inference_statistics(graphdef_name)
            self.assertEqual(len(stats.model_stats), 3)
            for idx in range(len(stats.model_stats)):
                self.assertEqual(stats.model_stats[idx].name, graphdef_name)
                if stats.model_stats[idx].version == "1":
                    self.assertNotEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
                else:
                    self.assertEqual(
                        stats.model_stats[idx].inference_stats.success.count, 0)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Remove version 1 from the model repository and give it time to
        # unload. Make sure that it is unavailable.
        try:
            shutil.rmtree("models/" + graphdef_name + "/1")
            time.sleep(5)  # wait for version to unload
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Version is removed so inference should fail
        try:
            iu.infer_exact(self,
                           'graphdef',
                           tensor_shape,
                           1,
                           np.int32,
                           np.int32,
                           np.int32,
                           swap=False,
                           model_version=1)
            self.assertTrue(
                False, "expected error for unavailable model " + graphdef_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "Request for unknown model: 'graphdef_int32_int32_int32' version 1 is not at ready state"
            ))
        # Add another version to the model repository.
        try:
            shutil.copytree("models/" + graphdef_name + "/2",
                            "models/" + graphdef_name + "/7")
            time.sleep(5)  # wait for version to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "7"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_dynamic_version_load_unload_disabled(self):
        """Verify that with dynamic load/unload disabled, version-directory
        changes are ignored: a newly copied version does not become
        servable and a deleted version keeps serving.

        Integration test: requires a live Triton server and a writable
        'models/' repository directory.
        """
        tensor_shape = (1, 16)
        graphdef_name = tu.get_model_name('graphdef', np.int32, np.int32,
                                          np.int32)
        # Add a new version to the model repository and give it time to
        # load. But it shouldn't load because dynamic loading is
        # disabled.
        try:
            shutil.copytree("models/" + graphdef_name + "/2",
                            "models/" + graphdef_name + "/7")
            time.sleep(5)  # wait for model to load
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "7"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Remove one of the original versions from the model repository.
        # Unloading is disabled so it should remain available
        # in the status.
        try:
            shutil.rmtree("models/" + graphdef_name + "/1")
            time.sleep(5)  # wait for version to unload (but it shouldn't)
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "1"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "2"))
                self.assertTrue(triton_client.is_model_ready(
                    graphdef_name, "3"))
                self.assertFalse(
                    triton_client.is_model_ready(graphdef_name, "7"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Run inference to make sure model still being served even
        # though version deleted from model repository
        try:
            iu.infer_exact(self,
                           'graphdef',
                           tensor_shape,
                           1,
                           np.int32,
                           np.int32,
                           np.int32,
                           swap=False,
                           model_version=1)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_model_modify(self):
models_base = ('savedmodel', 'plan')
models_shape = ((1, 16), (1, 16))
models = list()
for m in models_base:
models.append(
tu.get_model_name(m, np.float32, np.float32, np.float32))
# Make sure savedmodel and plan are in the status
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the model, both versions 1 and 3
for version in (1, 3):
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Change the model configuration to use wrong label file
for base_name, model_name in zip(models_base, models):
shutil.copyfile("config.pbtxt.wrong." + base_name,
"models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version,
output0_raw=False)
self.assertTrue(
False,
"expected error for wrong label for " + model_name)
except AssertionError as ex:
self.assertTrue("'label9" in str(ex) and "!=" in str(ex),
str(ex))
# Change the model configuration to use correct label file and to have
# the default version policy (so that only version 3) is available.
for base_name, model_name in zip(models_base, models):
shutil.copyfile("config.pbtxt." + base_name,
"models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Attempt inferencing using version 1, should fail since
# change in model policy makes that no longer available.
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False,
model_version=1)
self.assertTrue(
False, "expected error for unavailable model " + model_name)
except Exception as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
# Version 3 should continue to work...
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True,
model_version=3)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
def test_dynamic_file_delete(self):
models_base = ('savedmodel', 'plan')
models_shape = ((1, 16), (1, 16))
models = list()
for m in models_base:
models.append(
tu.get_model_name(m, np.float32, np.float32, np.float32))
# Make sure savedmodel and plan are in the status
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertTrue(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Run inference on the model, both versions 1 and 3
for version in (1, 3):
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=(version == 3),
model_version=version)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Delete model configuration, which cause model to be
# re-loaded and use autofilled config, which means that
# version policy will be latest and so only version 3 will be
# available
for model_name in models:
os.remove("models/" + model_name + "/config.pbtxt")
time.sleep(5) # wait for models to reload
for model_name in models:
try:
for triton_client in (httpclient.InferenceServerClient(
"localhost:8000", verbose=True),
grpcclient.InferenceServerClient(
"localhost:8001", verbose=True)):
self.assertTrue(triton_client.is_server_live())
self.assertTrue(triton_client.is_server_ready())
self.assertFalse(
triton_client.is_model_ready(model_name, "1"))
self.assertTrue(
triton_client.is_model_ready(model_name, "3"))
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
# Only version 3 (latest) should work...
for model_name, model_shape in zip(models_base, models_shape):
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=True,
model_version=3)
except Exception as ex:
self.assertTrue(False, "unexpected error {}".format(ex))
try:
iu.infer_exact(self,
model_name,
model_shape,
1,
np.float32,
np.float32,
np.float32,
swap=False,
model_version=1)
self.assertTrue(
False,
"expected error for unavailable model " + graphdef_name)
except Exception as ex:
self.assertTrue(
ex.message().startswith("Request for unknown model"))
    def test_multiple_model_repository_polling(self):
        """Verify poll-mode behavior with two model repositories: a model
        present in both repositories is unloaded as a duplicate, and
        removing one copy lets the remaining copy load.

        Integration test: requires a live Triton server polling both
        'models/' and 'models_0/'.
        """
        model_shape = (1, 16)
        savedmodel_name = tu.get_model_name('savedmodel', np.float32,
                                            np.float32, np.float32)
        # Models should be loaded successfully and infer
        # successfully. Initially savedmodel only has version 1.
        self._infer_success_models([
            "savedmodel",
        ], (1,), model_shape)
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Add the savedmodel to the second model repository, should cause
        # it to be unloaded due to duplication
        shutil.copytree(savedmodel_name, "models_0/" + savedmodel_name)
        time.sleep(5)  # wait for models to reload
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Remove the savedmodel from the first model repository, the
        # model from the second model repository should be loaded
        # properly. In the second model repository savedmodel should
        # have versions 1 and 3.
        shutil.rmtree("models/" + savedmodel_name)
        time.sleep(5)  # wait for model to unload
        self._infer_success_models(["savedmodel", "graphdef", 'netdef'], (1, 3),
                                   model_shape)
    def test_multiple_model_repository_control(self):
        """Verify explicit model-control mode with two repositories:
        load/unload happen only via the API, and a model duplicated in two
        repositories fails to (re)load until one copy is removed.

        Integration test: requires a live Triton server in explicit
        model-control mode with 'models/' and 'models_0/' repositories.
        """
        # similar to test_multiple_model_repository_polling, but the
        # model load/unload is controlled by the API
        model_shape = (1, 16)
        savedmodel_name = tu.get_model_name("savedmodel", np.float32,
                                            np.float32, np.float32)
        model_bases = ['savedmodel', "graphdef", 'netdef']
        # Initially models are not loaded
        for base in model_bases:
            try:
                model_name = tu.get_model_name(base, np.float32, np.float32,
                                               np.float32)
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Load all models, here we use GRPC
        for base in model_bases:
            try:
                model_name = tu.get_model_name(base, np.float32, np.float32,
                                               np.float32)
                triton_client = grpcclient.InferenceServerClient(
                    "localhost:8001", verbose=True)
                triton_client.load_model(model_name)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Models should be loaded successfully and infer
        # successfully. Initially savedmodel only has version 1.
        self._infer_success_models([
            "savedmodel",
        ], (1,), model_shape)
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Add the savedmodel to the second model repository. Because
        # not polling this doesn't change any model state, all models
        # are still loaded and available.
        shutil.copytree(savedmodel_name, "models_0/" + savedmodel_name)
        self._infer_success_models([
            "savedmodel",
        ], (1,), model_shape)
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Reload savedmodel which will cause it to unload because it
        # is in 2 model repositories. Use HTTP here.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "failed to load '{}'".format(savedmodel_name)))
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(savedmodel_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models(["graphdef", 'netdef'], (1, 3), model_shape)
        # Remove the savedmodel from the first model repository and
        # explicitly load savedmodel. The savedmodel from the second
        # model repository should be loaded properly. In the second
        # model repository savedmodel should have versions 1 and 3.
        shutil.rmtree("models/" + savedmodel_name)
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(savedmodel_name)
        except Exception as ex:
            self.assertTrue(ex.message().startswith(
                "failed to load '{}'".format(savedmodel_name)))
        self._infer_success_models(["savedmodel", "graphdef", 'netdef'], (1, 3),
                                   model_shape)
    def test_model_control(self):
        """Verify explicit model-control API behavior, including ensemble
        dependencies: loading an ensemble pulls in its dependent model,
        unloading the dependent model forces the ensemble out, and an
        explicitly unloaded ensemble is not brought back by reloading the
        dependent model.

        Integration test: requires a live Triton server in explicit
        model-control mode with an 'onnx' model and a 'simple_' ensemble
        wrapping it in the repository.
        """
        model_shape = (1, 16)
        onnx_name = tu.get_model_name('onnx', np.float32,
                                      np.float32, np.float32)
        ensemble_prefix = "simple_"
        ensemble_name = ensemble_prefix + onnx_name
        # Make sure no models are loaded
        for model_name in (onnx_name, ensemble_name):
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Load non-existent model
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.load_model("unknown_model")
                self.assertTrue(False, "expected unknown model failure")
            except Exception as ex:
                self.assertTrue(ex.message().startswith(
                    "failed to load 'unknown_model', no version is available"))
        # Load ensemble model, the dependent model should be polled and loaded
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(ensemble_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models([
            "onnx",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        # Delete model configuration for onnx, which will cause
        # the autofiller to use the latest version policy so that only
        # version 3 will be available if the models are re-loaded
        for model_name in (onnx_name,):
            os.remove("models/" + model_name + "/config.pbtxt")
        # Not yet re-loaded, so both versions are still served.
        self._infer_success_models([
            "onnx",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        # Reload models, only version 3 should be available for onnx
        for model_name in (onnx_name, ensemble_name):
            try:
                triton_client = grpcclient.InferenceServerClient(
                    "localhost:8001", verbose=True)
                triton_client.load_model(model_name)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        for model_name in (onnx_name,):
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Unload non-existing model, nothing should happen
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.unload_model("unknown_model")
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Unload the depending model, as side effect, the ensemble model will be
        # forced to be unloaded
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        for model_name in (onnx_name, ensemble_name):
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Explicitly unload the ensemble and load the depending
        # model. The ensemble model should not be reloaded because it
        # was explicitly unloaded.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(ensemble_name)
            triton_client.load_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(ensemble_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(ensemble_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
    def test_multiple_model_repository_control_startup_models(self):
        """Verify explicit model-control mode when some models (onnx, plan,
        simple_onnx) were loaded at server startup via --load-model:
        startup models serve immediately, additional models load on demand,
        and ensemble load/unload dependencies behave as in
        test_model_control.

        Integration test: requires a live Triton server started in explicit
        model-control mode with the startup models above.
        """
        model_shape = (1, 16)
        onnx_name = tu.get_model_name('onnx', np.float32,
                                      np.float32, np.float32)
        plan_name = tu.get_model_name('plan', np.float32, np.float32,
                                      np.float32)
        ensemble_prefix = "simple_"
        onnx_ensemble_name = ensemble_prefix + onnx_name
        plan_ensemble_name = ensemble_prefix + plan_name
        # Make sure unloaded models are not in the status
        for base in ("netdef",):
            model_name = tu.get_model_name(base, np.float32, np.float32,
                                           np.float32)
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # And loaded models work properly
        self._infer_success_models([
            "onnx",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        # Load non-existing model
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.load_model("unknown_model")
                self.assertTrue(False, "expected unknown model failure")
            except Exception as ex:
                self.assertTrue(ex.message().startswith(
                    "failed to load 'unknown_model', no version is available"))
        # Load plan ensemble model, the dependent model is already
        # loaded via command-line
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.load_model(plan_ensemble_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_plan",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        # Delete model configuration, which will cause the autofiller
        # to use the latest version policy so that only version 3 will
        # be available if the models are re-loaded
        os.remove("models/" + onnx_name + "/config.pbtxt")
        # Not yet re-loaded, so plan models keep serving both versions.
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_plan",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        # Reload onnx, only version 3 should be available
        try:
            triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                             verbose=True)
            triton_client.load_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        self._infer_success_models([
            "simple_onnx",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(onnx_name, "1"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        # Unload non-existing model, nothing should happen
        for triton_client in (httpclient.InferenceServerClient("localhost:8000",
                                                               verbose=True),
                              grpcclient.InferenceServerClient("localhost:8001",
                                                               verbose=True)):
            try:
                triton_client.unload_model("unknown_model")
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Unload the onnx, as side effect, the ensemble model
        # will be forced to be unloaded
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        for model_name in [onnx_name, onnx_ensemble_name]:
            try:
                for triton_client in (httpclient.InferenceServerClient(
                        "localhost:8000", verbose=True),
                                      grpcclient.InferenceServerClient(
                                          "localhost:8001", verbose=True)):
                    self.assertTrue(triton_client.is_server_live())
                    self.assertTrue(triton_client.is_server_ready())
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "1"))
                    self.assertFalse(
                        triton_client.is_model_ready(model_name, "3"))
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
        # Explicitly unload the onnx ensemble and load the
        # depending model. The ensemble model should not be reloaded
        # because it was explicitly unloaded.
        try:
            triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                             verbose=True)
            triton_client.unload_model(onnx_ensemble_name)
            triton_client.load_model(onnx_name)
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
        self._infer_success_models([
            "onnx",
        ], (3,), model_shape)
        self._infer_success_models([
            "plan",
        ], (1, 3), model_shape)
        self._infer_success_models([
            "simple_plan",
        ], (1, 3),
                                   model_shape,
                                   swap=True)
        try:
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertFalse(
                    triton_client.is_model_ready(onnx_ensemble_name, "1"))
                self.assertFalse(
                    triton_client.is_model_ready(onnx_ensemble_name, "3"))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))
def test_model_repository_index(self):
    """Check the model repository index reported over both HTTP and GRPC.

    The server runs with model control mode EXPLICIT and --load-model
    selecting a subset of the repository, so only the expected models
    should be loaded, and the duplicated netdef model must be reported
    as UNAVAILABLE with a "two or more repositories" reason.
    """
    # use model control EXPLICIT and --load-model to load a subset of models
    # in model repository
    tensor_shape = (1, 16)  # NOTE(review): unused in this test — confirm before removing
    model_bases = ["graphdef", "savedmodel", "simple_savedmodel"]

    # Sanity check on loaded models
    # 3 models should be loaded:
    #   simple_savedmodel_float32_float32_float32
    #   savedmodel_float32_float32_float32
    #   graphdef_float32_float32_float32
    for model_base in model_bases:
        try:
            model_name = tu.get_model_name(model_base, np.float32,
                                           np.float32, np.float32)
            for triton_client in (httpclient.InferenceServerClient(
                    "localhost:8000", verbose=True),
                                  grpcclient.InferenceServerClient(
                                      "localhost:8001", verbose=True)):
                self.assertTrue(triton_client.is_server_live())
                self.assertTrue(triton_client.is_server_ready())
                self.assertTrue(triton_client.is_model_ready(model_name))
        except Exception as ex:
            self.assertTrue(False, "unexpected error {}".format(ex))

    # Check model repository index
    # All models should be in ready state except netdef_float32_float32_float32
    # which appears in two repositories.
    model_bases.append("simple_graphdef")
    try:
        # HTTP index: a JSON list of dicts with "name"/"state"/"reason".
        triton_client = httpclient.InferenceServerClient("localhost:8000",
                                                         verbose=True)
        index = triton_client.get_model_repository_index()
        indexed = list()
        self.assertEqual(len(index), 8)
        for i in index:
            indexed.append(i["name"])
            if i["name"] == "netdef_float32_float32_float32":
                self.assertEqual(i["state"], "UNAVAILABLE")
                self.assertEqual(
                    i["reason"],
                    "model appears in two or more repositories")
        for model_base in model_bases:
            model_name = tu.get_model_name(model_base, np.float32,
                                           np.float32, np.float32)
            self.assertTrue(model_name in indexed)

        # GRPC index: a protobuf response with attribute access instead
        # of dict keys; the same expectations must hold.
        triton_client = grpcclient.InferenceServerClient("localhost:8001",
                                                         verbose=True)
        index = triton_client.get_model_repository_index()
        indexed = list()
        self.assertEqual(len(index.models), 8)
        for i in index.models:
            indexed.append(i.name)
            if i.name == "netdef_float32_float32_float32":
                self.assertEqual(i.state, "UNAVAILABLE")
                self.assertEqual(
                    i.reason, "model appears in two or more repositories")
        for model_base in model_bases:
            model_name = tu.get_model_name(model_base, np.float32,
                                           np.float32, np.float32)
            self.assertTrue(model_name in indexed)
    except Exception as ex:
        self.assertTrue(False, "unexpected error {}".format(ex))
if __name__ == '__main__':
    # Run every TestCase in this module through the unittest CLI runner.
    unittest.main()
|
en
| 0.890584
|
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # FIXME is_server_ready should be true here DLIS-1296 # self.assertTrue(triton_client.is_server_ready()) # Server was started with invalid args and # --exit-on-error=false so expect it to be running with # SERVER_FAILED_TO_INITIALIZE status. # Server is not live and not ready regardless of --strict-readiness # --strict-readiness=true so server is live but not ready # Server was started but with a model that fails to load # Inferencing with the missing model should fail. 
# And other models should be loaded successfully # --strict-readiness=false so server is live and ready # Server was started but with a model that fails to load # Inferencing with the missing model should fail. # And other models should be loaded successfully # Server was started but with a model that fails to be polled # expecting ready because not strict readiness # And other models should be loaded successfully # --strict-readiness=true so server is live but not ready # Server was started but with models that fail to load # one model uses sequence batcher while the other uses dynamic batcher # And other models should be loaded successfully # --strict-readiness=true so server is live but not ready # Server was started but with a model that fails to load # Sanity check that other models are loaded properly # Server was started but only version 1 is loaded # swap=False for version 1 # Make sure savedmodel model is not in the status (because # initially it is not in the model repository) # Add savedmodel model to the model repository and give it time to # load. Make sure that it has a status and is ready. # wait for model to load # Run inference on the just loaded model # Make sure savedmodel has execution stats # Remove savedmodel model from the model repository and give it # time to unload. Make sure that it is no longer available. # wait for model to unload # Model is removed so inference should fail # Add back the same model. The status/stats should be reset. # wait for model to load # Remove netdef model from the model repository and give it # time to unload. Make sure that it is unavailable. # wait for model to unload # Model is removed so inference should fail # Make sure savedmodel model is not in the status (because # initially it is not in the model repository) # Add savedmodel model to the model repository and give it time to # load. But it shouldn't load because dynamic loading is disabled. 
# wait for model to load # Run inference which should fail because the model isn't there # Remove one of the original models from the model repository. # Unloading is disabled so it should remain available in the status. # wait for model to unload (but it shouldn't) # Run inference to make sure model still being served even # though deleted from model repository # There are 3 versions. Make sure that all have status and are # ready. # Run inference on version 1 to make sure it is available # Make sure only version 1 has execution stats in the status. # Remove version 1 from the model repository and give it time to # unload. Make sure that it is unavailable. # wait for version to unload # Version is removed so inference should fail # Add another version to the model repository. # wait for version to load # Add a new version to the model repository and give it time to # load. But it shouldn't load because dynamic loading is # disabled. # wait for model to load # Remove one of the original versions from the model repository. # Unloading is disabled so it should remain available # in the status. # wait for version to unload (but it shouldn't) # Run inference to make sure model still being served even # though version deleted from model repository # Make sure savedmodel and plan are in the status # Run inference on the model, both versions 1 and 3 # Change the model configuration to use wrong label file # wait for models to reload # Change the model configuration to use correct label file and to have # the default version policy (so that only version 3) is available. # wait for models to reload # Attempt inferencing using version 1, should fail since # change in model policy makes that no longer available. # Version 3 should continue to work... 
# Make sure savedmodel and plan are in the status # Run inference on the model, both versions 1 and 3 # Delete model configuration, which cause model to be # re-loaded and use autofilled config, which means that # version policy will be latest and so only version 3 will be # available # wait for models to reload # Only version 3 (latest) should work... # Models should be loaded successfully and infer # successfully. Initially savedmodel only has version 1. # Add the savedmodel to the second model repository, should cause # it to be unloaded due to duplication # wait for models to reload # Remove the savedmodel from the first model repository, the # model from the second model repository should be loaded # properly. In the second model repository savedmodel should # have versions 1 and 3. # wait for model to unload # similar to test_multiple_model_repository_polling, but the # model load/unload is controlled by the API # Initially models are not loaded # Load all models, here we use GRPC # Models should be loaded successfully and infer # successfully. Initially savedmodel only has version 1. # Add the savedmodel to the second model repository. Because # not polling this doesn't change any model state, all models # are still loaded and available. # Reload savedmodel which will cause it to unload because it # is in 2 model repositories. Use HTTP here. # Remove the savedmodel from the first model repository and # explicitly load savedmodel. The savedmodel from the second # model repository should be loaded properly. In the second # model repository savedmodel should have versions 1 and 3. 
# Make sure no models are loaded # Load non-existent model # Load ensemble model, the dependent model should be polled and loaded # Delete model configuration for onnx, which will cause # the autofiller to use the latest version policy so that only # version 3 will be available if the models are re-loaded # Reload models, only version 3 should be available for onnx # Unload non-existing model, nothing should happen # Unload the depending model, as side effect, the ensemble model will be # forced to be unloaded # Explicitly unload the ensemble and load the depending # model. The ensemble model should not be reloaded because it # was explicitly unloaded. # Make sure unloaded models are not in the status # And loaded models work properly # Load non-existing model # Load plan ensemble model, the dependent model is already # loaded via command-line # Delete model configuration, which will cause the autofiller # to use the latest version policy so that only version 3 will # be available if the models are re-loaded # Reload onnx, only version 3 should be available # Unload non-existing model, nothing should happen # Unload the onnx, as side effect, the ensemble model # will be forced to be unloaded # Explicitly unload the onnx ensemble and load the # depending model. The ensemble model should not be reloaded # because it was explicitly unloaded. # use model control EXPLIT and --load-model to load a subset of models # in model repository # Sanity check on loaded models # 3 models should be loaded: # simple_savedmodel_float32_float32_float32 # savedmodel_float32_float32_float32 # graphdef_float32_float32_float32 # Check model repository index # All models should be in ready state except netdef_float32_float32_float32 # which appears in two repositories.
| 1.525813
| 2
|
rlbox/rand/sampler.py
|
ocraft/rl-sandbox
| 2
|
6626037
|
<gh_stars>1-10
import itertools
import numpy as np
# Size of each precomputed batch of random draws.
SAMPLER_CACHE = 10000


def cache_gen(source):
    """Yield items forever, calling ``source()`` for a fresh batch
    whenever the current one is exhausted."""
    batch = source()
    while True:
        yield from batch
        batch = source()
class Sampler:
    """Serves random samples from several distributions, drawn from
    precomputed batches for speed."""

    # Endless streams refilled in batches of SAMPLER_CACHE draws.
    randn_gen = cache_gen(lambda: np.random.standard_normal(SAMPLER_CACHE))
    rand_gen = cache_gen(lambda: np.random.random(SAMPLER_CACHE))

    @classmethod
    def standard_normal(cls, size=1):
        """Return a list of ``size`` standard-normal draws."""
        return [next(cls.randn_gen) for _ in range(size)]

    @classmethod
    def randn(cls):
        """Return one standard-normal draw."""
        return next(cls.randn_gen)

    @classmethod
    def rand(cls):
        """Return one uniform draw from [0, 1)."""
        return next(cls.rand_gen)

    @classmethod
    def rint(cls, max_exclusive):
        """Return a uniform random integer from [0, max_exclusive)."""
        return np.random.randint(max_exclusive)
|
import itertools
import numpy as np
# Size of each precomputed batch of random draws.
SAMPLER_CACHE = 10000


def cache_gen(source):
    """Yield items forever, calling ``source()`` for a fresh batch
    whenever the current one is exhausted."""
    batch = source()
    while True:
        yield from batch
        batch = source()
class Sampler:
    """Serves random samples from several distributions, drawn from
    precomputed batches for speed."""

    # Endless streams refilled in batches of SAMPLER_CACHE draws.
    randn_gen = cache_gen(lambda: np.random.standard_normal(SAMPLER_CACHE))
    rand_gen = cache_gen(lambda: np.random.random(SAMPLER_CACHE))

    @classmethod
    def standard_normal(cls, size=1):
        """Return a list of ``size`` standard-normal draws."""
        return [next(cls.randn_gen) for _ in range(size)]

    @classmethod
    def randn(cls):
        """Return one standard-normal draw."""
        return next(cls.randn_gen)

    @classmethod
    def rand(cls):
        """Return one uniform draw from [0, 1)."""
        return next(cls.rand_gen)

    @classmethod
    def rint(cls, max_exclusive):
        """Return a uniform random integer from [0, max_exclusive)."""
        return np.random.randint(max_exclusive)
|
en
| 0.873309
|
Provides precomputed random samples of various distributions.
| 3.081449
| 3
|
mars/tensor/expressions/fuse/core.py
|
lmatz/mars
| 1
|
6626038
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .... import operands
from ....tiles import NotSupportTile
from ..core import TensorOperandMixin
class TensorFuseChunk(operands.Fuse, TensorOperandMixin):
    """Chunk-level operand representing a sequence of fused tensor chunks.

    Fuse operands exist only at chunk level, so :meth:`tile` always raises
    :class:`NotSupportTile`.
    """

    def __init__(self, dtype=None, **kw):
        super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)

    @classmethod
    def tile(cls, op):
        # BUG FIX: the message previously said 'FetchChunk' — a copy-paste
        # from the fetch operand; report the correct operand name.
        raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')
class TensorFuseChunkMixin(TensorOperandMixin):
    """Mixin providing chunk-composition behaviour for fuse chunk operands."""

    __slots__ = ()

    @classmethod
    def tile(cls, op):
        # BUG FIX: the message previously said 'FetchChunk' — a copy-paste
        # from the fetch operand; report the correct operand name.
        raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')

    def __call__(self, fuse_chunks):
        """Compose *fuse_chunks* (in execution order) into a single chunk.

        The new chunk takes its inputs from the first chunk and its shape
        and key from the last; the fused operands are recorded on
        ``self._operands`` and the original chunks kept in ``_composed``.
        """
        head_chunk = fuse_chunks[0]
        tail_chunk = fuse_chunks[-1]
        setattr(self, '_operands', [c.op for c in fuse_chunks])
        return self.new_chunk(head_chunk.inputs, tail_chunk.shape,
                              _composed=fuse_chunks, _key=tail_chunk.key)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .... import operands
from ....tiles import NotSupportTile
from ..core import TensorOperandMixin
class TensorFuseChunk(operands.Fuse, TensorOperandMixin):
    """Chunk-level operand representing a sequence of fused tensor chunks.

    Fuse operands exist only at chunk level, so :meth:`tile` always raises
    :class:`NotSupportTile`.
    """

    def __init__(self, dtype=None, **kw):
        super(TensorFuseChunk, self).__init__(_dtype=dtype, **kw)

    @classmethod
    def tile(cls, op):
        # BUG FIX: the message previously said 'FetchChunk' — a copy-paste
        # from the fetch operand; report the correct operand name.
        raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')
class TensorFuseChunkMixin(TensorOperandMixin):
    """Mixin providing chunk-composition behaviour for fuse chunk operands."""

    __slots__ = ()

    @classmethod
    def tile(cls, op):
        # BUG FIX: the message previously said 'FetchChunk' — a copy-paste
        # from the fetch operand; report the correct operand name.
        raise NotSupportTile('TensorFuseChunk is a chunk operand which does not support tile')

    def __call__(self, fuse_chunks):
        """Compose *fuse_chunks* (in execution order) into a single chunk.

        The new chunk takes its inputs from the first chunk and its shape
        and key from the last; the fused operands are recorded on
        ``self._operands`` and the original chunks kept in ``_composed``.
        """
        head_chunk = fuse_chunks[0]
        tail_chunk = fuse_chunks[-1]
        setattr(self, '_operands', [c.op for c in fuse_chunks])
        return self.new_chunk(head_chunk.inputs, tail_chunk.shape,
                              _composed=fuse_chunks, _key=tail_chunk.key)
|
en
| 0.820834
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2018 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
| 2.08458
| 2
|
sqlite_dbint/field_formatter.py
|
acollu/sqlite_dbint
| 0
|
6626039
|
from .errors import InvalidConditionFormat, InvalidConditionSequence, InvalidLogicalOperator, InvalidComparisonMember, InvalidConditionalOperator
class FieldFormatter:
    """Static helpers that format SQL fragments for sqlite queries:
    quoted identifiers, literal values, WHERE conditions and ORDER BY
    clauses."""

    # equal, not equal, higher, lower, higher equal, lower equal
    comparison_operators = ["=", "<>", ">", "<", ">=", "<="]
    logical_operators = ["AND", "OR"]

    @staticmethod
    def format_table_name(table_name):
        """Return *table_name* backtick-quoted; raise TypeError for non-str."""
        if isinstance(table_name, str):
            return "`" + table_name + "`"
        # BUG FIX: the original evaluated the bare expression ``TypeError``
        # (a no-op) and silently returned None; actually raise it.
        raise TypeError

    @staticmethod
    def format_attribute(attribute):
        """Return a column name backtick-quoted; raise TypeError for non-str."""
        if isinstance(attribute, str):
            return "`" + attribute + "`"
        # BUG FIX: raise instead of evaluating TypeError as a no-op.
        raise TypeError

    @staticmethod
    def format_attributes(attributes):
        """Format a list of column names, or the builtin ``all`` for ``*``."""
        if attributes is all:
            return "*"
        if isinstance(attributes, list):
            return ", ".join(
                [FieldFormatter.format_attribute(attribute) for attribute in attributes])
        # BUG FIX: raise instead of evaluating TypeError as a no-op.
        raise TypeError

    @staticmethod
    def format_value(value):
        """Format a literal: ints/floats via str(), strings double-quoted."""
        if isinstance(value, int):
            return str(value)
        elif isinstance(value, float):
            return str(value)
        elif isinstance(value, str):
            return '"' + value + '"'
        else:
            raise TypeError

    @staticmethod
    def format_values(values):
        """Format an iterable of literals as a comma-separated list."""
        # BUG FIX: the original referenced ``self`` inside a @staticmethod,
        # which is a NameError at call time; call through the class.
        return ", ".join([FieldFormatter.format_value(value) for value in values])

    @staticmethod
    def format_condition(condition):
        """Format a WHERE clause.

        *condition* is ``None`` (no clause) or a list alternating
        3-tuples ``(attribute, operator, value)`` with logical operators
        ("AND"/"OR"); it must not end with a logical operator.
        """
        if condition is None:
            return ""
        if isinstance(condition, list):
            formatted_condition = "WHERE "
            for i, member in enumerate(condition):
                if i % 2:
                    # Odd positions must hold logical operators, and a
                    # logical operator may not terminate the sequence.
                    if i + 1 == len(condition):
                        raise InvalidConditionSequence
                    if member not in FieldFormatter.logical_operators:
                        raise InvalidLogicalOperator
                    formatted_condition += " " + member + " "
                else:
                    if len(member) != 3:
                        raise InvalidComparisonMember
                    if member[1] not in FieldFormatter.comparison_operators:
                        raise InvalidConditionalOperator
                    member = list(member)
                    member[0] = FieldFormatter.format_attribute(member[0])
                    member[2] = FieldFormatter.format_value(member[2])
                    formatted_condition += " ".join(member)
            return formatted_condition
        raise InvalidConditionFormat

    @staticmethod
    def format_order(order_attributes, order_type):
        """Format an ORDER BY clause; ``None`` yields an empty string."""
        if order_attributes is None:
            return ""
        if isinstance(order_attributes, list):
            # BUG FIX: ``self`` is undefined inside a @staticmethod;
            # call through the class.
            return "ORDER BY " + FieldFormatter.format_attributes(order_attributes) + " " + order_type
        raise TypeError
|
from .errors import InvalidConditionFormat, InvalidConditionSequence, InvalidLogicalOperator, InvalidComparisonMember, InvalidConditionalOperator
class FieldFormatter:
    """Static helpers that format SQL fragments for sqlite queries:
    quoted identifiers, literal values, WHERE conditions and ORDER BY
    clauses."""

    # equal, not equal, higher, lower, higher equal, lower equal
    comparison_operators = ["=", "<>", ">", "<", ">=", "<="]
    logical_operators = ["AND", "OR"]

    @staticmethod
    def format_table_name(table_name):
        """Return *table_name* backtick-quoted; raise TypeError for non-str."""
        if isinstance(table_name, str):
            return "`" + table_name + "`"
        # BUG FIX: the original evaluated the bare expression ``TypeError``
        # (a no-op) and silently returned None; actually raise it.
        raise TypeError

    @staticmethod
    def format_attribute(attribute):
        """Return a column name backtick-quoted; raise TypeError for non-str."""
        if isinstance(attribute, str):
            return "`" + attribute + "`"
        # BUG FIX: raise instead of evaluating TypeError as a no-op.
        raise TypeError

    @staticmethod
    def format_attributes(attributes):
        """Format a list of column names, or the builtin ``all`` for ``*``."""
        if attributes is all:
            return "*"
        if isinstance(attributes, list):
            return ", ".join(
                [FieldFormatter.format_attribute(attribute) for attribute in attributes])
        # BUG FIX: raise instead of evaluating TypeError as a no-op.
        raise TypeError

    @staticmethod
    def format_value(value):
        """Format a literal: ints/floats via str(), strings double-quoted."""
        if isinstance(value, int):
            return str(value)
        elif isinstance(value, float):
            return str(value)
        elif isinstance(value, str):
            return '"' + value + '"'
        else:
            raise TypeError

    @staticmethod
    def format_values(values):
        """Format an iterable of literals as a comma-separated list."""
        # BUG FIX: the original referenced ``self`` inside a @staticmethod,
        # which is a NameError at call time; call through the class.
        return ", ".join([FieldFormatter.format_value(value) for value in values])

    @staticmethod
    def format_condition(condition):
        """Format a WHERE clause.

        *condition* is ``None`` (no clause) or a list alternating
        3-tuples ``(attribute, operator, value)`` with logical operators
        ("AND"/"OR"); it must not end with a logical operator.
        """
        if condition is None:
            return ""
        if isinstance(condition, list):
            formatted_condition = "WHERE "
            for i, member in enumerate(condition):
                if i % 2:
                    # Odd positions must hold logical operators, and a
                    # logical operator may not terminate the sequence.
                    if i + 1 == len(condition):
                        raise InvalidConditionSequence
                    if member not in FieldFormatter.logical_operators:
                        raise InvalidLogicalOperator
                    formatted_condition += " " + member + " "
                else:
                    if len(member) != 3:
                        raise InvalidComparisonMember
                    if member[1] not in FieldFormatter.comparison_operators:
                        raise InvalidConditionalOperator
                    member = list(member)
                    member[0] = FieldFormatter.format_attribute(member[0])
                    member[2] = FieldFormatter.format_value(member[2])
                    formatted_condition += " ".join(member)
            return formatted_condition
        raise InvalidConditionFormat

    @staticmethod
    def format_order(order_attributes, order_type):
        """Format an ORDER BY clause; ``None`` yields an empty string."""
        if order_attributes is None:
            return ""
        if isinstance(order_attributes, list):
            # BUG FIX: ``self`` is undefined inside a @staticmethod;
            # call through the class.
            return "ORDER BY " + FieldFormatter.format_attributes(order_attributes) + " " + order_type
        raise TypeError
|
en
| 0.757168
|
# equal, not equal, higher, lower, higher equal, lower equal
| 2.901255
| 3
|
bot/utils/paginator.py
|
fwizpy/Tortoise-BOT
| 1
|
6626040
|
from typing import List, Union
from asyncio import TimeoutError
from discord.abc import Messageable
from discord import ClientUser, User, Member, HTTPException
from discord.ext import commands
from bot.utils.embed_handler import info
class Paginator:
    """Reaction-driven paginator for long plain-text Discord messages.

    Content is accumulated with :meth:`add_line`, split into pages no
    longer than ``page_size`` (prefix/suffix included) and navigated with
    the four arrow reactions in ``PAGINATION_EMOJIS``.
    """

    ARROW_TO_BEGINNING = "⏪"
    ARROW_BACKWARD = "◀"
    ARROW_FORWARD = "▶"
    ARROW_TO_END = "⏩"
    PAGINATION_EMOJIS = (ARROW_TO_BEGINNING, ARROW_BACKWARD, ARROW_FORWARD, ARROW_TO_END)

    def __init__(
            self,
            *,
            page_size: int = 2000,
            separator: str = "\n",
            timeout: int = 120,
            prefix: str = "",
            suffix: str = ""
    ):
        """
        :param page_size: Maximum page string size for the page content.
        :param separator: Separator used to break large chunks of content to smaller ones, if needed.
        :param timeout: How long will the reactions be awaited for.
        :param prefix: Prefix for the message content.
        :param suffix: Suffix for the message content.
        """
        self._separator = separator
        self._timeout = timeout
        self._prefix = prefix
        self._suffix = suffix
        self._message = None  # discord message, set in create_message()
        self._page_index = 0  # index of the page currently shown
        self._content = []  # raw fragments collected by add_line()
        self._pages = []  # page strings, built in start()
        # Reserve space for prefix and suffix (incl. page counter) on every page.
        self._max_page_size = page_size - len(self.prefix) - len(self.suffix)

    def _make_pages(self) -> List[str]:
        """Split the accumulated content into pages of at most _max_page_size."""
        pages = []
        chunks = self.content.split(self._separator)
        self.break_long_entries(chunks, self._max_page_size)
        temp_page = []
        for entry in chunks:
            # len(temp_chunk) is because we'll add separators in join
            if sum(map(len, temp_page)) + len(entry) + len(temp_page) >= self._max_page_size:
                pages.append(self._separator.join(temp_page))
                temp_page = [entry]
            else:
                temp_page.append(entry)
        # For leftovers
        pages.append(self._separator.join(temp_page))
        return pages

    @staticmethod
    def break_long_entries(chunk_list: List[str], max_chunk_size: int):
        """
        We further break down chunk_list in case any of the entries are larger than max_chunk_size.
        Modifies passed list in place!
        Will throw RecursionError if the string length in list is mega-huge.
        Basically when the entry is found just split it in half and re-add it in list without breaking order.
        Split in half will be done as many times as needed as long as resulting entry is larger than max_chunk_size
        :param chunk_list: list of strings
        :param max_chunk_size: integer, if chunk is larger that this we break it down
        """
        for i, entry in enumerate(chunk_list):
            if len(entry) > max_chunk_size:
                # Split string in 2 parts by the middle
                f, s = entry[:len(entry) // 2], entry[len(entry) // 2:]
                # Append them back to our list, not breaking order
                chunk_list[i] = s
                chunk_list.insert(i, f)
                # Keep doing that until there is no entries that are larger in length than max_msg_size
                Paginator.break_long_entries(chunk_list, max_chunk_size)
                break

    async def start(self, destination: Messageable, author: Union[User, Member], bot_reference):
        """Build the pages, send the first message and begin listening for reactions."""
        self._pages = self._make_pages()
        await self.create_message(destination)
        if len(self._pages) > 1:
            # No need to paginate if there are no pages.
            await self._add_all_reactions()
            await self._start_listener(author, bot_reference)

    def close_page(self):
        # Just to condone to standard paginator
        pass

    @property
    def prefix(self) -> str:
        """Text placed before every page."""
        return self._prefix

    @property
    def suffix(self) -> str:
        """Text placed after every page, including the page counter."""
        return f"{self._get_page_counter_message()}{self._suffix}"

    @property
    def max_size(self) -> int:
        """Maximum size of a single page's content."""
        return self._max_page_size

    @property
    def pages(self) -> List[str]:
        return self._pages

    @property
    def content(self) -> str:
        # NOTE(review): fragments are joined with NO separator, so each
        # add_line() fragment appears to be expected to carry its own
        # separator — confirm against callers.
        return "".join(self._content)

    def clear(self):
        """Reset pagination state (the accumulated content is kept)."""
        self._pages = []
        self._page_index = 0

    def add_line(self, line: str = "", **kwargs):
        # Extra kwargs are accepted and ignored, presumably for drop-in
        # compatibility with discord.ext.commands.Paginator.add_line.
        self._content.append(line)

    def _get_page_counter_message(self) -> str:
        """Return the 'Page[x /y ]' footer for the current page."""
        return f"\n\nPage[{self._page_index + 1:<2}/{len(self._pages):<2}]"

    async def _add_all_reactions(self):
        for emoji in self.PAGINATION_EMOJIS:
            await self._message.add_reaction(emoji)

    async def clear_all_reactions(self):
        try:
            await self._message.clear_reactions()
        except HTTPException:
            # Silently ignore if no permission to remove reaction.
            pass

    async def create_message(self, destination: Messageable) -> None:
        """Send the initial message containing the current page."""
        self._message = await destination.send(self.get_message_content())

    async def update_message(self) -> None:
        """Edit the existing message to show the current page."""
        await self._message.edit(content=self.get_message_content())

    def get_message_content(self) -> str:
        """Return prefix + current page + suffix."""
        return f"{self.prefix}{self._pages[self._page_index]}{self.suffix}"

    async def _remove_reaction(self, reaction, author: Union[User, Member]):
        try:
            await self._message.remove_reaction(reaction, author)
        except HTTPException:
            # Silently ignore if no permission to remove reaction. (example DM)
            pass

    async def _start_listener(self, author: Union[User, Member], bot_reference):
        """Loop on the author's arrow reactions until the timeout elapses."""
        def react_check(reaction_, user_):
            # Only the invoking author, on our message, with a pagination emoji.
            return (
                str(reaction_) in self.PAGINATION_EMOJIS and
                user_.id == author.id and
                reaction_.message.id == self._message.id
            )

        while True:
            try:
                reaction, user = await bot_reference.wait_for("reaction_add", check=react_check, timeout=self._timeout)
            except TimeoutError:
                # No interaction within the timeout: stop paginating.
                await self.clear_all_reactions()
                break

            if str(reaction) == self.ARROW_TO_BEGINNING:
                await self._remove_reaction(self.ARROW_TO_BEGINNING, author)
                if self._page_index > 0:
                    # Jump to the first page.
                    self._page_index = 0
                    await self.update_message()
            elif str(reaction) == self.ARROW_BACKWARD:
                await self._remove_reaction(self.ARROW_BACKWARD, author)
                if self._page_index > 0:
                    self._page_index -= 1
                    await self.update_message()
            elif str(reaction) == self.ARROW_FORWARD:
                await self._remove_reaction(self.ARROW_FORWARD, author)
                if self._page_index < len(self._pages) - 1:
                    self._page_index += 1
                    await self.update_message()
            elif str(reaction) == self.ARROW_TO_END:
                await self._remove_reaction(self.ARROW_TO_END, author)
                if self._page_index < len(self._pages) - 1:
                    # Jump to the last page.
                    self._page_index = len(self._pages) - 1
                    await self.update_message()
class EmbedPaginator(Paginator):
    """Paginator variant that renders each page inside an info embed."""

    def __init__(self, embed_title: str = "", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._embed_title = embed_title

    @classmethod
    def _get_bot_member_from_destination(cls, destination: Messageable) -> Union[Member, ClientUser]:
        """Resolve the bot's member/user object for *destination*."""
        try:
            # noinspection PyUnresolvedReferences
            return destination.guild.me
        except AttributeError:
            # Destination has no guild (e.g. a DM channel).
            # noinspection PyUnresolvedReferences
            return destination.me

    def _build_embed(self, destination):
        """Create the info embed for the currently selected page."""
        bot_member = self._get_bot_member_from_destination(destination)
        return info(self.get_message_content(), bot_member, title=self._embed_title)

    async def create_message(self, destination) -> None:
        self._message = await destination.send(embed=self._build_embed(destination))

    async def update_message(self) -> None:
        channel = self._message.channel
        await self._message.edit(embed=self._build_embed(channel))
class ListPaginator:
    """Constructs a Paginator when provided a list of Embeds/Messages"""

    def __init__(
            self, ctx: commands.Context, page_list,
            restart_button="⏮",
            back_button="◀",
            forward_button="⏭",
            next_button="▶",
            pause_button="⏸",
            stop_button="⏹"
    ):
        # NOTE(review): forward_button ("⏭") jumps to the LAST page while
        # next_button ("▶") advances a single page — see start(); the
        # parameter names are slightly misleading but kept for interface
        # compatibility.
        self.pages = page_list
        self.ctx = ctx
        self.bot = ctx.bot
        self.restart_button = restart_button
        self.back_button = back_button
        self.pause_button = pause_button
        self.forward_button = forward_button
        self.next_button = next_button
        self.stop_button = stop_button

    def get_next_page(self, page):
        """Return the page after *page*, or the last page if already at the end."""
        pages = self.pages
        if page != pages[-1]:
            current_page_index = pages.index(page)
            next_page = pages[current_page_index+1]
            return next_page
        # Already on the last page; stay there.
        return pages[-1]

    def get_prev_page(self, page):
        """Return the page before *page*, or the first page if already at the start."""
        pages = self.pages
        if page != pages[0]:
            current_page_index = pages.index(page)
            next_page = pages[current_page_index-1]
            return next_page
        # Already on the first page; stay there.
        return pages[0]

    async def start(self):
        """Send the first page and run the reaction loop until stop/pause/timeout."""
        pages = self.pages
        ctx = self.ctx
        embed = pages[0]
        msg = await ctx.send(embed=embed)
        emote_list = [self.restart_button, self.back_button, self.pause_button,
                      self.next_button, self.forward_button, self.stop_button]
        for emote in emote_list:
            await msg.add_reaction(emote)

        def check(_reaction, _user):
            # Only the invoking author, on this message, with one of our emotes.
            return _user == ctx.author and str(_reaction.emoji) in emote_list and _reaction.message == msg

        current_page = embed
        try:
            while True:
                reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
                if str(reaction.emoji) == self.restart_button:
                    # Jump back to the first page.
                    await msg.edit(embed=pages[0])
                    current_page = pages[0]
                    await msg.remove_reaction(self.restart_button, ctx.author)
                elif str(reaction.emoji) == self.forward_button:
                    # Jump to the last page.
                    await msg.edit(embed=pages[-1])
                    current_page = pages[-1]
                    await msg.remove_reaction(self.forward_button, ctx.author)
                elif str(reaction.emoji) == self.next_button:
                    # Advance one page (no-op on the last page).
                    next_page = self.get_next_page(current_page)
                    await msg.edit(embed=self.get_next_page(current_page))
                    current_page = next_page
                    await msg.remove_reaction(self.next_button, ctx.author)
                elif str(reaction.emoji) == self.pause_button:
                    # Stop paginating but keep the message visible.
                    await msg.clear_reactions()
                    break
                elif str(reaction.emoji) == self.stop_button:
                    # Remove the paginated message entirely.
                    await msg.delete()
                    break
                elif str(reaction.emoji) == self.back_button:
                    # Go back one page (no-op on the first page).
                    prev_page = self.get_prev_page(current_page)
                    await msg.edit(embed=prev_page)
                    current_page = prev_page
                    await msg.remove_reaction(self.back_button, ctx.author)
        except TimeoutError:
            # No interaction for 60s: remove the navigation reactions.
            await msg.clear_reactions()
|
from typing import List, Union
from asyncio import TimeoutError
from discord.abc import Messageable
from discord import ClientUser, User, Member, HTTPException
from discord.ext import commands
from bot.utils.embed_handler import info
class Paginator:
ARROW_TO_BEGINNING = "⏪"
ARROW_BACKWARD = "◀"
ARROW_FORWARD = "▶"
ARROW_TO_END = "⏩"
PAGINATION_EMOJIS = (ARROW_TO_BEGINNING, ARROW_BACKWARD, ARROW_FORWARD, ARROW_TO_END)
def __init__(
self,
*,
page_size: int = 2000,
separator: str = "\n",
timeout: int = 120,
prefix: str = "",
suffix: str = ""
):
"""
:param page_size: Maximum page string size for the page content.
:param separator: Separator used to break large chunks of content to smaller ones, if needed.
:param timeout: How long will the reactions be awaited for.
:param prefix: Prefix for the message content.
:param suffix: Suffix for the message content.
"""
self._separator = separator
self._timeout = timeout
self._prefix = prefix
self._suffix = suffix
self._message = None
self._page_index = 0
self._content = []
self._pages = []
self._max_page_size = page_size - len(self.prefix) - len(self.suffix)
def _make_pages(self) -> List[str]:
pages = []
chunks = self.content.split(self._separator)
self.break_long_entries(chunks, self._max_page_size)
temp_page = []
for entry in chunks:
# len(temp_chunk) is because we'll add separators in join
if sum(map(len, temp_page)) + len(entry) + len(temp_page) >= self._max_page_size:
pages.append(self._separator.join(temp_page))
temp_page = [entry]
else:
temp_page.append(entry)
# For leftovers
pages.append(self._separator.join(temp_page))
return pages
@staticmethod
def break_long_entries(chunk_list: List[str], max_chunk_size: int):
"""
We further break down chunk_list in case any of the entries are larger than max_chunk_size.
Modifies passed list in place!
Will throw RecursionError if the string length in list is mega-huge.
Basically when the entry is found just split it in half and re-add it in list without breaking order.
Split in half will be done as many times as needed as long as resulting entry is larger than max_chunk_size
:param chunk_list: list of strings
:param max_chunk_size: integer, if chunk is larger that this we break it down
"""
for i, entry in enumerate(chunk_list):
if len(entry) > max_chunk_size:
# Split string in 2 parts by the middle
f, s = entry[:len(entry) // 2], entry[len(entry) // 2:]
# Append them back to our list, not breaking order
chunk_list[i] = s
chunk_list.insert(i, f)
# Keep doing that until there is no entries that are larger in length than max_msg_size
Paginator.break_long_entries(chunk_list, max_chunk_size)
break
    async def start(self, destination: Messageable, author: Union[User, Member], bot_reference):
        """Build the pages, send the first message and begin reacting/listening.

        :param destination: Channel/DM where the paginated message is sent.
        :param author: Only this user's reactions drive the pagination.
        :param bot_reference: Bot/client instance used to await reaction events.
        """
        self._pages = self._make_pages()
        await self.create_message(destination)
        if len(self._pages) > 1:
            # No need to paginate if there is only a single page.
            await self._add_all_reactions()
            await self._start_listener(author, bot_reference)
    def close_page(self):
        # No-op kept only for API compatibility with the standard paginator.
        pass

    @property
    def prefix(self) -> str:
        """Raw prefix prepended to every page."""
        return self._prefix

    @property
    def suffix(self) -> str:
        """Suffix appended to every page, including the page counter."""
        return f"{self._get_page_counter_message()}{self._suffix}"

    @property
    def max_size(self) -> int:
        """Maximum size of a single page's content (prefix/suffix already deducted)."""
        return self._max_page_size

    @property
    def pages(self) -> List[str]:
        """Pages built by the last call to start()."""
        return self._pages

    @property
    def content(self) -> str:
        """All added lines joined into one string."""
        return "".join(self._content)

    def clear(self):
        """Reset pagination state. Does not clear the accumulated content."""
        self._pages = []
        self._page_index = 0

    def add_line(self, line: str = "", **kwargs):
        """Accumulate a line of content; kwargs accepted for API compatibility."""
        self._content.append(line)

    def _get_page_counter_message(self) -> str:
        """Counter rendered into the suffix, e.g. 'Page[1 /3 ]'."""
        return f"\n\nPage[{self._page_index + 1:<2}/{len(self._pages):<2}]"
    async def _add_all_reactions(self):
        """Add every pagination control emoji to the paginator message."""
        for emoji in self.PAGINATION_EMOJIS:
            await self._message.add_reaction(emoji)

    async def clear_all_reactions(self):
        """Remove all reactions from the message, ignoring permission errors."""
        try:
            await self._message.clear_reactions()
        except HTTPException:
            # Silently ignore if we lack permission to remove reactions.
            pass

    async def create_message(self, destination: Messageable) -> None:
        """Send the initial paginated message to the destination."""
        self._message = await destination.send(self.get_message_content())

    async def update_message(self) -> None:
        """Edit the existing message to show the current page."""
        await self._message.edit(content=self.get_message_content())

    def get_message_content(self) -> str:
        """Current page wrapped in prefix and suffix."""
        return f"{self.prefix}{self._pages[self._page_index]}{self.suffix}"

    async def _remove_reaction(self, reaction, author: Union[User, Member]):
        """Remove the author's reaction, ignoring permission errors (e.g. in DMs)."""
        try:
            await self._message.remove_reaction(reaction, author)
        except HTTPException:
            # Silently ignore if no permission to remove reaction (example: DM).
            pass
    async def _start_listener(self, author: Union[User, Member], bot_reference):
        """Reaction-driven pagination loop.

        Waits for the author's pagination reactions on the message and moves
        between pages until no reaction arrives within the timeout, at which
        point the controls are cleared and the loop ends.
        """
        def react_check(reaction_, user_):
            # Only accept pagination emojis from the original author on our message.
            return (
                str(reaction_) in self.PAGINATION_EMOJIS and
                user_.id == author.id and
                reaction_.message.id == self._message.id
            )

        while True:
            try:
                reaction, user = await bot_reference.wait_for("reaction_add", check=react_check, timeout=self._timeout)
            except TimeoutError:
                # Session expired: drop the reaction controls and stop listening.
                await self.clear_all_reactions()
                break
            # Each branch removes the author's reaction so the control can be
            # pressed again, and only re-renders when the page actually changes.
            if str(reaction) == self.ARROW_TO_BEGINNING:
                await self._remove_reaction(self.ARROW_TO_BEGINNING, author)
                if self._page_index > 0:
                    self._page_index = 0
                    await self.update_message()
            elif str(reaction) == self.ARROW_BACKWARD:
                await self._remove_reaction(self.ARROW_BACKWARD, author)
                if self._page_index > 0:
                    self._page_index -= 1
                    await self.update_message()
            elif str(reaction) == self.ARROW_FORWARD:
                await self._remove_reaction(self.ARROW_FORWARD, author)
                if self._page_index < len(self._pages) - 1:
                    self._page_index += 1
                    await self.update_message()
            elif str(reaction) == self.ARROW_TO_END:
                await self._remove_reaction(self.ARROW_TO_END, author)
                if self._page_index < len(self._pages) - 1:
                    self._page_index = len(self._pages) - 1
                    await self.update_message()
class EmbedPaginator(Paginator):
    """Paginator variant that renders each page inside an info embed."""

    def __init__(self, embed_title: str = "", *args, **kwargs):
        """:param embed_title: Title used for every embed page."""
        super().__init__(*args, **kwargs)
        self._embed_title = embed_title

    @classmethod
    def _get_bot_member_from_destination(cls, destination: Messageable) -> Union[Member, ClientUser]:
        """Resolve the bot's member/user object for the given destination.

        Guild channels expose the bot via destination.guild.me; DM channels
        have no guild attribute, so fall back to destination.me.
        """
        try:
            # noinspection PyUnresolvedReferences
            return destination.guild.me
        except AttributeError:
            # noinspection PyUnresolvedReferences
            return destination.me

    async def create_message(self, destination) -> None:
        """Send the first page as an embed instead of plain text."""
        self._message = await destination.send(
            embed=info(
                self.get_message_content(),
                self._get_bot_member_from_destination(destination),
                title=self._embed_title
            )
        )

    async def update_message(self):
        """Edit the message, re-rendering the current page as an embed."""
        await self._message.edit(
            embed=info(
                self.get_message_content(),
                self._get_bot_member_from_destination(self._message.channel),
                title=self._embed_title
            )
        )
class ListPaginator:
    """Constructs a Paginator when provided a list of Embeds/Messages."""

    def __init__(
        self, ctx: commands.Context, page_list,
        restart_button="⏮",
        back_button="◀",
        forward_button="⏭",
        next_button="▶",
        pause_button="⏸",
        stop_button="⏹"
    ):
        """:param ctx: invocation context (provides channel, author and bot).
        :param page_list: pre-built pages (embeds) to cycle through.

        The remaining parameters override the control emojis: restart/forward
        jump to the first/last page, back/next step by one page, pause ends
        the session keeping the message, stop deletes the message.
        """
        self.pages = page_list
        self.ctx = ctx
        self.bot = ctx.bot
        self.restart_button = restart_button
        self.back_button = back_button
        self.pause_button = pause_button
        self.forward_button = forward_button
        self.next_button = next_button
        self.stop_button = stop_button

    def get_next_page(self, page):
        """Return the page after *page*, or the last page if already at the end."""
        pages = self.pages
        if page != pages[-1]:
            current_page_index = pages.index(page)
            next_page = pages[current_page_index+1]
            return next_page
        return pages[-1]

    def get_prev_page(self, page):
        """Return the page before *page*, or the first page if already at the start."""
        pages = self.pages
        if page != pages[0]:
            current_page_index = pages.index(page)
            # Despite its name, this local holds the *previous* page.
            next_page = pages[current_page_index-1]
            return next_page
        return pages[0]

    async def start(self):
        """Send the first page and run the reaction-driven pagination loop.

        The loop ends after 60 seconds without a reaction (controls cleared),
        on pause (controls cleared, message kept) or on stop (message deleted).
        """
        pages = self.pages
        ctx = self.ctx
        embed = pages[0]
        msg = await ctx.send(embed=embed)
        emote_list = [self.restart_button, self.back_button, self.pause_button,
                      self.next_button, self.forward_button, self.stop_button]
        for emote in emote_list:
            await msg.add_reaction(emote)

        def check(_reaction, _user):
            # Only the command author may drive pagination, and only on this message.
            return _user == ctx.author and str(_reaction.emoji) in emote_list and _reaction.message == msg

        current_page = embed
        try:
            while True:
                reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
                if str(reaction.emoji) == self.restart_button:
                    # Jump back to the first page.
                    await msg.edit(embed=pages[0])
                    current_page = pages[0]
                    await msg.remove_reaction(self.restart_button, ctx.author)
                elif str(reaction.emoji) == self.forward_button:
                    # Jump to the last page.
                    await msg.edit(embed=pages[-1])
                    current_page = pages[-1]
                    await msg.remove_reaction(self.forward_button, ctx.author)
                elif str(reaction.emoji) == self.next_button:
                    next_page = self.get_next_page(current_page)
                    await msg.edit(embed=self.get_next_page(current_page))
                    current_page = next_page
                    await msg.remove_reaction(self.next_button, ctx.author)
                elif str(reaction.emoji) == self.pause_button:
                    # Keep the message but end the session.
                    await msg.clear_reactions()
                    break
                elif str(reaction.emoji) == self.stop_button:
                    await msg.delete()
                    break
                elif str(reaction.emoji) == self.back_button:
                    prev_page = self.get_prev_page(current_page)
                    await msg.edit(embed=prev_page)
                    current_page = prev_page
                    await msg.remove_reaction(self.back_button, ctx.author)
        except TimeoutError:
            await msg.clear_reactions()
|
en
| 0.819126
|
:param page_size: Maximum page string size for the page content. :param separator: Separator used to break large chunks of content to smaller ones, if needed. :param timeout: How long will the reactions be awaited for. :param prefix: Prefix for the message content. :param suffix: Suffix for the message content. # len(temp_chunk) is because we'll add separators in join # For leftovers We further break down chunk_list in case any of the entries are larger than max_chunk_size. Modifies passed list in place! Will throw RecursionError if the string length in list is mega-huge. Basically when the entry is found just split it in half and re-add it in list without breaking order. Split in half will be done as many times as needed as long as resulting entry is larger than max_chunk_size :param chunk_list: list of strings :param max_chunk_size: integer, if chunk is larger that this we break it down # Split string in 2 parts by the middle # Append them back to our list, not breaking order # Keep doing that until there is no entries that are larger in length than max_msg_size # No need to paginate if there are no pages. # Just to condone to standard paginator # Silently ignore if no permission to remove reaction. # Silently ignore if no permission to remove reaction. (example DM) # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences Constructs a Paginator when provided a list of Embeds/Messages
| 2.846509
| 3
|
tasks/Scrapy/scrapy_official_newspapers/spiders/mexicoDOF.py
|
rongfang323/policy-data-analyzer
| 0
|
6626041
|
<filename>tasks/Scrapy/scrapy_official_newspapers/spiders/mexicoDOF.py
from scrapy_official_newspapers.spiders import BaseSpider
from scrapy import Request
from scrapy.selector import Selector
from scrapy_official_newspapers.items import ScrapyOfficialNewspapersItem
import time
import json
import re
import datetime
from dateutil.rrule import rrule, DAILY
class MexicoDOF(BaseSpider):
    """Spider for Mexico's Diario Oficial de la Federacion (DOF).

    Crawls the daily index pages from a given start date up to today and
    yields one item per entry whose summary matches the knowledge-domain
    keyword dictionaries.
    """
    name = "MexicoDOF"
    country = "Mexico"
    geo_code = "MEX-000-00000-0000000"
    level = "0"
    source = "Diario Oficial de la Federacion"
    title = "None"
    url = "https://dof.gob.mx"
    years = [year for year in range(2018, 2020)]
    collector = "<NAME>"
    scrapper_name = "<NAME>"
    scrapable = "True"
    allowed_domains = ["dof.gob.mx"]
    doc_name = None
    doc_type = 'HTML'
    # Keyword dictionaries are loaded once at class-definition time.
    # (File handles renamed from "dict", which shadowed the builtin.)
    with open('./keywords_knowledge_domain.json', 'r') as keywords_file:
        keyword_dict = json.load(keywords_file)
    with open('./negative_keywords_knowledge_domain.json', 'r') as negative_keywords_file:
        negative_keyword_dict = json.load(negative_keywords_file)

    def __init__(self, date=datetime.datetime(2020, 9, 1)):
        """:param date: start date as a datetime or a 'YYYY-MM-DD' / 'DD-MM-YYYY' string."""
        if isinstance(date, str):
            try:
                self.from_date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
            except ValueError:
                # Fall back to the day-first format.
                self.from_date = datetime.datetime.strptime(date, '%d-%m-%Y').date()
        else:
            self.from_date = date.date()
        self.today = datetime.date.today()

    def create_url_DOF_list(self):
        """Build one index URL per day between from_date and today (inclusive)."""
        URLs = []
        for dt in rrule(DAILY, dtstart=self.from_date, until=self.today):
            url = self.url + "/index_113.php?year=" + self.add_leading_zero_two_digits(
                dt.year) + "&month=" + self.add_leading_zero_two_digits(
                dt.month) + "&day=" + self.add_leading_zero_two_digits(dt.day)
            URLs.append(url)
        return URLs

    def start_requests(self):
        """Schedule a request for every daily index page."""
        for url in self.create_url_DOF_list():
            yield Request(url, dont_filter=True)

    def parse(self, response):
        """Parse a daily index page and yield one item per matching entry."""
        if len(response.xpath("//*[contains(text(), 'No hay datos para la fecha')]")):
            print("No publication in this date")
        else:
            url = response.url
            # The index URL encodes the date as ...?year=YYYY&month=MM&day=DD.
            year = int(url.split("=")[1][:4])
            month = int(url.split("=")[2][:2])
            day = int(url.split("=")[3][:2])
            date = datetime.datetime(year=year, month=month, day=day)
            item = ScrapyOfficialNewspapersItem()
            trs = response.xpath('/html//td[@class = "subtitle_azul"]')[0].xpath('//tr').xpath('following-sibling::tr[1]')
            authorship = None
            for tr in trs:
                authorship_new = tr.xpath('td[@class = "subtitle_azul"]/text()').get()
                resume_aux = tr.xpath('td/a[@class = "enlaces"]/text()').get()
                url_aux = tr.xpath('td/a[@class = "enlaces"]/@href').get()
                if authorship != authorship_new and authorship_new is not None:
                    # Rows without their own header inherit the last seen authorship.
                    authorship = authorship_new
                if resume_aux and resume_aux != "Ver más":
                    resume = resume_aux.replace('\t', '').replace('\n', '')
                    if self.search_keywords(resume, self.keyword_dict, self.negative_keyword_dict):
                        doc_url = self.url + url_aux + "&print=true"
                        reference = doc_url.split("codigo=")[1][:7]
                        item['country'] = self.country
                        item['geo_code'] = self.geo_code
                        item['level'] = self.level
                        item['data_source'] = self.source
                        item['title'] = resume
                        item['reference'] = reference
                        item['authorship'] = str(authorship)
                        item['resume'] = resume
                        item['publication_date'] = date
                        item['enforcement_date'] = date
                        item['url'] = self.url
                        item['doc_url'] = doc_url
                        # NOTE(review): doc_name is reference + 'html' with no dot
                        # separator (e.g. '1234567html') — confirm downstream expects this.
                        item['doc_name'] = reference + 'html'
                        item['doc_type'] = self.doc_type
                        item['doc_class'] = ''
                        item['file_urls'] = [doc_url]
                        yield item
|
<filename>tasks/Scrapy/scrapy_official_newspapers/spiders/mexicoDOF.py
from scrapy_official_newspapers.spiders import BaseSpider
from scrapy import Request
from scrapy.selector import Selector
from scrapy_official_newspapers.items import ScrapyOfficialNewspapersItem
import time
import json
import re
import datetime
from dateutil.rrule import rrule, DAILY
class MexicoDOF(BaseSpider):
name = "MexicoDOF"
country = "Mexico"
geo_code = "MEX-000-00000-0000000"
level = "0"
source = "Diario Oficial de la Federacion"
title = "None"
url = "https://dof.gob.mx"
years = [year for year in range(2018, 2020)]
collector = "<NAME>"
scrapper_name = "<NAME>"
scrapable = "True"
allowed_domains = ["dof.gob.mx"]
doc_name = None
doc_type = 'HTML'
with open('./keywords_knowledge_domain.json', 'r') as dict:
keyword_dict = json.load(dict)
with open('./negative_keywords_knowledge_domain.json', 'r') as dict:
negative_keyword_dict = json.load(dict)
def __init__(self, date = datetime.datetime(2020,9,1)):
if type(date) == str:
try:
self.from_date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
except:
self.from_date = datetime.datetime.strptime(date, '%d-%m-%Y').date()
else:
self.from_date = date.date()
self.today = datetime.date.today()
def create_url_DOF_list(self):
URLs = []
for dt in rrule(DAILY, dtstart=self.from_date, until=self.today):
url = self.url + f"/index_113.php?year=" + self.add_leading_zero_two_digits(
dt.year) + "&month=" + self.add_leading_zero_two_digits(
dt.month) + "&day=" + self.add_leading_zero_two_digits(dt.day)
URLs.append(url)
return URLs
def start_requests(self):
for url in self.create_url_DOF_list():
yield Request(url, dont_filter=True)
def parse(self, response):
if len(response.xpath("//*[contains(text(), 'No hay datos para la fecha')]")):
print("No publication in this date")
pass
else:
url = response.url
year = int(url.split("=")[1][:4])
month = int(url.split("=")[2][:2])
day = int(url.split("=")[3][:2])
date = datetime.datetime(year=year,month=month,day=day)
item = ScrapyOfficialNewspapersItem()
trs = response.xpath('/html//td[@class = "subtitle_azul"]')[0].xpath('//tr').xpath('following-sibling::tr[1]')
authorship = None
for tr in trs:
authorship_new = tr.xpath('td[@class = "subtitle_azul"]/text()').get()
resume_aux = tr.xpath('td/a[@class = "enlaces"]/text()').get()
url_aux = tr.xpath('td/a[@class = "enlaces"]/@href').get()
if authorship != authorship_new and authorship_new != None:
authorship = authorship_new
if resume_aux and resume_aux != "Ver más":
resume = resume_aux.replace('\t', '').replace('\n', '')
if self.search_keywords(resume, self.keyword_dict, self.negative_keyword_dict):
doc_url = self.url + url_aux + "&print=true"
reference = doc_url.split("codigo=")[1][:7]
item['country'] = self.country
item['geo_code'] = self.geo_code
item['level'] = self.level
item['data_source'] = self.source
item['title'] = resume
item['reference'] = reference
item['authorship'] = str(authorship)
item['resume'] = resume
item['publication_date'] = date
item['enforcement_date'] = date
item['url'] = self.url
item['doc_url'] = doc_url
item['doc_name'] = reference+'html'
item['doc_type'] = self.doc_type
item['doc_class'] = ''
item['file_urls'] = [doc_url]
yield item
|
none
| 1
| 2.712191
| 3
|
|
avwx/service/base.py
|
mralext20/avwx-engine
| 0
|
6626042
|
<gh_stars>0
"""
Service base class
"""
# pylint: disable=too-few-public-methods
# stdlib
from socket import gaierror
from typing import Any, Tuple
# library
import httpx
import httpcore
# module
from avwx.exceptions import SourceError
_VALUE_ERROR = "'{}' is not a valid report type for {}. Expected {}"
class Service:
    """Base Service class for fetching reports."""

    # Endpoint URL; concrete services are expected to override this.
    url: str = None
    report_type: str
    # When non-empty, restricts which report types the service accepts.
    _valid_types: Tuple[str] = tuple()

    def __init__(self, report_type: str):
        """Store the report type, validating it against _valid_types when set.

        :raises ValueError: if _valid_types is non-empty and report_type is not in it.
        """
        supported = self._valid_types
        if supported and report_type not in supported:
            message = _VALUE_ERROR.format(report_type, type(self).__name__, supported)
            raise ValueError(message)
        self.report_type = report_type

    def fetch(self, station: str, timeout: int = 10) -> str:
        """Fetches a report string from the service"""
        raise NotImplementedError()

    async def async_fetch(self, station: str, timeout: int = 10) -> str:
        """Asynchronously fetch a report string from the service"""
        raise NotImplementedError()
class CallsHTTP:
    """Service supporting HTTP requests"""

    # HTTP verb used by _call; subclasses may override with "POST".
    method: str = "GET"

    async def _call(
        self,
        url: str,
        params: dict = None,
        headers: dict = None,
        data: Any = None,
        timeout: int = 10,
    ) -> str:
        """Perform the HTTP request and return the response body as text.

        :raises SourceError: when the server answers with a non-200 status.
        :raises TimeoutError: when the request times out.
        :raises ConnectionError: when the server is unreachable or the read fails.
        """
        name = self.__class__.__name__
        try:
            async with httpx.AsyncClient(timeout=timeout) as client:
                if self.method.lower() == "post":
                    resp = await client.post(
                        url, params=params, headers=headers, data=data
                    )
                else:
                    resp = await client.get(url, params=params, headers=headers)
                if resp.status_code != 200:
                    raise SourceError(f"{name} server returned {resp.status_code}")
        except (
            httpx.ConnectTimeout,
            httpx.ReadTimeout,
            httpcore.ReadTimeout,
        ) as timeout_error:
            raise TimeoutError(f"Timeout from {name} server") from timeout_error
        except (gaierror, httpcore.ConnectError, httpx.ConnectError) as connect_error:
            raise ConnectionError(
                f"Unable to connect to {name} server"
            ) from connect_error
        except httpcore.NetworkError as network_error:
            raise ConnectionError(
                f"Unable to read data from {name} server"
            ) from network_error
        return resp.text
|
"""
Service base class
"""
# pylint: disable=too-few-public-methods
# stdlib
from socket import gaierror
from typing import Any, Tuple
# library
import httpx
import httpcore
# module
from avwx.exceptions import SourceError
_VALUE_ERROR = "'{}' is not a valid report type for {}. Expected {}"
class Service:
"""Base Service class for fetching reports"""
url: str = None
report_type: str
_valid_types: Tuple[str] = tuple()
def __init__(self, report_type: str):
if self._valid_types:
if report_type not in self._valid_types:
raise ValueError(
_VALUE_ERROR.format(
report_type, self.__class__.__name__, self._valid_types
)
)
self.report_type = report_type
def fetch(self, station: str, timeout: int = 10) -> str:
"""Fetches a report string from the service"""
raise NotImplementedError()
async def async_fetch(self, station: str, timeout: int = 10) -> str:
"""Asynchronously fetch a report string from the service"""
raise NotImplementedError()
class CallsHTTP:
"""Service supporting HTTP requests"""
method: str = "GET"
async def _call(
self,
url: str,
params: dict = None,
headers: dict = None,
data: Any = None,
timeout: int = 10,
) -> str:
name = self.__class__.__name__
try:
async with httpx.AsyncClient(timeout=timeout) as client:
if self.method.lower() == "post":
resp = await client.post(
url, params=params, headers=headers, data=data
)
else:
resp = await client.get(url, params=params, headers=headers)
if resp.status_code != 200:
raise SourceError(f"{name} server returned {resp.status_code}")
except (
httpx.ConnectTimeout,
httpx.ReadTimeout,
httpcore.ReadTimeout,
) as timeout_error:
raise TimeoutError(f"Timeout from {name} server") from timeout_error
except (gaierror, httpcore.ConnectError, httpx.ConnectError) as connect_error:
raise ConnectionError(
f"Unable to connect to {name} server"
) from connect_error
except httpcore.NetworkError as network_error:
raise ConnectionError(
f"Unable to read data from {name} server"
) from network_error
return resp.text
|
en
| 0.748853
|
Service base class # pylint: disable=too-few-public-methods # stdlib # library # module Base Service class for fetching reports Fetches a report string from the service Asynchronously fetch a report string from the service Service supporting HTTP requests
| 2.620401
| 3
|
src/server/consts.py
|
theaellengo/stories
| 1
|
6626043
|
# Port the stories server listens on.
port = 5000
# Database file name (presumably SQLite, judging by the .db extension — verify).
dbname = 'stories_data.db'
# Session/signing key.
# NOTE(review): hard-coded secret committed to source — should be loaded from
# an environment variable or a config file outside version control.
secret_key = 'q1er16sa5f7-fdfsa'
|
port = 5000
dbname = 'stories_data.db'
secret_key = 'q1er16sa5f7-fdfsa'
|
none
| 1
| 0.931772
| 1
|
|
setup.py
|
renestraub/vcu-ui
| 0
|
6626044
|
import setuptools

from vcuui._version import __version__ as version

# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="vcu-ui",
    version=version,
    author="<NAME>",
    author_email="<EMAIL>",
    description="NG800/VCU Pro Web UI",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/renestraub/vcu-ui",
    packages=setuptools.find_packages(exclude=("tests",)),
    classifiers=[
        'Programming Language :: Python :: 3.7',
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.7',
    install_requires=[
        'tornado',
        'requests',
        'ping3',
        'ubxlib>=0.3.6'
    ],
    include_package_data=True,  # Use MANIFEST.in to add *.html, *.css files
    entry_points={
        'console_scripts': [
            'vcu-ui-start = vcuui.server:run_server'
        ]
    },
)
|
import setuptools
from vcuui._version import __version__ as version
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="vcu-ui",
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="NG800/VCU Pro Web UI",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/renestraub/vcu-ui",
packages=setuptools.find_packages(exclude=("tests",)),
classifiers=[
'Programming Language :: Python :: 3.7',
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
install_requires=[
'tornado',
'requests',
'ping3',
'ubxlib>=0.3.6'
],
include_package_data=True, # Use MANIFEST.in to add *.html, *.css files
entry_points={
'console_scripts': [
'vcu-ui-start = vcuui.server:run_server'
]
},
)
|
en
| 0.265382
|
# Use MANIFEST.in to add *.html, *.css files
| 1.613443
| 2
|
xy/anneal.py
|
mrzl/Composition37XY
| 67
|
6626045
|
<filename>xy/anneal.py
import math
import random
def anneal(state, max_temp, min_temp, steps):
    """Simulated annealing: mutate copies of *state*, returning the best found.

    :param state: object exposing copy(), energy(), do_move() -> undo token,
                  and undo_move(token).
    :param max_temp: starting temperature (> min_temp).
    :param min_temp: final temperature (> 0).
    :param steps: number of annealing iterations.
    :return: copy of the lowest-energy state encountered.
    """
    # Exponential cooling schedule from max_temp down toward min_temp.
    factor = -math.log(float(max_temp) / min_temp)
    state = state.copy()
    best_state = state.copy()
    best_energy = state.energy()
    previous_energy = best_energy
    for step in range(steps):  # range: Python 3 (xrange no longer exists)
        temp = max_temp * math.exp(factor * step / steps)
        undo = state.do_move()
        energy = state.energy()
        change = energy - previous_energy
        # Metropolis criterion: always accept improvements; accept a worse
        # move only with probability exp(-change / temp).
        if change > 0 and math.exp(-change / temp) < random.random():
            state.undo_move(undo)
        else:
            previous_energy = energy
            if energy < best_energy:
                # print step, temp, energy
                best_energy = energy
                best_state = state.copy()
    return best_state
def get_max_temp(state, iterations):
    """Estimate a starting temperature for anneal().

    Performs *iterations* moves on a copy of *state* and returns twice the
    average absolute energy change per move.

    Note: now uses Python 3 true division for the average (the original
    Python 2 code truncated integer averages).
    """
    state = state.copy()
    previous = state.energy()
    total = 0
    for _ in range(iterations):  # range: Python 3 (xrange no longer exists)
        state.do_move()
        energy = state.energy()
        total += abs(energy - previous)
        previous = energy
    average = total / iterations
    return average * 2
|
<filename>xy/anneal.py
import math
import random
def anneal(state, max_temp, min_temp, steps):
factor = -math.log(float(max_temp) / min_temp)
state = state.copy()
best_state = state.copy()
best_energy = state.energy()
previous_energy = best_energy
for step in xrange(steps):
temp = max_temp * math.exp(factor * step / steps)
undo = state.do_move()
energy = state.energy()
change = energy - previous_energy
if change > 0 and math.exp(-change / temp) < random.random():
state.undo_move(undo)
else:
previous_energy = energy
if energy < best_energy:
# print step, temp, energy
best_energy = energy
best_state = state.copy()
return best_state
def get_max_temp(state, iterations):
state = state.copy()
previous = state.energy()
total = 0
for _ in xrange(iterations):
state.do_move()
energy = state.energy()
total += abs(energy - previous)
previous = energy
average = total / iterations
return average * 2
|
en
| 0.473434
|
# print step, temp, energy
| 3.37938
| 3
|
04_Data Manipulation with pandas/03_Slicing and Indexing/07_Slicing time series.py
|
mohd-faizy/DataScience-With-Python
| 5
|
6626046
|
<gh_stars>1-10
'''
07 - Slicing time series:
Slicing is particularly useful for time series since it's a common thing to want to
filter for data within a date range. Add the date column to the index, then use .loc[]
to perform the subsetting. The important thing to remember is to keep your dates in
ISO 8601 format, that is, yyyy-mm-dd.
Recall from Chapter 1 that you can combine multiple Boolean conditions using logical
operators (such as &). To do so in one line of code, you'll need to add parentheses ()
around each condition.
pandas is loaded as pd and temperatures, with no index, is available.
Instructions:
- Use Boolean conditions (not .isin() or .loc[]) to subset for rows in 2010 and 2011, and
print the results.
- Note that because the date isn't set as an index, a condition that contains only a year,
such as df["date"] == "2009", will check if the date is equal to the first day of the first
month of the year (e.g. 2009-01-01), rather than checking whether the date occurs within the
given year. We recommend writing out the full date when using Boolean conditions (e.g., 2009-12-31).
- Set the index to the date column.
- Use .loc[] to subset for rows in 2010 and 2011.
- Use .loc[] to subset for rows from Aug 2010 to Feb 2011.
------------------------------------------------
temperatures.head()
date city country avg_temp_c
0 2000-01-01 Abidjan Côte D'Ivoire 27.293
1 2000-02-01 Abidjan Côte D'Ivoire 27.685
2 2000-03-01 Abidjan Côte D'Ivoire 29.061
3 2000-04-01 Abidjan Côte D'Ivoire 28.162
4 2000-05-01 Abidjan <NAME> 27.547
-------------------------------------------------
'''
# NOTE(review): exercise script — assumes a `temperatures` DataFrame and
# pandas (as pd) are preloaded by the course environment; not runnable standalone.

# Use Boolean conditions to subset temperatures for rows in 2010 and 2011
temperatures_bool = temperatures[(temperatures["date"] >= "2010-01-01") & (temperatures["date"] <= "2011-12-31")]
print(temperatures_bool)

# Set date as an index and sort the index (required for .loc date-range slicing)
temperatures_ind = temperatures.set_index("date").sort_index()

# Use .loc[] to subset temperatures_ind for rows in 2010 and 2011
print(temperatures_ind.loc["2010":"2011"])

# Use .loc[] to subset temperatures_ind for rows from Aug 2010 to Feb 2011
print(temperatures_ind.loc["2010-08":"2011-02"])
|
'''
07 - Slicing time series:
Slicing is particularly useful for time series since it's a common thing to want to
filter for data within a date range. Add the date column to the index, then use .loc[]
to perform the subsetting. The important thing to remember is to keep your dates in
ISO 8601 format, that is, yyyy-mm-dd.
Recall from Chapter 1 that you can combine multiple Boolean conditions using logical
operators (such as &). To do so in one line of code, you'll need to add parentheses ()
around each condition.
pandas is loaded as pd and temperatures, with no index, is available.
Instructions:
- Use Boolean conditions (not .isin() or .loc[]) to subset for rows in 2010 and 2011, and
print the results.
- Note that because the date isn't set as an index, a condition that contains only a year,
such as df["date"] == "2009", will check if the date is equal to the first day of the first
month of the year (e.g. 2009-01-01), rather than checking whether the date occurs within the
given year. We recommend writing out the full date when using Boolean conditions (e.g., 2009-12-31).
- Set the index to the date column.
- Use .loc[] to subset for rows in 2010 and 2011.
- Use .loc[] to subset for rows from Aug 2010 to Feb 2011.
------------------------------------------------
temperatures.head()
date city country avg_temp_c
0 2000-01-01 Abidjan Côte D'Ivoire 27.293
1 2000-02-01 Abidjan Côte D'Ivoire 27.685
2 2000-03-01 Abidjan Côte D'Ivoire 29.061
3 2000-04-01 Abidjan Côte D'Ivoire 28.162
4 2000-05-01 Abidjan <NAME> 27.547
-------------------------------------------------
'''
# Use Boolean conditions to subset temperatures for rows in 2010 and 2011
temperatures_bool = temperatures[(temperatures["date"] >= "2010-01-01") & (temperatures["date"] <= "2011-12-31")]
print(temperatures_bool)
# Set date as an index and sort the index
temperatures_ind = temperatures.set_index("date").sort_index()
# Use .loc[] to subset temperatures_ind for rows in 2010 and 2011
print(temperatures_ind.loc["2010":"2011"])
# Use .loc[] to subset temperatures_ind for rows from Aug 2010 to Feb 2011
print(temperatures_ind.loc["2010-08":"2011-02"])
|
en
| 0.811598
|
07 - Slicing time series: Slicing is particularly useful for time series since it's a common thing to want to filter for data within a date range. Add the date column to the index, then use .loc[] to perform the subsetting. The important thing to remember is to keep your dates in ISO 8601 format, that is, yyyy-mm-dd. Recall from Chapter 1 that you can combine multiple Boolean conditions using logical operators (such as &). To do so in one line of code, you'll need to add parentheses () around each condition. pandas is loaded as pd and temperatures, with no index, is available. Instructions: - Use Boolean conditions (not .isin() or .loc[]) to subset for rows in 2010 and 2011, and print the results. - Note that because the date isn't set as an index, a condition that contains only a year, such as df["date"] == "2009", will check if the date is equal to the first day of the first month of the year (e.g. 2009-01-01), rather than checking whether the date occurs within the given year. We recommend writing out the full date when using Boolean conditions (e.g., 2009-12-31). - Set the index to the date column. - Use .loc[] to subset for rows in 2010 and 2011. - Use .loc[] to subset for rows from Aug 2010 to Feb 2011. ------------------------------------------------ temperatures.head() date city country avg_temp_c 0 2000-01-01 Abidjan Côte D'Ivoire 27.293 1 2000-02-01 Abidjan Côte D'Ivoire 27.685 2 2000-03-01 Abidjan Côte D'Ivoire 29.061 3 2000-04-01 Abidjan Côte D'Ivoire 28.162 4 2000-05-01 Abidjan <NAME> 27.547 ------------------------------------------------- # Use Boolean conditions to subset temperatures for rows in 2010 and 2011 # Set date as an index and sort the index # Use .loc[] to subset temperatures_ind for rows in 2010 and 2011 # Use .loc[] to subset temperatures_ind for rows from Aug 2010 to Feb 2011
| 4.217638
| 4
|
fast_bert/prediction.py
|
BobCN2017/fast-bert
| 0
|
6626047
|
import logging
import os
import torch
from transformers import BertTokenizer
from .data_cls import BertDataBunch
from .learner_cls import BertLearner
from .modeling import (
BertForMultiLabelSequenceClassification,
XLNetForMultiLabelSequenceClassification,
RobertaForMultiLabelSequenceClassification,
DistilBertForMultiLabelSequenceClassification,
CamembertForMultiLabelSequenceClassification,
AlbertForMultiLabelSequenceClassification,
)
from transformers import (
WEIGHTS_NAME,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
)
import warnings

# Suppress noisy (harmless) numpy binary-compatibility warnings.
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")

# Maps a model-type key to:
# (config class, (single-label model class, multi-label model class), tokenizer class)
MODEL_CLASSES = {
    "bert": (
        BertConfig,
        (BertForSequenceClassification, BertForMultiLabelSequenceClassification),
        BertTokenizer,
    ),
    "xlnet": (
        XLNetConfig,
        (XLNetForSequenceClassification, XLNetForMultiLabelSequenceClassification),
        XLNetTokenizer,
    ),
    "xlm": (
        XLMConfig,
        # NOTE(review): both slots are the single-label class — XLM has no
        # multi-label variant imported here; confirm intended.
        (XLMForSequenceClassification, XLMForSequenceClassification),
        XLMTokenizer,
    ),
    "roberta": (
        RobertaConfig,
        (RobertaForSequenceClassification, RobertaForMultiLabelSequenceClassification),
        RobertaTokenizer,
    ),
    "distilbert": (
        DistilBertConfig,
        (
            DistilBertForSequenceClassification,
            DistilBertForMultiLabelSequenceClassification,
        ),
        DistilBertTokenizer,
    ),
    "albert": (
        AlbertConfig,
        (AlbertForSequenceClassification, AlbertForMultiLabelSequenceClassification),
        AlbertTokenizer,
    ),
    "camembert": (
        CamembertConfig,
        (
            CamembertForSequenceClassification,
            CamembertForMultiLabelSequenceClassification,
        ),
        CamembertTokenizer,
    ),
}
class BertClassificationPredictor(object):
    """Convenience wrapper that loads a fine-tuned model and runs inference.

    Builds a labels-only BertDataBunch (no training/validation files) and a
    BertLearner from the saved model directory.
    """

    def __init__(
        self,
        model_path,
        label_path,
        multi_label=False,
        model_type="bert",
        do_lower_case=True,
    ):
        """:param model_path: directory containing the fine-tuned model/tokenizer.
        :param label_path: directory containing the labels file.
        :param multi_label: whether the classifier is multi-label.
        :param model_type: one of the MODEL_CLASSES keys (e.g. 'bert', 'xlnet').
        :param do_lower_case: passed through to the tokenizer.
        """
        self.model_path = model_path
        self.label_path = label_path
        self.multi_label = multi_label
        self.model_type = model_type
        self.do_lower_case = do_lower_case
        self.learner = self.get_learner()

    def get_learner(self):
        """Create a BertLearner configured for prediction only (no training setup)."""
        _, _, tokenizer_class = MODEL_CLASSES[self.model_type]
        # instantiate the new tokeniser object using the tokeniser name
        tokenizer = tokenizer_class.from_pretrained(
            self.model_path, do_lower_case=self.do_lower_case
        )
        # Prefer GPU when available.
        if torch.cuda.is_available():
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        databunch = BertDataBunch(
            self.label_path,
            self.label_path,
            tokenizer,
            train_file=None,
            val_file=None,
            batch_size_per_gpu=32,
            max_seq_length=512,
            multi_gpu=False,
            multi_label=self.multi_label,
            model_type=self.model_type,
            no_cache=True,
        )
        learner = BertLearner.from_pretrained_model(
            databunch,
            self.model_path,
            metrics=[],
            device=device,
            logger=logging.getLogger(),
            output_dir=None,
            warmup_steps=0,
            multi_gpu=False,
            is_fp16=False,
            multi_label=self.multi_label,
            logging_steps=0,
        )
        return learner

    def predict_batch(self, texts):
        """Return predictions for a list of texts."""
        return self.learner.predict_batch(texts)

    def predict(self, text):
        """Return predictions for a single text."""
        predictions = self.predict_batch([text])[0]
        return predictions
|
import logging
import os
import torch
from transformers import BertTokenizer
from .data_cls import BertDataBunch
from .learner_cls import BertLearner
from .modeling import (
BertForMultiLabelSequenceClassification,
XLNetForMultiLabelSequenceClassification,
RobertaForMultiLabelSequenceClassification,
DistilBertForMultiLabelSequenceClassification,
CamembertForMultiLabelSequenceClassification,
AlbertForMultiLabelSequenceClassification,
)
from transformers import (
WEIGHTS_NAME,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
)
import warnings
# Silence noisy numpy binary-compatibility warnings emitted on import.
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Registry mapping a model-type key to
# (config class, (single-label classifier, multi-label classifier), tokenizer class).
# The middle tuple is indexed by the multi_label flag elsewhere in the package.
MODEL_CLASSES = {
    "bert": (
        BertConfig,
        (BertForSequenceClassification, BertForMultiLabelSequenceClassification),
        BertTokenizer,
    ),
    "xlnet": (
        XLNetConfig,
        (XLNetForSequenceClassification, XLNetForMultiLabelSequenceClassification),
        XLNetTokenizer,
    ),
    "xlm": (
        XLMConfig,
        # NOTE(review): the single-label class is listed twice here — presumably
        # no multi-label XLM variant exists (none is imported above); confirm
        # before relying on multi_label=True with model_type="xlm".
        (XLMForSequenceClassification, XLMForSequenceClassification),
        XLMTokenizer,
    ),
    "roberta": (
        RobertaConfig,
        (RobertaForSequenceClassification, RobertaForMultiLabelSequenceClassification),
        RobertaTokenizer,
    ),
    "distilbert": (
        DistilBertConfig,
        (
            DistilBertForSequenceClassification,
            DistilBertForMultiLabelSequenceClassification,
        ),
        DistilBertTokenizer,
    ),
    "albert": (
        AlbertConfig,
        (AlbertForSequenceClassification, AlbertForMultiLabelSequenceClassification),
        AlbertTokenizer,
    ),
    "camembert": (
        CamembertConfig,
        (
            CamembertForSequenceClassification,
            CamembertForMultiLabelSequenceClassification,
        ),
        CamembertTokenizer,
    ),
}
class BertClassificationPredictor(object):
    """Load a fine-tuned fast-bert model from disk and expose predict helpers."""
    def __init__(
        self,
        model_path,
        label_path,
        multi_label=False,
        model_type="bert",
        do_lower_case=True,
    ):
        # model_path: directory with the fine-tuned weights/config.
        # label_path: directory with the labels file (also reused as data dir).
        # multi_label: selects the multi-label head variant downstream.
        # model_type: key into MODEL_CLASSES ("bert", "xlnet", "xlm", ...).
        # do_lower_case: lower-case text during tokenization.
        self.model_path = model_path
        self.label_path = label_path
        self.multi_label = multi_label
        self.model_type = model_type
        self.do_lower_case = do_lower_case
        # Built eagerly: constructing the predictor loads the model from disk.
        self.learner = self.get_learner()
    def get_learner(self):
        """Assemble an inference-only BertLearner (no train/val files)."""
        _, _, tokenizer_class = MODEL_CLASSES[self.model_type]
        # instantiate the new tokeniser object using the tokeniser name
        tokenizer = tokenizer_class.from_pretrained(
            self.model_path, do_lower_case=self.do_lower_case
        )
        if torch.cuda.is_available():
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        # label_path doubles as the data dir; with train/val None the databunch
        # only carries tokenization/batching settings for prediction.
        databunch = BertDataBunch(
            self.label_path,
            self.label_path,
            tokenizer,
            train_file=None,
            val_file=None,
            batch_size_per_gpu=32,
            max_seq_length=512,
            multi_gpu=False,
            multi_label=self.multi_label,
            model_type=self.model_type,
            no_cache=True,
        )
        learner = BertLearner.from_pretrained_model(
            databunch,
            self.model_path,
            metrics=[],
            device=device,
            logger=logging.getLogger(),
            output_dir=None,
            warmup_steps=0,
            multi_gpu=False,
            is_fp16=False,
            multi_label=self.multi_label,
            logging_steps=0,
        )
        return learner
    def predict_batch(self, texts):
        """Return predictions for a list of texts."""
        return self.learner.predict_batch(texts)
    def predict(self, text):
        """Return predictions for a single text."""
        predictions = self.predict_batch([text])[0]
        return predictions
|
en
| 0.301297
|
# instantiate the new tokeniser object using the tokeniser name
| 2.169891
| 2
|
Algo and DSA/LeetCode-Solutions-master/Python/first-unique-number.py
|
Sourav692/FAANG-Interview-Preparation
| 3,269
|
6626048
|
<reponame>Sourav692/FAANG-Interview-Preparation<gh_stars>1000+
# Time: ctor: O(k)
# add: O(1)
# showFirstUnique: O(1)
# Space: O(n)
import collections
class FirstUnique(object):
    """Stream container reporting the first value seen exactly once.

    Uniques live in an insertion-ordered dict; anything seen twice moves
    permanently to the duplicate set. Both add and showFirstUnique are O(1).
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.__q = collections.OrderedDict()  # still-unique values, in arrival order
        self.__dup = set()                    # values seen more than once
        for v in nums:
            self.add(v)

    def showFirstUnique(self):
        """
        :rtype: int
        """
        # Oldest surviving key is the first unique; -1 when none remain.
        return next(iter(self.__q)) if self.__q else -1

    def add(self, value):
        """
        :type value: int
        :rtype: None
        """
        if value in self.__q:
            # Second sighting: demote from unique to duplicate.
            self.__q.pop(value)
            self.__dup.add(value)
        elif value in self.__dup:
            # Third or later sighting: already a duplicate, nothing changes.
            self.__dup.add(value)
        else:
            # First sighting: enqueue as unique.
            self.__q[value] = None
|
# Time: ctor: O(k)
# add: O(1)
# showFirstUnique: O(1)
# Space: O(n)
import collections
class FirstUnique(object):
    """Stream container reporting the first value seen exactly once.

    Uniques live in an insertion-ordered dict; anything seen twice moves
    permanently to the duplicate set. Both add and showFirstUnique are O(1).
    """

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.__q = collections.OrderedDict()  # still-unique values, in arrival order
        self.__dup = set()                    # values seen more than once
        for v in nums:
            self.add(v)

    def showFirstUnique(self):
        """
        :rtype: int
        """
        # Oldest surviving key is the first unique; -1 when none remain.
        return next(iter(self.__q)) if self.__q else -1

    def add(self, value):
        """
        :type value: int
        :rtype: None
        """
        if value in self.__q:
            # Second sighting: demote from unique to duplicate.
            self.__q.pop(value)
            self.__dup.add(value)
        elif value in self.__dup:
            # Third or later sighting: already a duplicate, nothing changes.
            self.__dup.add(value)
        else:
            # First sighting: enqueue as unique.
            self.__q[value] = None
|
en
| 0.385294
|
# Time: ctor: O(k) # add: O(1) # showFirstUnique: O(1) # Space: O(n) :type nums: List[int] :rtype: int :type value: int :rtype: None
| 3.588906
| 4
|
partition_wiki.py
|
trneedham/Spectral-Gromov-Wasserstein
| 13
|
6626049
|
## Script to run graph partitioning experiment on Wiki dataset
# Load packages
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
import json
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
from GromovWassersteinGraphToolkit import *
import pickle
import warnings
# Load modules for network partitioning experiments
import community
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community.asyn_fluid import asyn_fluidc
from networkx.algorithms.community.quality import performance, coverage, modularity
from sklearn import metrics
from infomap import Infomap
# Breakpoint analysis package
# import ruptures as rpt
from scipy.cluster.hierarchy import dendrogram, linkage, cut_tree
from scipy.signal import find_peaks
warnings.filterwarnings("ignore")
def graph_partition_gd2(cost_s, p_s, p_t,idx2node, ot_hyperpara, trans0=None):
    """
    ** May 19, 2020: Gradient descent version of graph_partition
    Achieve a single graph partition via calculating Gromov-Wasserstein discrepancy
    between the target graph and proposed one
    Args:
        cost_s: (n_s, n_s) adjacency matrix of source graph
        p_s: (n_s, 1) the distribution of source nodes
        p_t: (n_t, 1) the distribution of target nodes
        idx2node: a dictionary {key = idx of row in cost, value = name of node}
        ot_hyperpara: a dictionary of hyperparameters
        trans0: optional (n_s, n_t) initial transport plan; None lets the
            solver pick its own initialization
    Returns:
        sub_costs: a dictionary {key: cluster idx, value: sub cost matrices}
        sub_probs: a dictionary {key: cluster idx, value: sub distribution of nodes}
        sub_idx2nodes: a dictionary {key: cluster idx, value: a dictionary
            mapping indices to nodes' names}
        trans: (n_s, n_t) the optimal transport
        d_gw: the Gromov-Wasserstein distance reported by the solver
    """
    # The target "graph" is a set of disconnected super-nodes: its cost matrix
    # is diagonal, weighted by the target distribution.
    cost_t = np.diag(p_t[:, 0])
    cost_s = np.asarray(cost_s)
    # cost_t = 1 / (1 + cost_t)
    trans, log = gwa.gromov_wasserstein_asym_fixed_initialization(cost_s, cost_t, p_s.flatten(), p_t.flatten(), trans0)
    d_gw = log['gw_dist']
    # Assign each source node to the target column receiving most of its mass.
    sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(cost_s, trans, p_s, p_t, idx2node)
    return sub_costs, sub_probs, sub_idx2nodes, trans, d_gw
def get_partition(coup):
    """Convert a coupling matrix into a list of node-index sets (one per cluster)."""
    # Hard-assign each row (node) to its highest-mass column (cluster).
    assignments = np.argmax(coup, axis=1)
    top_cluster = np.max(assignments)
    return [set(np.argwhere(assignments == c).T[0]) for c in range(top_cluster + 1)]
# dictionaries for holding results
scores = {}
runtimes = {}
avetimes = {}
# load data
# NOTE(review): opened without a context manager; harmless in a short-lived
# script, but inconsistent with the `with` blocks below.
f = open('data/wikicats.p', 'rb')
database = pickle.load(f)
f.close()
# dG: directed Wiki graph; labels: ground-truth category per node.
dG = database['G']
labels = database['labels']
num_nodes = dG.number_of_nodes()
num_partitions = len(np.unique(labels))
# Identity mapping: node ids are already usable as matrix row indices.
idx2node = {}
for n in dG.nodes:
    idx2node[n] = n
G = dG.to_undirected()
# Load precomputed noisy version
save_name = "wiki_sym_noise.txt"
with open(save_name, "rb") as fp:
    nG = pickle.load(fp)
save_name = "wiki_asym_noise.txt"
with open(save_name, "rb") as fp:
    ndG = pickle.load(fp)
print('---Data files loaded. Computing...\n')
def process_sgwl_wiki(cost,database,num_nodes,num_partitions,verbose=False):
    """Partition the Wiki graph from a cost matrix via GW and score it.

    Args:
        cost: (num_nodes, num_nodes) cost matrix (adjacency or heat kernel)
        database: dict holding the ground-truth 'labels' array
        num_nodes: number of nodes in the graph
        num_partitions: number of clusters to extract
        verbose: if True, print the adjusted mutual information score
    Returns:
        (mutual_info, d_gw, trans): AMI against ground-truth labels, the GW
        distance, and the (num_nodes, num_partitions) coupling matrix.
    """
    # Nearly-uniform source distribution: row-sum ** 0.001 barely deviates
    # from uniform while still reflecting degree.
    p_s = np.zeros((num_nodes, 1))
    p_s[:, 0] = np.sum(cost, axis=1) ** .001
    p_s /= np.sum(p_s)
    p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=num_partitions)
    ot_dict = {'loss_type': 'L2', # the key hyperparameters of GW distance
               'ot_method': 'proximal',
               'beta': 2e-7,
               'outer_iteration': 300,
               # outer, inner iteration, error bound of optimal transport
               'iter_bound': 1e-30,
               'inner_iteration': 1,
               'sk_bound': 1e-30,
               'node_prior': 0,
               'max_iter': 200, # iteration and error bound for calcuating barycenter
               'cost_bound': 1e-16,
               'update_p': False, # optional updates of source distribution
               'lr': 0,
               'alpha': 0}
    sub_costs, sub_probs, sub_idx2nodes, trans, d_gw = graph_partition_gd2(cost,
                                                                           p_s,
                                                                           p_t,
                                                                           idx2node,
                                                                           ot_dict)
    # Hard cluster assignment: the column with the most transport mass per row.
    est_idx = np.argmax(trans, axis=1)
    mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
    if verbose:
        print('Mutual information score = {:3.3f}'.format(mutual_info))
    return mutual_info, d_gw, trans
###########################################################
###########################################################
# Method: Fluid communities (symmetrized)
###########################################################
# Raw data
# Fluid communities is only defined on connected graphs; record 'failed'
# instead of crashing when the graph is disconnected.
if not nx.is_connected(G):
    #print('---Fluid community requires connected graph, skipping raw version---')
    scores['fluid-symmetrized-raw'] = 'failed'
    runtimes['fluid-symmetrized-raw'] = 'failed'
else:
    time_s = time.time()
    comp = asyn_fluidc(G.to_undirected(), k=num_partitions)
    list_nodes = [frozenset(c) for c in comp]
    # Flatten the community list into a per-node cluster-id vector.
    est_idx = np.zeros((num_nodes,))
    for i in range(len(list_nodes)):
        for idx in list_nodes[i]:
            est_idx[idx] = i
    runtime = time.time() - time_s
    mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
    scores['fluid-symmetrized-raw'] = mutual_info
    runtimes['fluid-symmetrized-raw'] = runtime
# Noisy data
if not nx.is_connected(nG):
    print('---Fluid community requires connected graph, skipping noisy version---')
    scores['fluid-symmetrized-noisy'] = 'failed'
    runtimes['fluid-symmetrized-noisy'] = 'failed'
else:
    time_s = time.time()
    comp = asyn_fluidc(nG.to_undirected(), k=num_partitions)
    list_nodes = [frozenset(c) for c in comp]
    est_idx = np.zeros((num_nodes,))
    for i in range(len(list_nodes)):
        for idx in list_nodes[i]:
            est_idx[idx] = i
    runtime = time.time() - time_s
    mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
    scores['fluid-symmetrized-noisy'] = mutual_info
    runtimes['fluid-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: FastGreedy (symmetrized)
###########################################################
# Raw
time_s = time.time()
list_nodes = list(greedy_modularity_communities(G))
# Flatten communities into a per-node cluster-id vector, then score via AMI.
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
    for idx in list_nodes[i]:
        est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fastgreedy-symmetrized-raw'] = mutual_info
runtimes['fastgreedy-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
list_nodes = list(greedy_modularity_communities(nG))
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
    for idx in list_nodes[i]:
        est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fastgreedy-symmetrized-noisy'] = mutual_info
runtimes['fastgreedy-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Louvain (symmetrized)
###########################################################
# Raw
time_s = time.time()
# best_partition returns {node: community_id}; invert it into est_idx.
partition = community.best_partition(G)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
    list_nodes = [nodes for nodes in partition.keys()
                  if partition[nodes] == com]
    for idx in list_nodes:
        est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['louvain-symmetrized-raw'] = mutual_info
runtimes['louvain-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
partition = community.best_partition(nG)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
    list_nodes = [nodes for nodes in partition.keys()
                  if partition[nodes] == com]
    for idx in list_nodes:
        est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['louvain-symmetrized-noisy'] = mutual_info
runtimes['louvain-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (symmetrized)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in G.nodes:
    im.add_node(node)
# Add each undirected edge in both directions so Infomap treats it as symmetric.
for edge in G.edges:
    im.add_link(edge[0], edge[1])
    im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-symmetrized-raw'] = mutual_info
runtimes['infomap-symmetrized-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in nG.nodes:
    im.add_node(node)
for edge in nG.edges:
    im.add_link(edge[0], edge[1])
    im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-symmetrized-noisy'] = mutual_info
runtimes['infomap-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (asymmetric)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in dG.nodes:
    im.add_node(node)
# Directed variant: each edge is added in its original direction only.
for edge in dG.edges:
    im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-asymmetric-raw'] = mutual_info
runtimes['infomap-asymmetric-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in ndG.nodes:
    im.add_node(node)
for edge in ndG.edges:
    im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-asymmetric-noisy'] = mutual_info
runtimes['infomap-asymmetric-noisy'] = runtime
###########################################################
###########################################################
# Method: GWL, symmetrized
###########################################################
# Raw
# GWL baselines use the plain adjacency matrix as the GW cost.
start = time.time()
cost = nx.adjacency_matrix(G).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-symmetrized-raw'] = mutual_info
runtimes['gwl-symmetrized-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(nG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-symmetrized-noisy'] = mutual_info
runtimes['gwl-symmetrized-noisy'] = end-start
###########################################################
###########################################################
# Method: GWL, asymmetric
###########################################################
# Raw
start = time.time()
cost = nx.adjacency_matrix(dG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-raw'] = mutual_info
runtimes['gwl-asymmetric-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(ndG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-noisy'] = mutual_info
runtimes['gwl-asymmetric-noisy'] = end-start
###########################################################
###########################################################
# Method: SpecGWL
###########################################################
# Note that the GWL pipeline above takes the true number of clusters as input.
# We now show how this number is estimated in the SpecGWL pipeline for
# a bona fide unsupervised partitioning method.
def t_selection_pipeline_undirected_wiki(G,ts,num_partitions,fraction_t_to_keep=0.25):
    """Sweep heat-kernel times ts, partition at each t, and shortlist good t values.

    Returns (mis, coups, d_gws, good_t_max, good_t_grad, rt): AMI scores,
    couplings, GW distances, the t values with highest coverage, the t values
    where coverage is flattest, and per-t runtimes.
    """
    mis, coups, d_gws, rt = [], [], [], []
    for t in ts:
        tic = time.time()
        kernel = sgw.undirected_normalized_heat_kernel(G, t)
        ami, gw_dist, coupling = process_sgwl_wiki(kernel, database, num_nodes, num_partitions)
        mis.append(ami)
        coups.append(coupling)
        d_gws.append(gw_dist)
        rt.append(time.time() - tic)
    print('Couplings Computed')
    # Coverage of the induced partition serves as an unsupervised quality proxy.
    coverages = [coverage(G, get_partition(c)) for c in coups]
    num_to_keep = int(np.round(fraction_t_to_keep * len(ts)))
    good_t_max = ts[np.argsort(coverages)][-num_to_keep:]
    good_t_grad = ts[np.argsort(np.abs(np.gradient(coverages)))][:num_to_keep]
    return mis, coups, d_gws, good_t_max, good_t_grad, rt
def t_selection_pipeline_directed_wiki(G,ts,num_partitions,fraction_t_to_keep=0.25):
    """Directed analogue of the undirected t-selection sweep.

    Returns (mis, coups, d_gws, good_t_max, good_t_grad, rt): AMI scores,
    couplings, GW distances, the t values with highest coverage, the t values
    where coverage is flattest, and per-t runtimes.
    """
    mis, coups, d_gws, rt = [], [], [], []
    for t in ts:
        tic = time.time()
        kernel = sgw.directed_heat_kernel(G, t)
        ami, gw_dist, coupling = process_sgwl_wiki(kernel, database, num_nodes, num_partitions)
        mis.append(ami)
        coups.append(coupling)
        d_gws.append(gw_dist)
        rt.append(time.time() - tic)
    print('Couplings Computed')
    # Coverage of the induced partition serves as an unsupervised quality proxy.
    coverages = [coverage(G, get_partition(c)) for c in coups]
    num_to_keep = int(np.round(fraction_t_to_keep * len(ts)))
    good_t_max = ts[np.argsort(coverages)][-num_to_keep:]
    good_t_grad = ts[np.argsort(np.abs(np.gradient(coverages)))][:num_to_keep]
    return mis, coups, d_gws, good_t_max, good_t_grad, rt
# Keeping t fixed, do a grid search to estimate the number of clusters
num_clusts = list(range(5,30))
t = 20
cost = sgw.undirected_normalized_heat_kernel(G,t)
d_gws = []
mis = []
coverages = []
modularities = []
# The kernel is fixed; only the requested cluster count varies per iteration.
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(G,partition))
    modularities.append(modularity(G,partition))
# Estimate number of clusters
# Cluster count is chosen unsupervised: maximize modularity over the grid.
estimated_clusters_raw_sym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_raw_sym)
# Now perform modularity/coverage maximizing pipeline
ts = np.linspace(5,50,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_undirected_wiki(G,ts,estimated_clusters_raw_sym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(G,partition))
modularities = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    modularities.append(modularity(G,partition))
# Report the AMI achieved at the coverage-maximizing t.
wiki_raw_sym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Raw, Sym:',wiki_raw_sym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-symmetric-raw'] = wiki_raw_sym_ami
runtimes['specgwl-symmetric-raw'] = rt[np.argmax(coverages)]
## Repeat for undirected, noisy data
num_clusts = list(range(5,30))
t = 20
cost = sgw.undirected_normalized_heat_kernel(nG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(nG,partition))
    modularities.append(modularity(nG,partition))
estimated_clusters_noisy_sym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_noisy_sym)
ts = np.linspace(5,20,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_undirected_wiki(nG,ts,estimated_clusters_noisy_sym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(nG,partition))
wiki_noisy_sym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Noisy, Sym:',wiki_noisy_sym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-symmetric-noisy'] = wiki_noisy_sym_ami
runtimes['specgwl-symmetric-noisy'] = rt[np.argmax(coverages)]
## Repeat for directed, raw data
num_clusts = list(range(5,30))
t = 20
cost = sgw.directed_heat_kernel(dG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(dG,partition))
    modularities.append(modularity(dG,partition))
estimated_clusters_raw_asym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_raw_asym)
ts = np.linspace(5,20,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_directed_wiki(dG,ts,estimated_clusters_raw_asym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(dG,partition))
wiki_raw_asym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Raw, Asym:',wiki_raw_asym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-asymmetric-raw'] = wiki_raw_asym_ami
runtimes['specgwl-asymmetric-raw'] = rt[np.argmax(coverages)]
## Repeat for directed noisy data
num_clusts = list(range(5,30))
t = 20
cost = sgw.directed_heat_kernel(ndG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(ndG,partition))
    modularities.append(modularity(ndG,partition))
estimated_clusters_noisy_asym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_noisy_asym)
# NOTE(review): a much narrower t-range than the other variants — presumably
# tuned by hand for this dataset; confirm before reusing elsewhere.
ts = np.linspace(10,14,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_directed_wiki(ndG,ts,estimated_clusters_noisy_asym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(ndG,partition))
wiki_noisy_asym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Noisy, Asym:',wiki_noisy_asym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-asymmetric-noisy'] = wiki_noisy_asym_ami
runtimes['specgwl-asymmetric-noisy'] = rt[np.argmax(coverages)]
# Dump all scores and runtimes to stdout and to a JSON results file.
print('Mutual information scores')
print(json.dumps(scores,indent=1))
print('Runtimes')
print(json.dumps(runtimes,indent=1))
with open('res_partition_wiki.txt', 'w') as outfile:
    json.dump(['Adjusted mutual information scores',
               scores,
               'Runtimes',
               runtimes], outfile,indent=1)
|
## Script to run graph partitioning experiment on Wiki dataset
# Load packages
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib
import time
import ot
from scipy import linalg
from scipy import sparse
import gromovWassersteinAveraging as gwa
import spectralGW as sgw
from geodesicVisualization import *
import json
# Load the S-GWL code
import DataIO as DataIO
import EvaluationMeasure as Eval
import GromovWassersteinGraphToolkit as GwGt
from GromovWassersteinGraphToolkit import *
import pickle
import warnings
# Load modules for network partitioning experiments
import community
from networkx.algorithms.community import greedy_modularity_communities
from networkx.algorithms.community.asyn_fluid import asyn_fluidc
from networkx.algorithms.community.quality import performance, coverage, modularity
from sklearn import metrics
from infomap import Infomap
# Breakpoint analysis package
# import ruptures as rpt
from scipy.cluster.hierarchy import dendrogram, linkage, cut_tree
from scipy.signal import find_peaks
warnings.filterwarnings("ignore")
def graph_partition_gd2(cost_s, p_s, p_t,idx2node, ot_hyperpara, trans0=None):
    """
    ** May 19, 2020: Gradient descent version of graph_partition
    Achieve a single graph partition via calculating Gromov-Wasserstein discrepancy
    between the target graph and proposed one
    Args:
        cost_s: (n_s, n_s) adjacency matrix of source graph
        p_s: (n_s, 1) the distribution of source nodes
        p_t: (n_t, 1) the distribution of target nodes
        idx2node: a dictionary {key = idx of row in cost, value = name of node}
        ot_hyperpara: a dictionary of hyperparameters
        trans0: optional (n_s, n_t) initial transport plan; None lets the
            solver pick its own initialization
    Returns:
        sub_costs: a dictionary {key: cluster idx, value: sub cost matrices}
        sub_probs: a dictionary {key: cluster idx, value: sub distribution of nodes}
        sub_idx2nodes: a dictionary {key: cluster idx, value: a dictionary
            mapping indices to nodes' names}
        trans: (n_s, n_t) the optimal transport
        d_gw: the Gromov-Wasserstein distance reported by the solver
    """
    # The target "graph" is a set of disconnected super-nodes: its cost matrix
    # is diagonal, weighted by the target distribution.
    cost_t = np.diag(p_t[:, 0])
    cost_s = np.asarray(cost_s)
    # cost_t = 1 / (1 + cost_t)
    trans, log = gwa.gromov_wasserstein_asym_fixed_initialization(cost_s, cost_t, p_s.flatten(), p_t.flatten(), trans0)
    d_gw = log['gw_dist']
    # Assign each source node to the target column receiving most of its mass.
    sub_costs, sub_probs, sub_idx2nodes = node_cluster_assignment(cost_s, trans, p_s, p_t, idx2node)
    return sub_costs, sub_probs, sub_idx2nodes, trans, d_gw
def get_partition(coup):
    """Convert a coupling matrix into a list of node-index sets (one per cluster)."""
    # Hard-assign each row (node) to its highest-mass column (cluster).
    assignments = np.argmax(coup, axis=1)
    top_cluster = np.max(assignments)
    return [set(np.argwhere(assignments == c).T[0]) for c in range(top_cluster + 1)]
# dictionaries for holding results
scores = {}
runtimes = {}
avetimes = {}
# load data
# NOTE(review): opened without a context manager; harmless in a short-lived
# script, but inconsistent with the `with` blocks below.
f = open('data/wikicats.p', 'rb')
database = pickle.load(f)
f.close()
# dG: directed Wiki graph; labels: ground-truth category per node.
dG = database['G']
labels = database['labels']
num_nodes = dG.number_of_nodes()
num_partitions = len(np.unique(labels))
# Identity mapping: node ids are already usable as matrix row indices.
idx2node = {}
for n in dG.nodes:
    idx2node[n] = n
G = dG.to_undirected()
# Load precomputed noisy version
save_name = "wiki_sym_noise.txt"
with open(save_name, "rb") as fp:
    nG = pickle.load(fp)
save_name = "wiki_asym_noise.txt"
with open(save_name, "rb") as fp:
    ndG = pickle.load(fp)
print('---Data files loaded. Computing...\n')
def process_sgwl_wiki(cost,database,num_nodes,num_partitions,verbose=False):
    """Partition the Wiki graph from a cost matrix via GW and score it.

    Args:
        cost: (num_nodes, num_nodes) cost matrix (adjacency or heat kernel)
        database: dict holding the ground-truth 'labels' array
        num_nodes: number of nodes in the graph
        num_partitions: number of clusters to extract
        verbose: if True, print the adjusted mutual information score
    Returns:
        (mutual_info, d_gw, trans): AMI against ground-truth labels, the GW
        distance, and the (num_nodes, num_partitions) coupling matrix.
    """
    # Nearly-uniform source distribution: row-sum ** 0.001 barely deviates
    # from uniform while still reflecting degree.
    p_s = np.zeros((num_nodes, 1))
    p_s[:, 0] = np.sum(cost, axis=1) ** .001
    p_s /= np.sum(p_s)
    p_t = GwGt.estimate_target_distribution({0: p_s}, dim_t=num_partitions)
    ot_dict = {'loss_type': 'L2', # the key hyperparameters of GW distance
               'ot_method': 'proximal',
               'beta': 2e-7,
               'outer_iteration': 300,
               # outer, inner iteration, error bound of optimal transport
               'iter_bound': 1e-30,
               'inner_iteration': 1,
               'sk_bound': 1e-30,
               'node_prior': 0,
               'max_iter': 200, # iteration and error bound for calcuating barycenter
               'cost_bound': 1e-16,
               'update_p': False, # optional updates of source distribution
               'lr': 0,
               'alpha': 0}
    sub_costs, sub_probs, sub_idx2nodes, trans, d_gw = graph_partition_gd2(cost,
                                                                           p_s,
                                                                           p_t,
                                                                           idx2node,
                                                                           ot_dict)
    # Hard cluster assignment: the column with the most transport mass per row.
    est_idx = np.argmax(trans, axis=1)
    mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
    if verbose:
        print('Mutual information score = {:3.3f}'.format(mutual_info))
    return mutual_info, d_gw, trans
###########################################################
###########################################################
# Method: Fluid communities (symmetrized)
###########################################################
# Raw data
# asyn_fluidc requires a connected graph; record 'failed' otherwise.
if not nx.is_connected(G):
    #print('---Fluid community requires connected graph, skipping raw version---')
    scores['fluid-symmetrized-raw'] = 'failed'
    runtimes['fluid-symmetrized-raw'] = 'failed'
else:
    time_s = time.time()
    comp = asyn_fluidc(G.to_undirected(), k=num_partitions)
    list_nodes = [frozenset(c) for c in comp]
    # Flatten the community sets into a per-node label vector.
    # NOTE(review): assumes node ids are integers in [0, num_nodes) — confirm.
    est_idx = np.zeros((num_nodes,))
    for i in range(len(list_nodes)):
        for idx in list_nodes[i]:
            est_idx[idx] = i
    runtime = time.time() - time_s
    mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
    scores['fluid-symmetrized-raw'] = mutual_info
    runtimes['fluid-symmetrized-raw'] = runtime
# Noisy data
if not nx.is_connected(nG):
    print('---Fluid community requires connected graph, skipping noisy version---')
    scores['fluid-symmetrized-noisy'] = 'failed'
    runtimes['fluid-symmetrized-noisy'] = 'failed'
else:
    time_s = time.time()
    comp = asyn_fluidc(nG.to_undirected(), k=num_partitions)
    list_nodes = [frozenset(c) for c in comp]
    est_idx = np.zeros((num_nodes,))
    for i in range(len(list_nodes)):
        for idx in list_nodes[i]:
            est_idx[idx] = i
    runtime = time.time() - time_s
    mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
    scores['fluid-symmetrized-noisy'] = mutual_info
    runtimes['fluid-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: FastGreedy (symmetrized)
###########################################################
# Raw
time_s = time.time()
list_nodes = list(greedy_modularity_communities(G))
# Flatten the community sets into a per-node label vector.
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
    for idx in list_nodes[i]:
        est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fastgreedy-symmetrized-raw'] = mutual_info
runtimes['fastgreedy-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
list_nodes = list(greedy_modularity_communities(nG))
est_idx = np.zeros((num_nodes,))
for i in range(len(list_nodes)):
    for idx in list_nodes[i]:
        est_idx[idx] = i
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['fastgreedy-symmetrized-noisy'] = mutual_info
runtimes['fastgreedy-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Louvain (symmetrized)
###########################################################
# Raw
time_s = time.time()
# best_partition returns a {node: community_id} mapping.
partition = community.best_partition(G)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
    list_nodes = [nodes for nodes in partition.keys()
                  if partition[nodes] == com]
    for idx in list_nodes:
        est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['louvain-symmetrized-raw'] = mutual_info
runtimes['louvain-symmetrized-raw'] = runtime
# Noisy
time_s = time.time()
partition = community.best_partition(nG)
est_idx = np.zeros((num_nodes,))
for com in set(partition.values()):
    list_nodes = [nodes for nodes in partition.keys()
                  if partition[nodes] == com]
    for idx in list_nodes:
        est_idx[idx] = com
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['louvain-symmetrized-noisy'] = mutual_info
runtimes['louvain-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (symmetrized)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in G.nodes:
    im.add_node(node)
# Symmetrize by adding each undirected edge in both directions.
for edge in G.edges:
    im.add_link(edge[0], edge[1])
    im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
# Read module assignments off the leaf nodes of the Infomap tree.
# NOTE(review): assumes node_id indexes directly into est_idx — confirm.
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-symmetrized-raw'] = mutual_info
runtimes['infomap-symmetrized-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in nG.nodes:
    im.add_node(node)
for edge in nG.edges:
    im.add_link(edge[0], edge[1])
    im.add_link(edge[1], edge[0])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-symmetrized-noisy'] = mutual_info
runtimes['infomap-symmetrized-noisy'] = runtime
###########################################################
###########################################################
# Method: Infomap (asymmetric)
###########################################################
# Raw
time_s = time.time()
im = Infomap()
for node in dG.nodes:
    im.add_node(node)
# Directed variant: each edge is added in its original direction only.
for edge in dG.edges:
    im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-asymmetric-raw'] = mutual_info
runtimes['infomap-asymmetric-raw'] = runtime
# Noisy
print('---Running Infomap with noisy data---\n')
time_s = time.time()
im = Infomap()
for node in ndG.nodes:
    im.add_node(node)
for edge in ndG.edges:
    im.add_link(edge[0], edge[1])
# Run the Infomap search algorithm to find optimal modules
im.run()
# print(f"Found {im.num_top_modules} modules with Infomap")
est_idx = np.zeros((num_nodes,))
for node in im.tree:
    if node.is_leaf:
        est_idx[node.node_id] = node.module_id
runtime = time.time() - time_s
mutual_info = metrics.adjusted_mutual_info_score(database['labels'], est_idx, average_method='max')
scores['infomap-asymmetric-noisy'] = mutual_info
runtimes['infomap-asymmetric-noisy'] = runtime
###########################################################
###########################################################
# Method: GWL, symmetrized
###########################################################
# NOTE: the GWL pipeline is handed the true number of clusters
# (num_partitions); cluster-count estimation happens only in SpecGWL below.
# Raw
start = time.time()
cost = nx.adjacency_matrix(G).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-symmetrized-raw'] = mutual_info
runtimes['gwl-symmetrized-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(nG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-symmetrized-noisy'] = mutual_info
runtimes['gwl-symmetrized-noisy'] = end-start
###########################################################
###########################################################
# Method: GWL, asymmetric
###########################################################
# Raw
start = time.time()
cost = nx.adjacency_matrix(dG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-raw'] = mutual_info
runtimes['gwl-asymmetric-raw'] = end-start
# Noisy
start = time.time()
cost = nx.adjacency_matrix(ndG).toarray()
mutual_info,_,_ = process_sgwl_wiki(cost,database,num_nodes,num_partitions);
end = time.time()
scores['gwl-asymmetric-noisy'] = mutual_info
runtimes['gwl-asymmetric-noisy'] = end-start
###########################################################
###########################################################
# Method: SpecGWL
###########################################################
# Note that the GWL pipeline above takes the true number of clusters as input.
# We now show how this number is estimated in the SpecGWL pipeline for
# a bona fide unsupervised partitioning method.
def t_selection_pipeline_undirected_wiki(G, ts, num_partitions, fraction_t_to_keep=0.25):
    """Sweep heat-kernel times for an undirected graph and flag promising ones.

    For each t in ts, builds the undirected normalized heat kernel, runs the
    SpecGWL partition, and records score/coupling/discrepancy/runtime.

    Returns:
        (mis, coups, d_gws, good_t_max, good_t_grad, rt): per-t AMI scores,
        couplings, GW discrepancies, the t values whose partitions have the
        highest coverage, the t values where coverage is flattest, and per-t
        runtimes in seconds.
    """
    mis, coups, d_gws, rt = [], [], [], []
    for t in ts:
        tic = time.time()
        cost = sgw.undirected_normalized_heat_kernel(G, t)
        mutual_info, d_gw, coup = process_sgwl_wiki(cost, database, num_nodes, num_partitions)
        mis.append(mutual_info)
        coups.append(coup)
        d_gws.append(d_gw)
        rt.append(time.time() - tic)
    print('Couplings Computed')
    # Score each coupling by the coverage of the partition it induces.
    coverages = []
    for coup in coups:
        coverages.append(coverage(G, get_partition(coup)))
    num_to_keep = int(np.round(fraction_t_to_keep * len(ts)))
    # Highest-coverage t values, and t values where coverage changes least.
    good_t_max = ts[np.argsort(coverages)][-num_to_keep:]
    good_t_grad = ts[np.argsort(np.abs(np.gradient(coverages)))][:num_to_keep]
    return mis, coups, d_gws, good_t_max, good_t_grad, rt
def t_selection_pipeline_directed_wiki(G, ts, num_partitions, fraction_t_to_keep=0.25):
    """Sweep heat-kernel times for a directed graph and flag promising ones.

    Directed counterpart of t_selection_pipeline_undirected_wiki: uses the
    directed heat kernel but is otherwise identical.

    Returns:
        (mis, coups, d_gws, good_t_max, good_t_grad, rt): per-t AMI scores,
        couplings, GW discrepancies, highest-coverage t values, flattest-
        coverage t values, and per-t runtimes in seconds.
    """
    mis, coups, d_gws, rt = [], [], [], []
    for t in ts:
        tic = time.time()
        cost = sgw.directed_heat_kernel(G, t)
        mutual_info, d_gw, coup = process_sgwl_wiki(cost, database, num_nodes, num_partitions)
        mis.append(mutual_info)
        coups.append(coup)
        d_gws.append(d_gw)
        rt.append(time.time() - tic)
    print('Couplings Computed')
    # Score each coupling by the coverage of the partition it induces.
    coverages = []
    for coup in coups:
        coverages.append(coverage(G, get_partition(coup)))
    num_to_keep = int(np.round(fraction_t_to_keep * len(ts)))
    # Highest-coverage t values, and t values where coverage changes least.
    good_t_max = ts[np.argsort(coverages)][-num_to_keep:]
    good_t_grad = ts[np.argsort(np.abs(np.gradient(coverages)))][:num_to_keep]
    return mis, coups, d_gws, good_t_max, good_t_grad, rt
# Keeping t fixed, do a grid search to estimate the number of clusters
num_clusts = list(range(5,30))
t = 20
cost = sgw.undirected_normalized_heat_kernel(G,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(G,partition))
    modularities.append(modularity(G,partition))
# Estimate number of clusters as the candidate maximizing modularity.
estimated_clusters_raw_sym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_raw_sym)
# Now perform modularity/coverage maximizing pipeline
ts = np.linspace(5,50,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_undirected_wiki(G,ts,estimated_clusters_raw_sym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(G,partition))
modularities = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    modularities.append(modularity(G,partition))
# Report the AMI at the coverage-maximizing t.
wiki_raw_sym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Raw, Sym:',wiki_raw_sym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-symmetric-raw'] = wiki_raw_sym_ami
runtimes['specgwl-symmetric-raw'] = rt[np.argmax(coverages)]
## Repeat for undirected, noisy data
num_clusts = list(range(5,30))
t = 20
cost = sgw.undirected_normalized_heat_kernel(nG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(nG,partition))
    modularities.append(modularity(nG,partition))
# Cluster count chosen by maximum modularity, as in the raw/sym case.
estimated_clusters_noisy_sym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_noisy_sym)
ts = np.linspace(5,20,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_undirected_wiki(nG,ts,estimated_clusters_noisy_sym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(nG,partition))
# Report the AMI at the coverage-maximizing t.
wiki_noisy_sym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Noisy, Sym:',wiki_noisy_sym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-symmetric-noisy'] = wiki_noisy_sym_ami
runtimes['specgwl-symmetric-noisy'] = rt[np.argmax(coverages)]
## Repeat for directed, raw data
num_clusts = list(range(5,30))
t = 20
cost = sgw.directed_heat_kernel(dG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(dG,partition))
    modularities.append(modularity(dG,partition))
# Cluster count chosen by maximum modularity.
estimated_clusters_raw_asym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_raw_asym)
ts = np.linspace(5,20,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_directed_wiki(dG,ts,estimated_clusters_raw_asym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(dG,partition))
# Report the AMI at the coverage-maximizing t.
wiki_raw_asym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Raw, Asym:',wiki_raw_asym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-asymmetric-raw'] = wiki_raw_asym_ami
runtimes['specgwl-asymmetric-raw'] = rt[np.argmax(coverages)]
## Repeat for directed noisy data
num_clusts = list(range(5,30))
t = 20
cost = sgw.directed_heat_kernel(ndG,t)
d_gws = []
mis = []
coverages = []
modularities = []
for j in num_clusts:
    mutual_info, d_gw, coup = process_sgwl_wiki(cost,database,num_nodes,j)
    partition = get_partition(coup)
    mis.append(mutual_info)
    d_gws.append(d_gw)
    coverages.append(coverage(ndG,partition))
    modularities.append(modularity(ndG,partition))
# Cluster count chosen by maximum modularity.
estimated_clusters_noisy_asym = num_clusts[np.argmax(modularities)]
print('Number of Clusters:',estimated_clusters_noisy_asym)
ts = np.linspace(10,14,20)
mis, coups, d_gws, good_t_max, good_t_grad, rt = t_selection_pipeline_directed_wiki(ndG,ts,estimated_clusters_noisy_asym)
coverages = []
for j in range(len(ts)):
    coup = coups[j]
    partition = get_partition(coup)
    coverages.append(coverage(ndG,partition))
# Report the AMI at the coverage-maximizing t.
wiki_noisy_asym_ami = mis[np.argmax(coverages)]
print('AMI for WIKI, Noisy, Asym:',wiki_noisy_asym_ami)
print('Occurs at t-value:',ts[np.argmax(coverages)])
scores['specgwl-asymmetric-noisy'] = wiki_noisy_asym_ami
runtimes['specgwl-asymmetric-noisy'] = rt[np.argmax(coverages)]
# Dump all scores and runtimes to stdout and to a JSON result file.
print('Mutual information scores')
print(json.dumps(scores,indent=1))
print('Runtimes')
print(json.dumps(runtimes,indent=1))
with open('res_partition_wiki.txt', 'w') as outfile:
    json.dump(['Adjusted mutual information scores',
               scores,
               'Runtimes',
               runtimes], outfile,indent=1)
|
de
| 0.379649
|
## Script to run graph partitioning experiment on Wiki dataset # Load packages # Load the S-GWL code # Load modules for network partitioning experiments # Breakpoint analysis package # import ruptures as rpt ** May 19, 2020: Gradient descent version of graph_partition Achieve a single graph partition via calculating Gromov-Wasserstein discrepancy between the target graph and proposed one Args: cost_s: (n_s, n_s) adjacency matrix of source graph p_s: (n_s, 1) the distribution of source nodes p_t: (n_t, 1) the distribution of target nodes idx2node: a dictionary {key = idx of row in cost, value = name of node} ot_hyperpara: a dictionary of hyperparameters Returns: sub_costs: a dictionary {key: cluster idx, value: sub cost matrices} sub_probs: a dictionary {key: cluster idx, value: sub distribution of nodes} sub_idx2nodes: a dictionary {key: cluster idx, value: a dictionary mapping indices to nodes' names trans: (n_s, n_t) the optimal transport # cost_t = 1 / (1 + cost_t) # dictionaries for holding results # load data # Load precomputed noisy version # the key hyperparameters of GW distance # outer, inner iteration, error bound of optimal transport # iteration and error bound for calcuating barycenter # optional updates of source distribution ########################################################### ########################################################### # Method: Fluid communities (symmetrized) ########################################################### # Raw data #print('---Fluid community requires connected graph, skipping raw version---') # Noisy data ########################################################### ########################################################### # Method: FastGreedy (symmetrized) ########################################################### # Raw # Noisy ########################################################### ########################################################### # Method: Louvain (symmetrized) 
########################################################### # Raw # Noisy ########################################################### ########################################################### # Method: Infomap (symmetrized) ########################################################### # Raw # Run the Infomap search algorithm to find optimal modules # print(f"Found {im.num_top_modules} modules with Infomap") # Noisy # Run the Infomap search algorithm to find optimal modules # print(f"Found {im.num_top_modules} modules with Infomap") ########################################################### ########################################################### # Method: Infomap (asymmetric) ########################################################### # Raw # Run the Infomap search algorithm to find optimal modules # print(f"Found {im.num_top_modules} modules with Infomap") # Noisy # Run the Infomap search algorithm to find optimal modules # print(f"Found {im.num_top_modules} modules with Infomap") ########################################################### ########################################################### # Method: GWL, symmetrized ########################################################### # Raw # Noisy ########################################################### ########################################################### # Method: GWL, asymmetric ########################################################### # Raw # Noisy ########################################################### ########################################################### # Method: SpecGWL ########################################################### # Note that the GWL pipeline above takes the true number of clusters as input. # We now show how this number is estimated in the SpecGWL pipeline for # a bona fide unsupervised partitioning method. 
# Keeping t fixed, do a grid search to estimate the number of clusters # Estimate number of clusters # Now perform modularity/coverage maximizing pipeline ## Repeat for undirected, noisy data ## Repeat for directed, raw data ## Repeat for directed noisy data
| 2.424838
| 2
|
python/tests/test_measurements.py
|
copark86/kontiki
| 94
|
6626050
|
import pytest
import numpy as np
from kontiki.measurements import StaticRsCameraMeasurement, LiftingRsCameraMeasurement, NewtonRsCameraMeasurement, \
PositionMeasurement, GyroscopeMeasurement, AccelerometerMeasurement
from kontiki.rotations import quat_to_rotation_matrix
from kontiki.sfm import Landmark, View
from kontiki.utils import safe_time_span, safe_time
from kontiki.rotations import quat_to_rotation_matrix
from trajectories.test_general import trajectory_example
projection_types = [StaticRsCameraMeasurement, LiftingRsCameraMeasurement, NewtonRsCameraMeasurement]
imu_measurement_types = [AccelerometerMeasurement, GyroscopeMeasurement]
@pytest.mark.parametrize('cls', projection_types)
def test_rscamera_measurements(cls, small_sfm):
    """Each RS-camera measurement type reprojects observations to their uv."""
    # NOTE: If this test fails, first try to clear the pytest cache using
    # python3 -m pytest --cache-clear
    views, trajectory, camera = small_sfm
    # Take the first landmark
    landmarks = {obs.landmark for v in views for obs in v.observations}
    # Make sure the measurements agree
    for lm in landmarks:
        assert len(lm.observations) >= 2
        # observations[0] is skipped — presumably the reference observation
        # (see test_rscamera_measurements_weights) — TODO confirm.
        for obs in lm.observations[1:]:
            m = cls(camera, obs)
            yhat = m.project(trajectory)
            np.testing.assert_almost_equal(yhat, obs.uv)
# Newton method should handle noise in the projection
# Beware that this doesn't seem to catch faulty derivatives for the camera
def test_newton_rscamera_measurements_with_noise(small_sfm):
    """Newton RS projection stays within half a row despite 2px uv noise."""
    # NOTE: If this test fails, first try to clear the pytest cache using
    # python3 -m pytest --cache-clear
    views, trajectory, camera = small_sfm
    # Take the first landmark
    landmarks = {obs.landmark for v in views for obs in v.observations}
    # The projection error should be below half a row, because that is the threshold we use to terminate the Newton algorithm
    for lm in landmarks:
        assert len(lm.observations) >= 2
        for obs in lm.observations[1:]:
            uv_org = obs.uv
            # Perturb the observation with Gaussian noise (sigma = 2 px).
            obs.uv = obs.uv + np.random.normal(0, 2.0, size=2)
            m = NewtonRsCameraMeasurement(camera, obs)
            yhat = m.project(trajectory)
            # Only the row (y) coordinate is bounded by the Newton threshold.
            row_diff = yhat[1] - uv_org[1]
            assert np.abs(row_diff) <= 0.5
@pytest.mark.parametrize('cls', projection_types)
def test_rscamera_measurements_attribute_access(cls, camera):
    """Constructor arguments are exposed as .camera and .observation."""
    landmark = Landmark()
    frames = [View(i, i/30) for i in range(2)]

    def random_point():
        # Uniformly random pixel inside the camera's image plane.
        u = np.random.uniform(0, camera.cols)
        v = np.random.uniform(0, camera.rows)
        return np.array([u, v])

    ref_obs, obs = [frame.create_observation(landmark, random_point()) for frame in frames]
    landmark.reference = ref_obs
    measurement = cls(camera, obs)
    assert measurement.camera is camera
    assert measurement.observation is obs
@pytest.mark.parametrize('cls', projection_types)
def test_rscamera_measurements_weights(cls, small_sfm):
    """Measurement error scales linearly with the weight parameter."""
    views, trajectory, camera = small_sfm
    # Pick a random non-reference observation of a random landmark.
    lm = np.random.choice(list({obs.landmark for v in views for obs in v.observations}))
    obs = np.random.choice(lm.observations[1:])
    assert not obs.is_reference
    huber_c = 2.
    # Default weight must be 1; e0 is the unweighted baseline error.
    m0 = cls(camera, obs, huber_c)
    assert m0.weight == 1.
    e0 = m0.error(trajectory)
    for w in [1, 2, 0.43]:
        m = cls(camera, obs, huber_c, w)
        e = m.error(trajectory)
        np.testing.assert_equal(e, e0 * w)
def test_camera_errors_size(trajectory, camera_measurements):
    """Lifting measurements yield 3-vector residuals; other types 2-vectors."""
    for measurement in camera_measurements:
        residual = measurement.error(trajectory)
        expected_size = 3 if issubclass(type(measurement), LiftingRsCameraMeasurement) else 2
        assert residual.size == expected_size
def test_position_measurements(trajectory_example):
    """PositionMeasurement.measure reproduces known trajectory positions."""
    trajectory, example_data = trajectory_example
    for t, expected in example_data.position:
        measurement = PositionMeasurement(t, expected)
        np.testing.assert_almost_equal(measurement.measure(trajectory), expected)
def test_gyroscope_measurements(trajectory, imu):
    """Gyroscope measurements match body-frame angular velocity (bias removed)."""
    times = np.linspace(*safe_time_span(trajectory, 3.0), num=10, endpoint=False)

    def true_gyro(t):
        # Rotate the world-frame angular velocity into the body frame.
        q = trajectory.orientation(t)
        R = quat_to_rotation_matrix(q)
        w_world = trajectory.angular_velocity(t)
        w_body = R.T @ w_world
        return w_body

    for t in times:
        w = true_gyro(t)
        m = GyroscopeMeasurement(imu, t, w)
        w_hat = m.measure(trajectory)
        # Biased IMU models expose gyroscope_bias; unbiased ones do not.
        try:
            w_hat -= imu.gyroscope_bias
        except AttributeError:
            pass  # No bias to remove
        np.testing.assert_almost_equal(w_hat, w)
def test_accelerometer_measurements(trajectory, imu):
    """Accelerometer measurements match body-frame specific force."""
    times = np.linspace(*safe_time_span(trajectory, 3.0), num=10, endpoint=False)
    from kontiki.trajectories import UniformSE3SplineTrajectory
    if type(trajectory) == UniformSE3SplineTrajectory:
        pytest.xfail("SE3 fails because second order derivative is not the same as body acceleration")

    def true_acc(t):
        # Specific force: world acceleration minus gravity, in the body frame.
        q = trajectory.orientation(t)
        acc_world = trajectory.acceleration(t)
        R = quat_to_rotation_matrix(q)
        gravity = np.array([0, 0, 9.80665])
        acc = R.T @ (acc_world - gravity)
        return acc

    # Currently fails for ConstantBiasImu since we don't take bias into account
    for t in times:
        acc = true_acc(t)
        m = AccelerometerMeasurement(imu, t, acc)
        acc_hat = m.measure(trajectory)
        try:
            acc_hat -= imu.accelerometer_bias
        except AttributeError:
            pass  # No bias to remove
        np.testing.assert_almost_equal(acc_hat, acc)
@pytest.mark.parametrize('mcls', imu_measurement_types)
def test_imu_measurement_same_imu(mcls, imu):
    """The measurement keeps a reference to the exact IMU it was built with."""
    t = 1.0
    measurement = mcls(imu, t, np.random.uniform(-1, 1, size=3))
    print(imu, measurement.imu)
    assert measurement.imu is imu
@pytest.mark.parametrize('mcls', imu_measurement_types)
def test_imu_measurement_time_offset(mcls, imu, split_trajectory):
    """Measuring at t with offset d equals measuring at t-d with no offset."""
    t = safe_time(split_trajectory)
    d = np.random.uniform(-imu.max_time_offset, imu.max_time_offset)
    v = np.random.uniform(-1, 1, size=3)
    # Baseline: no time offset applied.
    m1 = mcls(imu, t, v)
    y1 = m1.measure(split_trajectory)
    # Applying offset d while shifting the timestamp by -d must be a no-op.
    imu.time_offset = d
    m2 = mcls(imu, t - d, v)
    y2 = m2.measure(split_trajectory)
    np.testing.assert_equal(y1, y2)
@pytest.mark.parametrize('mcls', projection_types)
def test_camera_measurement_time_offset(mcls, camera, split_trajectory):
    """Camera time offset d is equivalent to shifting view times by -d."""
    t1, t2 = safe_time_span(split_trajectory, 1)
    t1 += camera.max_time_offset
    # NOTE(review): t2 is computed/adjusted but never used below.
    t2 -= camera.max_time_offset
    d = np.random.uniform(-camera.max_time_offset, camera.max_time_offset)
    # Baseline scene: one landmark seen in two views, measured without offset.
    lm = Landmark()
    lm.inverse_depth = np.random.uniform(0.01, 1)
    views = [View(i, t) for i, t in enumerate([t1, t1+0.23])]
    ref, obs = [v.create_observation(lm, np.random.uniform(100, 900, size=2)) for v in views]
    lm.reference = ref
    m1 = mcls(camera, obs)
    y1 = m1.measure(split_trajectory)
    # Equivalent scene: identical observations, view times shifted by -d,
    # camera carrying time_offset = d. Measurements must agree.
    new_lm = Landmark()
    new_lm.inverse_depth = lm.inverse_depth
    new_views = [View(v.frame_nr, v.t0 - d) for v in views]
    new_ref, new_obs = [v.create_observation(new_lm, o.uv) for v, o in zip(new_views, [ref, obs])]
    new_lm.reference = new_ref
    camera.time_offset = d
    m2 = mcls(camera, new_obs)
    y2 = m2.measure(split_trajectory)
    np.testing.assert_almost_equal(y1, y2)
|
import pytest
import numpy as np
from kontiki.measurements import StaticRsCameraMeasurement, LiftingRsCameraMeasurement, NewtonRsCameraMeasurement, \
PositionMeasurement, GyroscopeMeasurement, AccelerometerMeasurement
from kontiki.rotations import quat_to_rotation_matrix
from kontiki.sfm import Landmark, View
from kontiki.utils import safe_time_span, safe_time
from kontiki.rotations import quat_to_rotation_matrix
from trajectories.test_general import trajectory_example
projection_types = [StaticRsCameraMeasurement, LiftingRsCameraMeasurement, NewtonRsCameraMeasurement]
imu_measurement_types = [AccelerometerMeasurement, GyroscopeMeasurement]
@pytest.mark.parametrize('cls', projection_types)
def test_rscamera_measurements(cls, small_sfm):
# NOTE: If this test fails, first try to clear the pytest cache using
# python3 -m pytest --cache-clear
views, trajectory, camera = small_sfm
# Take the first landmark
landmarks = {obs.landmark for v in views for obs in v.observations}
# Make sure the measurements agree
for lm in landmarks:
assert len(lm.observations) >= 2
for obs in lm.observations[1:]:
m = cls(camera, obs)
yhat = m.project(trajectory)
np.testing.assert_almost_equal(yhat, obs.uv)
# Newton method should handle noise in the projection
# Beware that this doesn't seem to catch faulty derivatives for the camera
def test_newton_rscamera_measurements_with_noise(small_sfm):
# NOTE: If this test fails, first try to clear the pytest cache using
# python3 -m pytest --cache-clear
views, trajectory, camera = small_sfm
# Take the first landmark
landmarks = {obs.landmark for v in views for obs in v.observations}
# The projection error should be below half a row, because that is the threshold we use to terminate the Newton algorithm
for lm in landmarks:
assert len(lm.observations) >= 2
for obs in lm.observations[1:]:
uv_org = obs.uv
obs.uv = obs.uv + np.random.normal(0, 2.0, size=2)
m = NewtonRsCameraMeasurement(camera, obs)
yhat = m.project(trajectory)
row_diff = yhat[1] - uv_org[1]
assert np.abs(row_diff) <= 0.5
@pytest.mark.parametrize('cls', projection_types)
def test_rscamera_measurements_attribute_access(cls, camera):
lm = Landmark()
views = [View(i, i/30) for i in range(2)]
def random_point():
return np.array([np.random.uniform(0, camera.cols), np.random.uniform(0, camera.rows)])
ref, obs = [v.create_observation(lm, random_point()) for v in views]
lm.reference = ref
m = cls(camera, obs)
assert m.camera is camera
assert m.observation is obs
@pytest.mark.parametrize('cls', projection_types)
def test_rscamera_measurements_weights(cls, small_sfm):
views, trajectory, camera = small_sfm
lm = np.random.choice(list({obs.landmark for v in views for obs in v.observations}))
obs = np.random.choice(lm.observations[1:])
assert not obs.is_reference
huber_c = 2.
m0 = cls(camera, obs, huber_c)
assert m0.weight == 1.
e0 = m0.error(trajectory)
for w in [1, 2, 0.43]:
m = cls(camera, obs, huber_c, w)
e = m.error(trajectory)
np.testing.assert_equal(e, e0 * w)
def test_camera_errors_size(trajectory, camera_measurements):
for m in camera_measurements:
e = m.error(trajectory)
if issubclass(type(m), LiftingRsCameraMeasurement):
assert e.size == 3
else:
assert e.size == 2
def test_position_measurements(trajectory_example):
trajectory, example_data = trajectory_example
expected_positions = example_data.position
for t, x in expected_positions:
m = PositionMeasurement(t, x)
xhat = m.measure(trajectory)
np.testing.assert_almost_equal(xhat, x)
def test_gyroscope_measurements(trajectory, imu):
times = np.linspace(*safe_time_span(trajectory, 3.0), num=10, endpoint=False)
def true_gyro(t):
q = trajectory.orientation(t)
R = quat_to_rotation_matrix(q)
w_world = trajectory.angular_velocity(t)
w_body = R.T @ w_world
return w_body
for t in times:
w = true_gyro(t)
m = GyroscopeMeasurement(imu, t, w)
w_hat = m.measure(trajectory)
try:
w_hat -= imu.gyroscope_bias
except AttributeError:
pass # No bias to remove
np.testing.assert_almost_equal(w_hat, w)
def test_accelerometer_measurements(trajectory, imu):
times = np.linspace(*safe_time_span(trajectory, 3.0), num=10, endpoint=False)
from kontiki.trajectories import UniformSE3SplineTrajectory
if type(trajectory) == UniformSE3SplineTrajectory:
pytest.xfail("SE3 fails because second order derivative is not the same as body acceleration")
def true_acc(t):
q = trajectory.orientation(t)
acc_world = trajectory.acceleration(t)
R = quat_to_rotation_matrix(q)
gravity = np.array([0, 0, 9.80665])
acc = R.T @ (acc_world - gravity)
return acc
# Currently fails for ConstantBiasImu since we don't take bias into account
for t in times:
acc = true_acc(t)
m = AccelerometerMeasurement(imu, t, acc)
acc_hat = m.measure(trajectory)
try:
acc_hat -= imu.accelerometer_bias
except AttributeError:
pass # No bias to remove
np.testing.assert_almost_equal(acc_hat, acc)
@pytest.mark.parametrize('mcls', imu_measurement_types)
def test_imu_measurement_same_imu(mcls, imu):
t = 1.0
m = mcls(imu, t, np.random.uniform(-1, 1, size=3))
print(imu, m.imu)
assert m.imu is imu
@pytest.mark.parametrize('mcls', imu_measurement_types)
def test_imu_measurement_time_offset(mcls, imu, split_trajectory):
t = safe_time(split_trajectory)
d = np.random.uniform(-imu.max_time_offset, imu.max_time_offset)
v = np.random.uniform(-1, 1, size=3)
m1 = mcls(imu, t, v)
y1 = m1.measure(split_trajectory)
imu.time_offset = d
m2 = mcls(imu, t - d, v)
y2 = m2.measure(split_trajectory)
np.testing.assert_equal(y1, y2)
@pytest.mark.parametrize('mcls', projection_types)
def test_camera_measurement_time_offset(mcls, camera, split_trajectory):
t1, t2 = safe_time_span(split_trajectory, 1)
t1 += camera.max_time_offset
t2 -= camera.max_time_offset
d = np.random.uniform(-camera.max_time_offset, camera.max_time_offset)
lm = Landmark()
lm.inverse_depth = np.random.uniform(0.01, 1)
views = [View(i, t) for i, t in enumerate([t1, t1+0.23])]
ref, obs = [v.create_observation(lm, np.random.uniform(100, 900, size=2)) for v in views]
lm.reference = ref
m1 = mcls(camera, obs)
y1 = m1.measure(split_trajectory)
new_lm = Landmark()
new_lm.inverse_depth = lm.inverse_depth
new_views = [View(v.frame_nr, v.t0 - d) for v in views]
new_ref, new_obs = [v.create_observation(new_lm, o.uv) for v, o in zip(new_views, [ref, obs])]
new_lm.reference = new_ref
camera.time_offset = d
m2 = mcls(camera, new_obs)
y2 = m2.measure(split_trajectory)
np.testing.assert_almost_equal(y1, y2)
|
en
| 0.844585
|
# NOTE: If this test fails, first try to clear the pytest cache using # python3 -m pytest --cache-clear # Take the first landmark # Make sure the measurements agree # Newton method should handle noise in the projection # Beware that this doesn't seem to catch faulty derivatives for the camera # NOTE: If this test fails, first try to clear the pytest cache using # python3 -m pytest --cache-clear # Take the first landmark # The projection error should be below half a row, because that is the threshold we use to terminate the Newton algorithm # No bias to remove # Currently fails for ConstantBiasImu since we don't take bias into account # No bias to remove
| 2.095336
| 2
|