file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
SpatialGP.py | import os
import time
import numpy as np
import collections
import scipy
import scipy.sparse
import pyublas
import hashlib
from sigvisa.gpr import munge, kernels, evaluate, learn, distributions, plot
from sigvisa.gpr.gp import GaussianProcess
from sigvisa.gpr.util import marshal_fn, unmarshal_fn
from sigvisa.models.spatial_regression.baseline_models import ParamModel
from sigvisa.source.event import Event
from sigvisa.utils.cover_tree import VectorTree, MatrixTree
start_params_dad_log = {"coda_decay": [.022, .0187, 1.00, .14, .1],
"amp_transfer": [1.1, 3.4, 9.5, 0.1, .31],
"peak_offset": [2.7, 3.4, 2, .7, 0.1]
}
start_params_lld = {"coda_decay": [.022, .0187, 50.00, 1.0],
"amp_transfer": [1.1, 3.4, 100.00, 1.0],
"peak_offset": [2.7, 3.4, 50.00, 1.0]
}
start_params_composite = {"coda_decay": [.022, .01, 1.0, .01, 100.0, .01, 3.0, .01, 100.0],
"amp_transfer": [1.1, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0],
"peak_offset": [2.7, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0],
}
start_params = {"dad_log": start_params_dad_log,
"lld": start_params_lld,
"composite": start_params_composite
}
X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5)
def dist_azi_depth_distfn_log(lldda1, lldda2, params):
import sigvisa.utils.geog as geog
import numpy as np
azi_scale = params[0]
depth_scale = params[1]
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
avg_dist = (lldda1[3] + lldda2[3]) / 2
azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist)
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2)
return r
def dist_azi_depth_distfn_deriv_log(i, lldda1, lldda2, params):
import numpy as np
import sigvisa.utils.geog as geog
azi_scale = params[0]
depth_scale = params[1]
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
avg_dist = (lldda1[3] + lldda2[3]) / 2
azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist + 1)
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2)
if i == 0: # deriv wrt azi_scale
deriv = azi_scale * azi ** 2 / r if r != 0 else 0
elif i == 1: # deriv wrt depth_scale
deriv = depth_scale * depth ** 2 / r if r != 0 else 0
else:
raise Exception("unknown parameter number %d" % i)
return deriv
def lon_lat_depth_distfn(lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2]))
depth = ( lldda1[2] - lldda2[2] ) * params[0]
r = np.sqrt(ll ** 2 + depth ** 2)
return r
def lon_lat_depth_distfn_deriv(i, lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
assert (i == 0)
ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2]))
depth = ( lldda1[2] - lldda2[2] ) * params[0]
r = np.sqrt(ll ** 2 + depth ** 2)
return ( params[0] * ( lldda1[2] - lldda2[2] )**2 ) / r if r != 0 else 0.0
def logdist_diff_distfn(lldda1, lldda2, params=None):
import numpy as np
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
return dist
def azi_diff_distfn(lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
azi = np.abs ( geog.degdiff(lldda1[4], lldda2[4]) )
return azi
def logdepth_diff_distfn(lldda1, lldda2, params=None):
import numpy as np
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
return depth
X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5)
def spatial_kernel_from_str(kernel_str, target=None, params=None):
params = params if params is not None else start_params[kernel_str][target]
priors = [None,] * len(params) # TODO: use real priors
if kernel_str == "dad_log":
k = kernels.setup_kernel(name='distfn',
params = params,
extra=[dist_azi_depth_distfn_log, dist_azi_depth_distfn_deriv_log],
)
elif kernel_str == "lld":
noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
local_kernel = kernels.DistFNKernel(params=params[1:4], priors=priors[1:4],
distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv)
k = noise_kernel + local_kernel
elif kernel_str == "composite":
# assume we are passed the following params/priors:
# 0 : sigma2_n -- noise variance
# 1 : sigma2_f_dist -- function variance wrt dist_diff
# 2 : w_dist -- length scale for dist_diff
# 3 : sigma2_f_azi -- function variance wrt azi_diff
# 4 : w_azi -- length scale for azi_diff
# 5 : sigma2_f_depth -- function variance wrt depth_diff
# 6 : w_depth -- length scale for depth_diff
# 7 : sigma2_f_local -- function variance wrt local_dist
# 8 : w_local -- length scale for local_dist
noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
distdiff_kernel = kernels.DistFNKernel(params=params[1:3], priors=priors[1:3],
distfn = logdist_diff_distfn, deriv=None)
azidiff_kernel = kernels.DistFNKernel(params=params[3:5], priors=priors[3:5],
distfn = azi_diff_distfn, deriv=None)
depthdiff_kernel = kernels.DistFNKernel(params=params[5:7], priors=priors[5:7],
distfn = logdepth_diff_distfn, deriv=None)
local_kernel = kernels.DistFNKernel(params=params[7:10], priors=priors[7:10],
distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv)
k = noise_kernel + distdiff_kernel + azidiff_kernel + depthdiff_kernel + local_kernel
return k
"""
def spatial_kernel_from_str(target=None, params=None):
params = params if params is not None else start_params_lld[target]
return params
"""
class SpatialGP(GaussianProcess, ParamModel):
def init_hyperparams(self, hyperparams):
(noise_var, signal_var, ll_scale, d_scale) = hyperparams
self.noise_var = noise_var
self.dfn_params = np.array((ll_scale, d_scale), dtype=np.float)
self.wfn_params = np.array((signal_var,), copy=True, dtype=np.float)
def build_kernel_matrix(self, X, hyperparams):
self.init_hyperparams(hyperparams)
vt = VectorTree(X[0:1,:], 1, "lld", self.dfn_params)
K = vt.kernel_matrix(X, X, "se", self.wfn_params, False) + self.noise_var * np.eye(len(X), dtype=np.float64)
K += np.eye(K.shape[0], dtype=np.float64) * 1e-8 # try to avoid losing
# positive-definiteness
# to numeric issues
return K
def invert_kernel_matrix(self, K):
L = None
alpha = None
try:
L = scipy.linalg.cholesky(K, lower=True)
alpha = scipy.linalg.cho_solve((L, True), self.y)
Kinv = scipy.linalg.inv(K)
except np.linalg.linalg.LinAlgError:
#u,v = np.linalg.eig(K)
#print K, u
#import pdb; pdb.set_trace()
raise
except ValueError:
raise
return alpha, L, Kinv
def build_parametric_model(self, alpha, Kinv_sp, H, b, B):
# notation follows section 2.7 of Rasmussen and Williams
Binv = scipy.linalg.inv(B)
tmp = np.dot(H, alpha) + np.dot(Binv, b) # H * K^-1 * y + B^-1 * b
HKinv = H * Kinv_sp
M_inv = Binv + np.dot(HKinv, H.T) # here M = (inv(B) +
# H*K^-1*H.T)^-1 is the
# posterior covariance
# matrix on the params.
c = scipy.linalg.cholesky(M_inv, lower=True) # c = sqrt(inv(B) + H*K^-1*H.T)
beta_bar = scipy.linalg.cho_solve((c, True), tmp)
invc = scipy.linalg.inv(c)
return c, beta_bar, invc, HKinv
def sparsify(self, M):
return scipy.sparse.csr_matrix(M * (np.abs(M) > self.sparse_threshold))
def sort_events(self, X, y):
combined = np.hstack([X, np.reshape(y, (-1, 1))])
combined_sorted = np.array(sorted(combined, key = lambda x: x[0]), dtype=float)
X_sorted = np.array(combined_sorted[:, :-1], copy=True, dtype=float)
y_sorted = combined_sorted[:, -1].flatten()
return X_sorted, y_sorted
def __init__(self, X=None, y=None,
fname=None, basisfns=None,
hyperparams=None,
param_mean=None, param_cov=None,
compute_ll=False,
compute_grad=False,
sparse_threshold=1e-20,
sta = None,
sort_events=True):
try:
ParamModel.__init__(self, sta=sta)
except KeyError:
pass
if fname is not None:
self.load_trained_model(fname)
else:
if sort_events:
X, y = self.sort_events(X, y) # arrange events by
# lon/lat, as a
# heuristic to expose
# block structure in the
# kernel matrix
self.hyperparams = np.array(hyperparams)
self.sparse_threshold = sparse_threshold
self.X = X
self.n = X.shape[0]
self.basisfns = basisfns
mu, self.y, H = self.setup_mean("parametric", X, y)
# train model
#t0 = time.time()
K = self.build_kernel_matrix(self.X, hyperparams)
#t1 = time.time()
self.alpha, L, Kinv = self.invert_kernel_matrix(K)
Kinv_tri = 2 * np.tril(Kinv, k=0) - np.diag(np.diag(Kinv))
#t2 = time.time()
self.Kinv_sp = self.sparsify(Kinv)
self.Kinv_sp_tri = self.sparsify(Kinv_tri)
#t3 = time.time()
self.c,self.beta_bar, self.invc, self.HKinv = self.build_parametric_model(self.alpha,
self.Kinv_sp, H,
b=param_mean,
B=param_cov)
#t4 = time.time()
r = self.y - np.dot(H.T, self.beta_bar)
self.alpha_r = scipy.linalg.cho_solve((L, True), r)
#t5 = time.time()
self.build_point_tree(HKinv = self.HKinv, Kinv = Kinv_tri, Kinv_sp=self.Kinv_sp_tri, alpha_r = self.alpha_r)
#t6 = time.time()
# precompute training set log likelihood, so we don't need
# to keep L around.
z = np.dot(H.T, param_mean) - self.y
B = param_cov
if compute_ll:
self._compute_marginal_likelihood(L=L, z=z, B=B, H=H, K=K, Kinv_sp=self.Kinv_sp_tri)
else:
self.ll = -np.inf
#t7 = time.time()
if compute_grad:
self.ll_grad = self._log_likelihood_gradient(z=z, K=K, H=H, B=B, Kinv=Kinv)
np.save('spatialK.npy', K)
np.save('spatialKinv.npy', Kinv)
#t8 = time.time()
"""
print t1-t0
print t2-t1
print t3-t2
print t4-t3
print t5-t4
print t6-t5
print t7-t6
print t8-t7
"""
def build_point_tree(self, HKinv, Kinv, Kinv_sp, alpha_r):
|
def predict(self, cond, eps=1e-8):
X1 = self.standardize_input_array(cond).astype(np.float)
gp_pred = np.array([self.predict_tree.weighted_sum(0, np.reshape(x, (1,-1)), eps, "se", self.wfn_params) for x in X1])
H = self.get_data_features(X1)
mean_pred = np.reshape(np.dot(H.T, self.beta_bar), gp_pred.shape)
gp_pred += mean_pred
if len(gp_pred) == 1:
gp_pred = gp_pred[0]
return gp_pred
def kernel(self, X1, X2, identical=False):
K = self.predict_tree.kernel_matrix(X1, X2, "se", self.wfn_params, False)
if identical:
K += self.noise_var * np.eye(K.shape[0])
return K
def covariance(self, cond, include_obs=False, parametric_only=False, pad=1e-8):
"""
Compute the posterior covariance matrix at a set of points given by the rows of X1.
Default is to compute the covariance of f, the latent function values. If obs_covar
is True, we instead compute the covariance of y, the observed values.
By default, we add a tiny bit of padding to the diagonal to counteract any potential
loss of positive definiteness from numerical issues. Setting pad=0 disables this.
"""
X1 = self.standardize_input_array(cond)
m = X1.shape[0]
Kstar = self.get_query_K(X1)
if not parametric_only:
tmp = self.Kinv_sp_tri * Kstar
qf = np.dot(Kstar.T, tmp)
k = self.kernel(X1,X1, identical=include_obs)
gp_cov = k - qf
else:
gp_cov = np.zeros((m,m))
R = self.query_R
tmp = np.dot(self.invc, R)
mean_cov = np.dot(tmp.T, tmp)
gp_cov += mean_cov
gp_cov += pad * np.eye(gp_cov.shape[0])
return gp_cov
def covariance_double_tree(self, cond, include_obs=False, parametric_only=False, pad=1e-8, eps=1e-8):
X1 = self.standardize_input_array(cond)
m = X1.shape[0]
d = len(self.basisfns)
t0 = time.time()
if not parametric_only:
k = self.kernel(X1, X1, identical=include_obs)
qf = self.double_tree.quadratic_form(X1, eps, "se", self.wfn_params)
gp_cov = k - qf
else:
gp_cov = np.zeros((m,m))
t1 = time.time()
H = np.array([[f(x) for x in X1] for f in self.basisfns], dtype=np.float64)
HKinvKstar = np.zeros((d, m))
for i in range(d):
for j in range(m):
HKinvKstar[i,j] = self.cov_tree.weighted_sum(i, X1[j:j+1,:], eps, "se", self.wfn_params)
R = H - HKinvKstar
v = np.dot(self.invc, R)
mc = np.dot(v.T, v)
gp_cov += mc
t2 = time.time()
self.nptime = (t1-t0)
self.ptime = (t2-t1)
gp_cov += pad * np.eye(m)
return gp_cov
def variance(self, cond, **kwargs):
X1 = self.standardize_input_array(cond)
result = GaussianProcess.variance(self, X1, **kwargs)
if len(result) == 1:
result = result[0]
return result
def sample(self, cond):
X1 = self.standardize_input_array(cond)
result = GaussianProcess.sample(self, X1)
if len(result) == 1:
result = result[0]
return result
def log_p(self, x, cond):
X1 = self.standardize_input_array(cond)
x = x if isinstance(x, collections.Iterable) else (x,)
result = GaussianProcess.posterior_log_likelihood(self, X1, x)
return result
def pack_npz(self):
d = dict()
d['c'] = self.c
d['beta_bar'] = self.beta_bar
d['invc'] = self.invc
d['HKinv'] = self.HKinv
d['basisfns'] = np.array([marshal_fn(f) for f in self.basisfns], dtype=object)
d['X'] = self.X,
d['y'] =self.y,
d['alpha'] =self.alpha,
d['hyperparams'] = self.hyperparams
d['Kinv_sp_tri'] =self.Kinv_sp_tri,
#d['Kinv_sp'] =self.Kinv_sp,
d['sparse_threshold'] =self.sparse_threshold,
d['ll'] =self.ll,
d['alpha_r'] = self.alpha_r
return d
def save_trained_model(self, filename):
"""
Serialize the model to a file.
"""
d = self.pack_npz()
with open(filename, 'wb') as f:
np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)
def unpack_npz(self, npzfile):
self.X = npzfile['X'][0]
self.y = npzfile['y'][0]
self.alpha = npzfile['alpha'][0]
self.hyperparams = npzfile['hyperparams']
self.init_hyperparams(self.hyperparams)
self.Kinv_sp_tri = npzfile['Kinv_sp_tri'][0]
#self.Kinv_sp = npzfile['Kinv_sp'][0]
self.sparse_threshold = npzfile['sparse_threshold'][0]
self.ll = npzfile['ll'][0]
self.basisfns = npzfile['basisfns']
self.basisfns = [unmarshal_fn(code) for code in self.basisfns]
self.beta_bar = npzfile['beta_bar']
self.c = npzfile['c']
self.invc = npzfile['invc']
self.HKinv = npzfile['HKinv']
self.alpha_r = npzfile['alpha_r']
def load_trained_model(self, filename):
npzfile = np.load(filename)
self.unpack_npz(npzfile)
super(SpatialGP, self).__unrepr_base_params__(str(npzfile['base_str']))
del npzfile.f
npzfile.close()
self.n = self.X.shape[0]
self.build_point_tree(HKinv=self.HKinv, Kinv=self.Kinv_sp_tri.todense(), Kinv_sp=self.Kinv_sp_tri, alpha_r=self.alpha_r)
def _compute_marginal_likelihood(self, L, z, B, H, K, Kinv_sp):
# here we follow eqn 2.43 in R&W
#
# let z = H.T*b - y, then we want
# .5 * z.T * (K + H.T * B * H)^-1 * z
# minus some other stuff (dealt with below).
# by the matrix inv lemma,
# (K + H.T * B * H)^-1
# = Kinv - Kinv*H.T*(Binv + H*Kinv*H.T)^-1*H*Kinv
# = Kinv - Kinv*H.T* invc.T * invc *H*Kinv
# = Kinv - (invc * HKinv)^T (invc * HKinv)
#
# so we have z.T * Kinv * z - z.T * (other thing) * z
# i.e.: term1 - term2
# in the notation of the code.
tmp1 = Kinv_sp * z
term1 = np.dot(z.T, tmp1)
tmp2 = np.dot(self.HKinv, z)
tmp3 = np.dot(self.invc, tmp2)
term2 = np.dot(tmp3.T, tmp3)
# following eqn 2.43 in R&W, we want to compute
# log det(K + H.T * B * H). using the matrix inversion
# lemma, we instead compute
# log det(K) + log det(B) + log det(B^-1 + H*K^-1*H.T)
# to compute log(det(K)), we use the trick that the
# determinant of a symmetric pos. def. matrix is the
# product of squares of the diagonal elements of the
# Cholesky factor
ld2_K = np.log(np.diag(L)).sum()
ld2 = np.log(np.diag(self.c)).sum() # det( B^-1 - H * K^-1 * H.T )
ld_B = np.log(np.linalg.det(B))
# eqn 2.43 in R&W, using the matrix inv lemma
self.ll = -.5 * (term1 - term2 + self.n * np.log(2*np.pi) + ld_B) - ld2_K - ld2
def _log_likelihood_gradient(self, z, K, H, B, Kinv):
"""
Gradient of the training set log likelihood with respect to the kernel hyperparams.
"""
nparams = 4
grad = np.zeros((nparams,))
#t0 = time.time()
tmp = np.dot(self.invc, self.HKinv)
#t1 = time.time()
K_HBH_inv = Kinv - np.dot(tmp.T, tmp)
#t2 = time.time()
alpha_z = np.dot(K_HBH_inv, z)
#t3 = time.time()
#print "gradient: %f %f %f" % (t1-t0, t2-t1, t3-t2)
for i in range(nparams):
tA = time.time()
if (i == 0):
dKdi = np.eye(self.n)
else:
dKdi = self.predict_tree.kernel_deriv_wrt_i(self.X, self.X, "se", self.wfn_params, i-1)
dlldi = .5 * np.dot(alpha_z.T, np.dot(dKdi, alpha_z))
tB = time.time()
# here we use the fact:
# trace(AB) = sum_{ij} A_ij * B_ij
dlldi -= .5 * np.sum(np.sum(K_HBH_inv.T * dKdi))
grad[i] = dlldi
tC = time.time()
print " %d: %f %f" % (i, tB-tA, tC-tB)
return grad
def spatialgp_nll_ngrad(**kwargs):
"""
Get both the negative log-likelihood and its gradient
simultaneously (more efficient than doing it separately since we
only create one new GP object, which only constructs the kernel
matrix once, etc.).
"""
try:
# print "optimizing params", kernel_params
gp = SpatialGP(compute_ll=True, compute_grad=True, **kwargs)
nll = -1 * gp.ll
ngrad = -1 * gp.ll_grad
except np.linalg.linalg.LinAlgError as e:
print "warning: lin alg error (%s) in likelihood computation, returning likelihood -inf" % str(e)
nll = np.float("inf")
ngrad = None
except ValueError as e:
print "warning: value error (%s) in likelihood computation, returning likelihood -inf" % str(e)
nll = np.float("inf")
ngrad = None
return nll, ngrad
| self.predict_tree = VectorTree(self.X, 1, "lld", self.dfn_params)
self.predict_tree.set_v(0, alpha_r.astype(np.float))
d = len(self.basisfns)
self.cov_tree = VectorTree(self.X, d, "lld", self.dfn_params)
HKinv = HKinv.astype(np.float)
for i in range(d):
self.cov_tree.set_v(i, HKinv[i, :])
nzr, nzc = Kinv_sp.nonzero()
self.double_tree = MatrixTree(self.X, nzr, nzc, "lld", self.dfn_params)
kkk = np.matrix(Kinv, copy=True, dtype=np.float64)
self.double_tree.set_m(kkk) | identifier_body |
SpatialGP.py | import os
import time
import numpy as np
import collections
import scipy
import scipy.sparse
import pyublas
import hashlib
from sigvisa.gpr import munge, kernels, evaluate, learn, distributions, plot
from sigvisa.gpr.gp import GaussianProcess
from sigvisa.gpr.util import marshal_fn, unmarshal_fn
from sigvisa.models.spatial_regression.baseline_models import ParamModel
from sigvisa.source.event import Event
from sigvisa.utils.cover_tree import VectorTree, MatrixTree
start_params_dad_log = {"coda_decay": [.022, .0187, 1.00, .14, .1],
"amp_transfer": [1.1, 3.4, 9.5, 0.1, .31],
"peak_offset": [2.7, 3.4, 2, .7, 0.1]
}
start_params_lld = {"coda_decay": [.022, .0187, 50.00, 1.0],
"amp_transfer": [1.1, 3.4, 100.00, 1.0],
"peak_offset": [2.7, 3.4, 50.00, 1.0]
}
start_params_composite = {"coda_decay": [.022, .01, 1.0, .01, 100.0, .01, 3.0, .01, 100.0],
"amp_transfer": [1.1, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0],
"peak_offset": [2.7, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0],
}
start_params = {"dad_log": start_params_dad_log,
"lld": start_params_lld,
"composite": start_params_composite
}
X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5)
def dist_azi_depth_distfn_log(lldda1, lldda2, params):
import sigvisa.utils.geog as geog
import numpy as np
azi_scale = params[0]
depth_scale = params[1]
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
avg_dist = (lldda1[3] + lldda2[3]) / 2
azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist)
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2)
return r
def dist_azi_depth_distfn_deriv_log(i, lldda1, lldda2, params):
import numpy as np
import sigvisa.utils.geog as geog
azi_scale = params[0]
depth_scale = params[1]
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
avg_dist = (lldda1[3] + lldda2[3]) / 2
azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist + 1)
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2)
if i == 0: # deriv wrt azi_scale
deriv = azi_scale * azi ** 2 / r if r != 0 else 0
elif i == 1: # deriv wrt depth_scale
deriv = depth_scale * depth ** 2 / r if r != 0 else 0
else:
raise Exception("unknown parameter number %d" % i)
return deriv
def | (lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2]))
depth = ( lldda1[2] - lldda2[2] ) * params[0]
r = np.sqrt(ll ** 2 + depth ** 2)
return r
def lon_lat_depth_distfn_deriv(i, lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
assert (i == 0)
ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2]))
depth = ( lldda1[2] - lldda2[2] ) * params[0]
r = np.sqrt(ll ** 2 + depth ** 2)
return ( params[0] * ( lldda1[2] - lldda2[2] )**2 ) / r if r != 0 else 0.0
def logdist_diff_distfn(lldda1, lldda2, params=None):
import numpy as np
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
return dist
def azi_diff_distfn(lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
azi = np.abs ( geog.degdiff(lldda1[4], lldda2[4]) )
return azi
def logdepth_diff_distfn(lldda1, lldda2, params=None):
import numpy as np
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
return depth
X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5)
def spatial_kernel_from_str(kernel_str, target=None, params=None):
params = params if params is not None else start_params[kernel_str][target]
priors = [None,] * len(params) # TODO: use real priors
if kernel_str == "dad_log":
k = kernels.setup_kernel(name='distfn',
params = params,
extra=[dist_azi_depth_distfn_log, dist_azi_depth_distfn_deriv_log],
)
elif kernel_str == "lld":
noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
local_kernel = kernels.DistFNKernel(params=params[1:4], priors=priors[1:4],
distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv)
k = noise_kernel + local_kernel
elif kernel_str == "composite":
# assume we are passed the following params/priors:
# 0 : sigma2_n -- noise variance
# 1 : sigma2_f_dist -- function variance wrt dist_diff
# 2 : w_dist -- length scale for dist_diff
# 3 : sigma2_f_azi -- function variance wrt azi_diff
# 4 : w_azi -- length scale for azi_diff
# 5 : sigma2_f_depth -- function variance wrt depth_diff
# 6 : w_depth -- length scale for depth_diff
# 7 : sigma2_f_local -- function variance wrt local_dist
# 8 : w_local -- length scale for local_dist
noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
distdiff_kernel = kernels.DistFNKernel(params=params[1:3], priors=priors[1:3],
distfn = logdist_diff_distfn, deriv=None)
azidiff_kernel = kernels.DistFNKernel(params=params[3:5], priors=priors[3:5],
distfn = azi_diff_distfn, deriv=None)
depthdiff_kernel = kernels.DistFNKernel(params=params[5:7], priors=priors[5:7],
distfn = logdepth_diff_distfn, deriv=None)
local_kernel = kernels.DistFNKernel(params=params[7:10], priors=priors[7:10],
distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv)
k = noise_kernel + distdiff_kernel + azidiff_kernel + depthdiff_kernel + local_kernel
return k
"""
def spatial_kernel_from_str(target=None, params=None):
params = params if params is not None else start_params_lld[target]
return params
"""
class SpatialGP(GaussianProcess, ParamModel):
def init_hyperparams(self, hyperparams):
(noise_var, signal_var, ll_scale, d_scale) = hyperparams
self.noise_var = noise_var
self.dfn_params = np.array((ll_scale, d_scale), dtype=np.float)
self.wfn_params = np.array((signal_var,), copy=True, dtype=np.float)
def build_kernel_matrix(self, X, hyperparams):
self.init_hyperparams(hyperparams)
vt = VectorTree(X[0:1,:], 1, "lld", self.dfn_params)
K = vt.kernel_matrix(X, X, "se", self.wfn_params, False) + self.noise_var * np.eye(len(X), dtype=np.float64)
K += np.eye(K.shape[0], dtype=np.float64) * 1e-8 # try to avoid losing
# positive-definiteness
# to numeric issues
return K
def invert_kernel_matrix(self, K):
L = None
alpha = None
try:
L = scipy.linalg.cholesky(K, lower=True)
alpha = scipy.linalg.cho_solve((L, True), self.y)
Kinv = scipy.linalg.inv(K)
except np.linalg.linalg.LinAlgError:
#u,v = np.linalg.eig(K)
#print K, u
#import pdb; pdb.set_trace()
raise
except ValueError:
raise
return alpha, L, Kinv
def build_parametric_model(self, alpha, Kinv_sp, H, b, B):
# notation follows section 2.7 of Rasmussen and Williams
Binv = scipy.linalg.inv(B)
tmp = np.dot(H, alpha) + np.dot(Binv, b) # H * K^-1 * y + B^-1 * b
HKinv = H * Kinv_sp
M_inv = Binv + np.dot(HKinv, H.T) # here M = (inv(B) +
# H*K^-1*H.T)^-1 is the
# posterior covariance
# matrix on the params.
c = scipy.linalg.cholesky(M_inv, lower=True) # c = sqrt(inv(B) + H*K^-1*H.T)
beta_bar = scipy.linalg.cho_solve((c, True), tmp)
invc = scipy.linalg.inv(c)
return c, beta_bar, invc, HKinv
def sparsify(self, M):
return scipy.sparse.csr_matrix(M * (np.abs(M) > self.sparse_threshold))
def sort_events(self, X, y):
combined = np.hstack([X, np.reshape(y, (-1, 1))])
combined_sorted = np.array(sorted(combined, key = lambda x: x[0]), dtype=float)
X_sorted = np.array(combined_sorted[:, :-1], copy=True, dtype=float)
y_sorted = combined_sorted[:, -1].flatten()
return X_sorted, y_sorted
def __init__(self, X=None, y=None,
fname=None, basisfns=None,
hyperparams=None,
param_mean=None, param_cov=None,
compute_ll=False,
compute_grad=False,
sparse_threshold=1e-20,
sta = None,
sort_events=True):
try:
ParamModel.__init__(self, sta=sta)
except KeyError:
pass
if fname is not None:
self.load_trained_model(fname)
else:
if sort_events:
X, y = self.sort_events(X, y) # arrange events by
# lon/lat, as a
# heuristic to expose
# block structure in the
# kernel matrix
self.hyperparams = np.array(hyperparams)
self.sparse_threshold = sparse_threshold
self.X = X
self.n = X.shape[0]
self.basisfns = basisfns
mu, self.y, H = self.setup_mean("parametric", X, y)
# train model
#t0 = time.time()
K = self.build_kernel_matrix(self.X, hyperparams)
#t1 = time.time()
self.alpha, L, Kinv = self.invert_kernel_matrix(K)
Kinv_tri = 2 * np.tril(Kinv, k=0) - np.diag(np.diag(Kinv))
#t2 = time.time()
self.Kinv_sp = self.sparsify(Kinv)
self.Kinv_sp_tri = self.sparsify(Kinv_tri)
#t3 = time.time()
self.c,self.beta_bar, self.invc, self.HKinv = self.build_parametric_model(self.alpha,
self.Kinv_sp, H,
b=param_mean,
B=param_cov)
#t4 = time.time()
r = self.y - np.dot(H.T, self.beta_bar)
self.alpha_r = scipy.linalg.cho_solve((L, True), r)
#t5 = time.time()
self.build_point_tree(HKinv = self.HKinv, Kinv = Kinv_tri, Kinv_sp=self.Kinv_sp_tri, alpha_r = self.alpha_r)
#t6 = time.time()
# precompute training set log likelihood, so we don't need
# to keep L around.
z = np.dot(H.T, param_mean) - self.y
B = param_cov
if compute_ll:
self._compute_marginal_likelihood(L=L, z=z, B=B, H=H, K=K, Kinv_sp=self.Kinv_sp_tri)
else:
self.ll = -np.inf
#t7 = time.time()
if compute_grad:
self.ll_grad = self._log_likelihood_gradient(z=z, K=K, H=H, B=B, Kinv=Kinv)
np.save('spatialK.npy', K)
np.save('spatialKinv.npy', Kinv)
#t8 = time.time()
"""
print t1-t0
print t2-t1
print t3-t2
print t4-t3
print t5-t4
print t6-t5
print t7-t6
print t8-t7
"""
def build_point_tree(self, HKinv, Kinv, Kinv_sp, alpha_r):
self.predict_tree = VectorTree(self.X, 1, "lld", self.dfn_params)
self.predict_tree.set_v(0, alpha_r.astype(np.float))
d = len(self.basisfns)
self.cov_tree = VectorTree(self.X, d, "lld", self.dfn_params)
HKinv = HKinv.astype(np.float)
for i in range(d):
self.cov_tree.set_v(i, HKinv[i, :])
nzr, nzc = Kinv_sp.nonzero()
self.double_tree = MatrixTree(self.X, nzr, nzc, "lld", self.dfn_params)
kkk = np.matrix(Kinv, copy=True, dtype=np.float64)
self.double_tree.set_m(kkk)
def predict(self, cond, eps=1e-8):
X1 = self.standardize_input_array(cond).astype(np.float)
gp_pred = np.array([self.predict_tree.weighted_sum(0, np.reshape(x, (1,-1)), eps, "se", self.wfn_params) for x in X1])
H = self.get_data_features(X1)
mean_pred = np.reshape(np.dot(H.T, self.beta_bar), gp_pred.shape)
gp_pred += mean_pred
if len(gp_pred) == 1:
gp_pred = gp_pred[0]
return gp_pred
def kernel(self, X1, X2, identical=False):
K = self.predict_tree.kernel_matrix(X1, X2, "se", self.wfn_params, False)
if identical:
K += self.noise_var * np.eye(K.shape[0])
return K
def covariance(self, cond, include_obs=False, parametric_only=False, pad=1e-8):
"""
Compute the posterior covariance matrix at a set of points given by the rows of X1.
Default is to compute the covariance of f, the latent function values. If obs_covar
is True, we instead compute the covariance of y, the observed values.
By default, we add a tiny bit of padding to the diagonal to counteract any potential
loss of positive definiteness from numerical issues. Setting pad=0 disables this.
"""
X1 = self.standardize_input_array(cond)
m = X1.shape[0]
Kstar = self.get_query_K(X1)
if not parametric_only:
tmp = self.Kinv_sp_tri * Kstar
qf = np.dot(Kstar.T, tmp)
k = self.kernel(X1,X1, identical=include_obs)
gp_cov = k - qf
else:
gp_cov = np.zeros((m,m))
R = self.query_R
tmp = np.dot(self.invc, R)
mean_cov = np.dot(tmp.T, tmp)
gp_cov += mean_cov
gp_cov += pad * np.eye(gp_cov.shape[0])
return gp_cov
def covariance_double_tree(self, cond, include_obs=False, parametric_only=False, pad=1e-8, eps=1e-8):
X1 = self.standardize_input_array(cond)
m = X1.shape[0]
d = len(self.basisfns)
t0 = time.time()
if not parametric_only:
k = self.kernel(X1, X1, identical=include_obs)
qf = self.double_tree.quadratic_form(X1, eps, "se", self.wfn_params)
gp_cov = k - qf
else:
gp_cov = np.zeros((m,m))
t1 = time.time()
H = np.array([[f(x) for x in X1] for f in self.basisfns], dtype=np.float64)
HKinvKstar = np.zeros((d, m))
for i in range(d):
for j in range(m):
HKinvKstar[i,j] = self.cov_tree.weighted_sum(i, X1[j:j+1,:], eps, "se", self.wfn_params)
R = H - HKinvKstar
v = np.dot(self.invc, R)
mc = np.dot(v.T, v)
gp_cov += mc
t2 = time.time()
self.nptime = (t1-t0)
self.ptime = (t2-t1)
gp_cov += pad * np.eye(m)
return gp_cov
def variance(self, cond, **kwargs):
X1 = self.standardize_input_array(cond)
result = GaussianProcess.variance(self, X1, **kwargs)
if len(result) == 1:
result = result[0]
return result
def sample(self, cond):
X1 = self.standardize_input_array(cond)
result = GaussianProcess.sample(self, X1)
if len(result) == 1:
result = result[0]
return result
def log_p(self, x, cond):
X1 = self.standardize_input_array(cond)
x = x if isinstance(x, collections.Iterable) else (x,)
result = GaussianProcess.posterior_log_likelihood(self, X1, x)
return result
def pack_npz(self):
d = dict()
d['c'] = self.c
d['beta_bar'] = self.beta_bar
d['invc'] = self.invc
d['HKinv'] = self.HKinv
d['basisfns'] = np.array([marshal_fn(f) for f in self.basisfns], dtype=object)
d['X'] = self.X,
d['y'] =self.y,
d['alpha'] =self.alpha,
d['hyperparams'] = self.hyperparams
d['Kinv_sp_tri'] =self.Kinv_sp_tri,
#d['Kinv_sp'] =self.Kinv_sp,
d['sparse_threshold'] =self.sparse_threshold,
d['ll'] =self.ll,
d['alpha_r'] = self.alpha_r
return d
def save_trained_model(self, filename):
"""
Serialize the model to a file.
"""
d = self.pack_npz()
with open(filename, 'wb') as f:
np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)
def unpack_npz(self, npzfile):
self.X = npzfile['X'][0]
self.y = npzfile['y'][0]
self.alpha = npzfile['alpha'][0]
self.hyperparams = npzfile['hyperparams']
self.init_hyperparams(self.hyperparams)
self.Kinv_sp_tri = npzfile['Kinv_sp_tri'][0]
#self.Kinv_sp = npzfile['Kinv_sp'][0]
self.sparse_threshold = npzfile['sparse_threshold'][0]
self.ll = npzfile['ll'][0]
self.basisfns = npzfile['basisfns']
self.basisfns = [unmarshal_fn(code) for code in self.basisfns]
self.beta_bar = npzfile['beta_bar']
self.c = npzfile['c']
self.invc = npzfile['invc']
self.HKinv = npzfile['HKinv']
self.alpha_r = npzfile['alpha_r']
def load_trained_model(self, filename):
npzfile = np.load(filename)
self.unpack_npz(npzfile)
super(SpatialGP, self).__unrepr_base_params__(str(npzfile['base_str']))
del npzfile.f
npzfile.close()
self.n = self.X.shape[0]
self.build_point_tree(HKinv=self.HKinv, Kinv=self.Kinv_sp_tri.todense(), Kinv_sp=self.Kinv_sp_tri, alpha_r=self.alpha_r)
def _compute_marginal_likelihood(self, L, z, B, H, K, Kinv_sp):
# here we follow eqn 2.43 in R&W
#
# let z = H.T*b - y, then we want
# .5 * z.T * (K + H.T * B * H)^-1 * z
# minus some other stuff (dealt with below).
# by the matrix inv lemma,
# (K + H.T * B * H)^-1
# = Kinv - Kinv*H.T*(Binv + H*Kinv*H.T)^-1*H*Kinv
# = Kinv - Kinv*H.T* invc.T * invc *H*Kinv
# = Kinv - (invc * HKinv)^T (invc * HKinv)
#
# so we have z.T * Kinv * z - z.T * (other thing) * z
# i.e.: term1 - term2
# in the notation of the code.
tmp1 = Kinv_sp * z
term1 = np.dot(z.T, tmp1)
tmp2 = np.dot(self.HKinv, z)
tmp3 = np.dot(self.invc, tmp2)
term2 = np.dot(tmp3.T, tmp3)
# following eqn 2.43 in R&W, we want to compute
# log det(K + H.T * B * H). using the matrix inversion
# lemma, we instead compute
# log det(K) + log det(B) + log det(B^-1 + H*K^-1*H.T)
# to compute log(det(K)), we use the trick that the
# determinant of a symmetric pos. def. matrix is the
# product of squares of the diagonal elements of the
# Cholesky factor
ld2_K = np.log(np.diag(L)).sum()
ld2 = np.log(np.diag(self.c)).sum() # det( B^-1 - H * K^-1 * H.T )
ld_B = np.log(np.linalg.det(B))
# eqn 2.43 in R&W, using the matrix inv lemma
self.ll = -.5 * (term1 - term2 + self.n * np.log(2*np.pi) + ld_B) - ld2_K - ld2
def _log_likelihood_gradient(self, z, K, H, B, Kinv):
"""
Gradient of the training set log likelihood with respect to the kernel hyperparams.
"""
nparams = 4
grad = np.zeros((nparams,))
#t0 = time.time()
tmp = np.dot(self.invc, self.HKinv)
#t1 = time.time()
K_HBH_inv = Kinv - np.dot(tmp.T, tmp)
#t2 = time.time()
alpha_z = np.dot(K_HBH_inv, z)
#t3 = time.time()
#print "gradient: %f %f %f" % (t1-t0, t2-t1, t3-t2)
for i in range(nparams):
tA = time.time()
if (i == 0):
dKdi = np.eye(self.n)
else:
dKdi = self.predict_tree.kernel_deriv_wrt_i(self.X, self.X, "se", self.wfn_params, i-1)
dlldi = .5 * np.dot(alpha_z.T, np.dot(dKdi, alpha_z))
tB = time.time()
# here we use the fact:
# trace(AB) = sum_{ij} A_ij * B_ij
dlldi -= .5 * np.sum(np.sum(K_HBH_inv.T * dKdi))
grad[i] = dlldi
tC = time.time()
print " %d: %f %f" % (i, tB-tA, tC-tB)
return grad
def spatialgp_nll_ngrad(**kwargs):
"""
Get both the negative log-likelihood and its gradient
simultaneously (more efficient than doing it separately since we
only create one new GP object, which only constructs the kernel
matrix once, etc.).
"""
try:
# print "optimizing params", kernel_params
gp = SpatialGP(compute_ll=True, compute_grad=True, **kwargs)
nll = -1 * gp.ll
ngrad = -1 * gp.ll_grad
except np.linalg.linalg.LinAlgError as e:
print "warning: lin alg error (%s) in likelihood computation, returning likelihood -inf" % str(e)
nll = np.float("inf")
ngrad = None
except ValueError as e:
print "warning: value error (%s) in likelihood computation, returning likelihood -inf" % str(e)
nll = np.float("inf")
ngrad = None
return nll, ngrad
| lon_lat_depth_distfn | identifier_name |
SpatialGP.py | import os
import time
import numpy as np
import collections
import scipy
import scipy.sparse
import pyublas
import hashlib
from sigvisa.gpr import munge, kernels, evaluate, learn, distributions, plot
from sigvisa.gpr.gp import GaussianProcess
from sigvisa.gpr.util import marshal_fn, unmarshal_fn
from sigvisa.models.spatial_regression.baseline_models import ParamModel
from sigvisa.source.event import Event
from sigvisa.utils.cover_tree import VectorTree, MatrixTree
start_params_dad_log = {"coda_decay": [.022, .0187, 1.00, .14, .1],
"amp_transfer": [1.1, 3.4, 9.5, 0.1, .31],
"peak_offset": [2.7, 3.4, 2, .7, 0.1]
}
start_params_lld = {"coda_decay": [.022, .0187, 50.00, 1.0],
"amp_transfer": [1.1, 3.4, 100.00, 1.0],
"peak_offset": [2.7, 3.4, 50.00, 1.0]
}
start_params_composite = {"coda_decay": [.022, .01, 1.0, .01, 100.0, .01, 3.0, .01, 100.0],
"amp_transfer": [1.1, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0],
"peak_offset": [2.7, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0],
}
start_params = {"dad_log": start_params_dad_log,
"lld": start_params_lld,
"composite": start_params_composite
}
X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5)
def dist_azi_depth_distfn_log(lldda1, lldda2, params):
import sigvisa.utils.geog as geog
import numpy as np
azi_scale = params[0]
depth_scale = params[1]
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
avg_dist = (lldda1[3] + lldda2[3]) / 2
azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist)
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2)
return r
def dist_azi_depth_distfn_deriv_log(i, lldda1, lldda2, params):
import numpy as np
import sigvisa.utils.geog as geog
azi_scale = params[0]
depth_scale = params[1]
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
avg_dist = (lldda1[3] + lldda2[3]) / 2
azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist + 1)
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2)
if i == 0: # deriv wrt azi_scale
deriv = azi_scale * azi ** 2 / r if r != 0 else 0
elif i == 1: # deriv wrt depth_scale
deriv = depth_scale * depth ** 2 / r if r != 0 else 0
else:
raise Exception("unknown parameter number %d" % i)
return deriv
def lon_lat_depth_distfn(lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2]))
depth = ( lldda1[2] - lldda2[2] ) * params[0]
r = np.sqrt(ll ** 2 + depth ** 2)
return r
def lon_lat_depth_distfn_deriv(i, lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
assert (i == 0)
ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2]))
depth = ( lldda1[2] - lldda2[2] ) * params[0]
r = np.sqrt(ll ** 2 + depth ** 2)
return ( params[0] * ( lldda1[2] - lldda2[2] )**2 ) / r if r != 0 else 0.0
def logdist_diff_distfn(lldda1, lldda2, params=None):
import numpy as np
dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1)
return dist
def azi_diff_distfn(lldda1, lldda2, params=None):
import sigvisa.utils.geog as geog
import numpy as np
azi = np.abs ( geog.degdiff(lldda1[4], lldda2[4]) )
return azi
def logdepth_diff_distfn(lldda1, lldda2, params=None):
import numpy as np
depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1)
return depth
X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5)
def spatial_kernel_from_str(kernel_str, target=None, params=None):
params = params if params is not None else start_params[kernel_str][target]
priors = [None,] * len(params) # TODO: use real priors
if kernel_str == "dad_log":
k = kernels.setup_kernel(name='distfn',
params = params,
extra=[dist_azi_depth_distfn_log, dist_azi_depth_distfn_deriv_log],
)
elif kernel_str == "lld":
noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
local_kernel = kernels.DistFNKernel(params=params[1:4], priors=priors[1:4],
distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv)
k = noise_kernel + local_kernel
elif kernel_str == "composite":
# assume we are passed the following params/priors:
# 0 : sigma2_n -- noise variance
# 1 : sigma2_f_dist -- function variance wrt dist_diff
# 2 : w_dist -- length scale for dist_diff
# 3 : sigma2_f_azi -- function variance wrt azi_diff
# 4 : w_azi -- length scale for azi_diff
# 5 : sigma2_f_depth -- function variance wrt depth_diff
# 6 : w_depth -- length scale for depth_diff
# 7 : sigma2_f_local -- function variance wrt local_dist
# 8 : w_local -- length scale for local_dist
|
return k
"""
def spatial_kernel_from_str(target=None, params=None):
params = params if params is not None else start_params_lld[target]
return params
"""
class SpatialGP(GaussianProcess, ParamModel):
def init_hyperparams(self, hyperparams):
(noise_var, signal_var, ll_scale, d_scale) = hyperparams
self.noise_var = noise_var
self.dfn_params = np.array((ll_scale, d_scale), dtype=np.float)
self.wfn_params = np.array((signal_var,), copy=True, dtype=np.float)
def build_kernel_matrix(self, X, hyperparams):
self.init_hyperparams(hyperparams)
vt = VectorTree(X[0:1,:], 1, "lld", self.dfn_params)
K = vt.kernel_matrix(X, X, "se", self.wfn_params, False) + self.noise_var * np.eye(len(X), dtype=np.float64)
K += np.eye(K.shape[0], dtype=np.float64) * 1e-8 # try to avoid losing
# positive-definiteness
# to numeric issues
return K
def invert_kernel_matrix(self, K):
L = None
alpha = None
try:
L = scipy.linalg.cholesky(K, lower=True)
alpha = scipy.linalg.cho_solve((L, True), self.y)
Kinv = scipy.linalg.inv(K)
except np.linalg.linalg.LinAlgError:
#u,v = np.linalg.eig(K)
#print K, u
#import pdb; pdb.set_trace()
raise
except ValueError:
raise
return alpha, L, Kinv
def build_parametric_model(self, alpha, Kinv_sp, H, b, B):
# notation follows section 2.7 of Rasmussen and Williams
Binv = scipy.linalg.inv(B)
tmp = np.dot(H, alpha) + np.dot(Binv, b) # H * K^-1 * y + B^-1 * b
HKinv = H * Kinv_sp
M_inv = Binv + np.dot(HKinv, H.T) # here M = (inv(B) +
# H*K^-1*H.T)^-1 is the
# posterior covariance
# matrix on the params.
c = scipy.linalg.cholesky(M_inv, lower=True) # c = sqrt(inv(B) + H*K^-1*H.T)
beta_bar = scipy.linalg.cho_solve((c, True), tmp)
invc = scipy.linalg.inv(c)
return c, beta_bar, invc, HKinv
def sparsify(self, M):
return scipy.sparse.csr_matrix(M * (np.abs(M) > self.sparse_threshold))
def sort_events(self, X, y):
combined = np.hstack([X, np.reshape(y, (-1, 1))])
combined_sorted = np.array(sorted(combined, key = lambda x: x[0]), dtype=float)
X_sorted = np.array(combined_sorted[:, :-1], copy=True, dtype=float)
y_sorted = combined_sorted[:, -1].flatten()
return X_sorted, y_sorted
def __init__(self, X=None, y=None,
fname=None, basisfns=None,
hyperparams=None,
param_mean=None, param_cov=None,
compute_ll=False,
compute_grad=False,
sparse_threshold=1e-20,
sta = None,
sort_events=True):
try:
ParamModel.__init__(self, sta=sta)
except KeyError:
pass
if fname is not None:
self.load_trained_model(fname)
else:
if sort_events:
X, y = self.sort_events(X, y) # arrange events by
# lon/lat, as a
# heuristic to expose
# block structure in the
# kernel matrix
self.hyperparams = np.array(hyperparams)
self.sparse_threshold = sparse_threshold
self.X = X
self.n = X.shape[0]
self.basisfns = basisfns
mu, self.y, H = self.setup_mean("parametric", X, y)
# train model
#t0 = time.time()
K = self.build_kernel_matrix(self.X, hyperparams)
#t1 = time.time()
self.alpha, L, Kinv = self.invert_kernel_matrix(K)
Kinv_tri = 2 * np.tril(Kinv, k=0) - np.diag(np.diag(Kinv))
#t2 = time.time()
self.Kinv_sp = self.sparsify(Kinv)
self.Kinv_sp_tri = self.sparsify(Kinv_tri)
#t3 = time.time()
self.c,self.beta_bar, self.invc, self.HKinv = self.build_parametric_model(self.alpha,
self.Kinv_sp, H,
b=param_mean,
B=param_cov)
#t4 = time.time()
r = self.y - np.dot(H.T, self.beta_bar)
self.alpha_r = scipy.linalg.cho_solve((L, True), r)
#t5 = time.time()
self.build_point_tree(HKinv = self.HKinv, Kinv = Kinv_tri, Kinv_sp=self.Kinv_sp_tri, alpha_r = self.alpha_r)
#t6 = time.time()
# precompute training set log likelihood, so we don't need
# to keep L around.
z = np.dot(H.T, param_mean) - self.y
B = param_cov
if compute_ll:
self._compute_marginal_likelihood(L=L, z=z, B=B, H=H, K=K, Kinv_sp=self.Kinv_sp_tri)
else:
self.ll = -np.inf
#t7 = time.time()
if compute_grad:
self.ll_grad = self._log_likelihood_gradient(z=z, K=K, H=H, B=B, Kinv=Kinv)
np.save('spatialK.npy', K)
np.save('spatialKinv.npy', Kinv)
#t8 = time.time()
"""
print t1-t0
print t2-t1
print t3-t2
print t4-t3
print t5-t4
print t6-t5
print t7-t6
print t8-t7
"""
def build_point_tree(self, HKinv, Kinv, Kinv_sp, alpha_r):
self.predict_tree = VectorTree(self.X, 1, "lld", self.dfn_params)
self.predict_tree.set_v(0, alpha_r.astype(np.float))
d = len(self.basisfns)
self.cov_tree = VectorTree(self.X, d, "lld", self.dfn_params)
HKinv = HKinv.astype(np.float)
for i in range(d):
self.cov_tree.set_v(i, HKinv[i, :])
nzr, nzc = Kinv_sp.nonzero()
self.double_tree = MatrixTree(self.X, nzr, nzc, "lld", self.dfn_params)
kkk = np.matrix(Kinv, copy=True, dtype=np.float64)
self.double_tree.set_m(kkk)
def predict(self, cond, eps=1e-8):
X1 = self.standardize_input_array(cond).astype(np.float)
gp_pred = np.array([self.predict_tree.weighted_sum(0, np.reshape(x, (1,-1)), eps, "se", self.wfn_params) for x in X1])
H = self.get_data_features(X1)
mean_pred = np.reshape(np.dot(H.T, self.beta_bar), gp_pred.shape)
gp_pred += mean_pred
if len(gp_pred) == 1:
gp_pred = gp_pred[0]
return gp_pred
def kernel(self, X1, X2, identical=False):
K = self.predict_tree.kernel_matrix(X1, X2, "se", self.wfn_params, False)
if identical:
K += self.noise_var * np.eye(K.shape[0])
return K
def covariance(self, cond, include_obs=False, parametric_only=False, pad=1e-8):
"""
Compute the posterior covariance matrix at a set of points given by the rows of X1.
Default is to compute the covariance of f, the latent function values. If obs_covar
is True, we instead compute the covariance of y, the observed values.
By default, we add a tiny bit of padding to the diagonal to counteract any potential
loss of positive definiteness from numerical issues. Setting pad=0 disables this.
"""
X1 = self.standardize_input_array(cond)
m = X1.shape[0]
Kstar = self.get_query_K(X1)
if not parametric_only:
tmp = self.Kinv_sp_tri * Kstar
qf = np.dot(Kstar.T, tmp)
k = self.kernel(X1,X1, identical=include_obs)
gp_cov = k - qf
else:
gp_cov = np.zeros((m,m))
R = self.query_R
tmp = np.dot(self.invc, R)
mean_cov = np.dot(tmp.T, tmp)
gp_cov += mean_cov
gp_cov += pad * np.eye(gp_cov.shape[0])
return gp_cov
def covariance_double_tree(self, cond, include_obs=False, parametric_only=False, pad=1e-8, eps=1e-8):
X1 = self.standardize_input_array(cond)
m = X1.shape[0]
d = len(self.basisfns)
t0 = time.time()
if not parametric_only:
k = self.kernel(X1, X1, identical=include_obs)
qf = self.double_tree.quadratic_form(X1, eps, "se", self.wfn_params)
gp_cov = k - qf
else:
gp_cov = np.zeros((m,m))
t1 = time.time()
H = np.array([[f(x) for x in X1] for f in self.basisfns], dtype=np.float64)
HKinvKstar = np.zeros((d, m))
for i in range(d):
for j in range(m):
HKinvKstar[i,j] = self.cov_tree.weighted_sum(i, X1[j:j+1,:], eps, "se", self.wfn_params)
R = H - HKinvKstar
v = np.dot(self.invc, R)
mc = np.dot(v.T, v)
gp_cov += mc
t2 = time.time()
self.nptime = (t1-t0)
self.ptime = (t2-t1)
gp_cov += pad * np.eye(m)
return gp_cov
def variance(self, cond, **kwargs):
X1 = self.standardize_input_array(cond)
result = GaussianProcess.variance(self, X1, **kwargs)
if len(result) == 1:
result = result[0]
return result
def sample(self, cond):
X1 = self.standardize_input_array(cond)
result = GaussianProcess.sample(self, X1)
if len(result) == 1:
result = result[0]
return result
def log_p(self, x, cond):
X1 = self.standardize_input_array(cond)
x = x if isinstance(x, collections.Iterable) else (x,)
result = GaussianProcess.posterior_log_likelihood(self, X1, x)
return result
def pack_npz(self):
d = dict()
d['c'] = self.c
d['beta_bar'] = self.beta_bar
d['invc'] = self.invc
d['HKinv'] = self.HKinv
d['basisfns'] = np.array([marshal_fn(f) for f in self.basisfns], dtype=object)
d['X'] = self.X,
d['y'] =self.y,
d['alpha'] =self.alpha,
d['hyperparams'] = self.hyperparams
d['Kinv_sp_tri'] =self.Kinv_sp_tri,
#d['Kinv_sp'] =self.Kinv_sp,
d['sparse_threshold'] =self.sparse_threshold,
d['ll'] =self.ll,
d['alpha_r'] = self.alpha_r
return d
def save_trained_model(self, filename):
"""
Serialize the model to a file.
"""
d = self.pack_npz()
with open(filename, 'wb') as f:
np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)
def unpack_npz(self, npzfile):
self.X = npzfile['X'][0]
self.y = npzfile['y'][0]
self.alpha = npzfile['alpha'][0]
self.hyperparams = npzfile['hyperparams']
self.init_hyperparams(self.hyperparams)
self.Kinv_sp_tri = npzfile['Kinv_sp_tri'][0]
#self.Kinv_sp = npzfile['Kinv_sp'][0]
self.sparse_threshold = npzfile['sparse_threshold'][0]
self.ll = npzfile['ll'][0]
self.basisfns = npzfile['basisfns']
self.basisfns = [unmarshal_fn(code) for code in self.basisfns]
self.beta_bar = npzfile['beta_bar']
self.c = npzfile['c']
self.invc = npzfile['invc']
self.HKinv = npzfile['HKinv']
self.alpha_r = npzfile['alpha_r']
def load_trained_model(self, filename):
npzfile = np.load(filename)
self.unpack_npz(npzfile)
super(SpatialGP, self).__unrepr_base_params__(str(npzfile['base_str']))
del npzfile.f
npzfile.close()
self.n = self.X.shape[0]
self.build_point_tree(HKinv=self.HKinv, Kinv=self.Kinv_sp_tri.todense(), Kinv_sp=self.Kinv_sp_tri, alpha_r=self.alpha_r)
def _compute_marginal_likelihood(self, L, z, B, H, K, Kinv_sp):
# here we follow eqn 2.43 in R&W
#
# let z = H.T*b - y, then we want
# .5 * z.T * (K + H.T * B * H)^-1 * z
# minus some other stuff (dealt with below).
# by the matrix inv lemma,
# (K + H.T * B * H)^-1
# = Kinv - Kinv*H.T*(Binv + H*Kinv*H.T)^-1*H*Kinv
# = Kinv - Kinv*H.T* invc.T * invc *H*Kinv
# = Kinv - (invc * HKinv)^T (invc * HKinv)
#
# so we have z.T * Kinv * z - z.T * (other thing) * z
# i.e.: term1 - term2
# in the notation of the code.
tmp1 = Kinv_sp * z
term1 = np.dot(z.T, tmp1)
tmp2 = np.dot(self.HKinv, z)
tmp3 = np.dot(self.invc, tmp2)
term2 = np.dot(tmp3.T, tmp3)
# following eqn 2.43 in R&W, we want to compute
# log det(K + H.T * B * H). using the matrix inversion
# lemma, we instead compute
# log det(K) + log det(B) + log det(B^-1 + H*K^-1*H.T)
# to compute log(det(K)), we use the trick that the
# determinant of a symmetric pos. def. matrix is the
# product of squares of the diagonal elements of the
# Cholesky factor
ld2_K = np.log(np.diag(L)).sum()
ld2 = np.log(np.diag(self.c)).sum() # det( B^-1 - H * K^-1 * H.T )
ld_B = np.log(np.linalg.det(B))
# eqn 2.43 in R&W, using the matrix inv lemma
self.ll = -.5 * (term1 - term2 + self.n * np.log(2*np.pi) + ld_B) - ld2_K - ld2
def _log_likelihood_gradient(self, z, K, H, B, Kinv):
"""
Gradient of the training set log likelihood with respect to the kernel hyperparams.
"""
nparams = 4
grad = np.zeros((nparams,))
#t0 = time.time()
tmp = np.dot(self.invc, self.HKinv)
#t1 = time.time()
K_HBH_inv = Kinv - np.dot(tmp.T, tmp)
#t2 = time.time()
alpha_z = np.dot(K_HBH_inv, z)
#t3 = time.time()
#print "gradient: %f %f %f" % (t1-t0, t2-t1, t3-t2)
for i in range(nparams):
tA = time.time()
if (i == 0):
dKdi = np.eye(self.n)
else:
dKdi = self.predict_tree.kernel_deriv_wrt_i(self.X, self.X, "se", self.wfn_params, i-1)
dlldi = .5 * np.dot(alpha_z.T, np.dot(dKdi, alpha_z))
tB = time.time()
# here we use the fact:
# trace(AB) = sum_{ij} A_ij * B_ij
dlldi -= .5 * np.sum(np.sum(K_HBH_inv.T * dKdi))
grad[i] = dlldi
tC = time.time()
print " %d: %f %f" % (i, tB-tA, tC-tB)
return grad
def spatialgp_nll_ngrad(**kwargs):
"""
Get both the negative log-likelihood and its gradient
simultaneously (more efficient than doing it separately since we
only create one new GP object, which only constructs the kernel
matrix once, etc.).
"""
try:
# print "optimizing params", kernel_params
gp = SpatialGP(compute_ll=True, compute_grad=True, **kwargs)
nll = -1 * gp.ll
ngrad = -1 * gp.ll_grad
except np.linalg.linalg.LinAlgError as e:
print "warning: lin alg error (%s) in likelihood computation, returning likelihood -inf" % str(e)
nll = np.float("inf")
ngrad = None
except ValueError as e:
print "warning: value error (%s) in likelihood computation, returning likelihood -inf" % str(e)
nll = np.float("inf")
ngrad = None
return nll, ngrad
| noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
distdiff_kernel = kernels.DistFNKernel(params=params[1:3], priors=priors[1:3],
distfn = logdist_diff_distfn, deriv=None)
azidiff_kernel = kernels.DistFNKernel(params=params[3:5], priors=priors[3:5],
distfn = azi_diff_distfn, deriv=None)
depthdiff_kernel = kernels.DistFNKernel(params=params[5:7], priors=priors[5:7],
distfn = logdepth_diff_distfn, deriv=None)
local_kernel = kernels.DistFNKernel(params=params[7:10], priors=priors[7:10],
distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv)
k = noise_kernel + distdiff_kernel + azidiff_kernel + depthdiff_kernel + local_kernel | conditional_block |
kubeconfig.go | package cluster
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/go-kit/kit/endpoint"
"github.com/gorilla/securecookie"
"github.com/kubermatic/kubermatic/api/pkg/handler/auth"
"github.com/kubermatic/kubermatic/api/pkg/handler/middleware"
"github.com/kubermatic/kubermatic/api/pkg/handler/v1/common"
"github.com/kubermatic/kubermatic/api/pkg/provider"
kcerrors "github.com/kubermatic/kubermatic/api/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
const (
csrfCookieName = "csrf_token"
cookieMaxAge = 180
)
var secureCookie *securecookie.SecureCookie
func GetAdminKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
filePrefix := "admin"
var adminClientCfg *clientcmdapi.Config
if strings.HasPrefix(userInfo.Group, "viewers") {
filePrefix = "viewer"
adminClientCfg, err = clusterProvider.GetViewerKubeconfigForCustomerCluster(cluster)
} else {
adminClientCfg, err = clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
}
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: filePrefix}, nil
}
}
func GetOidcKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
adminClientCfg, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["idp-issuer-url"] = cluster.Spec.OIDC.IssuerURL
clientCmdAuthProvider.Config["client-id"] = cluster.Spec.OIDC.ClientID
if cluster.Spec.OIDC.ClientSecret != "" {
clientCmdAuthProvider.Config["client-secret"] = cluster.Spec.OIDC.ClientSecret
}
if cluster.Spec.OIDC.ExtraScopes != "" {
clientCmdAuthProvider.Config["extra-scopes"] = cluster.Spec.OIDC.ExtraScopes
}
clientCmdAuth.AuthProvider = clientCmdAuthProvider
adminClientCfg.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
adminClientCfg.AuthInfos["default"] = clientCmdAuth
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: "oidc"}, nil
}
}
func CreateOIDCKubeconfigEndpoint(projectProvider provider.ProjectProvider, oidcIssuerVerifier auth.OIDCIssuerVerifier, oidcCfg common.OIDCConfiguration) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
oidcIssuer := oidcIssuerVerifier.(auth.OIDCIssuer)
oidcVerifier := oidcIssuerVerifier.(auth.TokenVerifier)
req := request.(CreateOIDCKubeconfigReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo := ctx.Value(middleware.UserInfoContextKey).(*provider.UserInfo)
if secureCookie == nil {
secureCookie = securecookie.New([]byte(oidcCfg.CookieHashKey), nil)
}
_, err := projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// PHASE exchangeCode handles callback response from OIDC provider
// and generates kubeconfig
if req.phase == exchangeCodePhase {
// validate the state
if req.decodedState.Nonce != req.cookieNonceValue {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter = %s", req.decodedState.Nonce)
}
oidcTokens, err := oidcIssuer.Exchange(ctx, req.code)
if err != nil {
return nil, kcerrors.NewBadRequest("error while exchaning oidc code for token = %v", err)
}
if len(oidcTokens.RefreshToken) == 0 {
return nil, kcerrors.NewBadRequest("the refresh token is missing but required, try setting/unsetting \"oidc-offline-access-as-scope\" command line flag")
}
claims, err := oidcVerifier.Verify(ctx, oidcTokens.IDToken)
if err != nil {
return nil, kcerrors.New(http.StatusUnauthorized, err.Error())
}
if len(claims.Email) == 0 {
return nil, kcerrors.NewBadRequest("the token doesn't contain the mandatory \"email\" claim")
}
adminKubeConfig, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// create a kubeconfig that contains OIDC tokens
oidcKubeCfg := clientcmdapi.NewConfig()
{
// grab admin kubeconfig to read the cluster info
var clusterFromAdminKubeCfg *clientcmdapi.Cluster
for clusterName, cluster := range adminKubeConfig.Clusters {
if clusterName == req.ClusterID {
clusterFromAdminKubeCfg = cluster
}
}
if clusterFromAdminKubeCfg == nil {
return nil, kcerrors.New(http.StatusInternalServerError, fmt.Sprintf("unable to construct kubeconfig because couldn't find %s cluster enty in existing kubecfg", req.ClusterID))
}
// create cluster entry
clientCmdCluster := clientcmdapi.NewCluster()
clientCmdCluster.Server = clusterFromAdminKubeCfg.Server
clientCmdCluster.CertificateAuthorityData = clusterFromAdminKubeCfg.CertificateAuthorityData
oidcKubeCfg.Clusters[req.ClusterID] = clientCmdCluster
// create auth entry
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["id-token"] = oidcTokens.IDToken
clientCmdAuthProvider.Config["refresh-token"] = oidcTokens.RefreshToken
clientCmdAuthProvider.Config["idp-issuer-url"] = oidcCfg.URL
clientCmdAuthProvider.Config["client-id"] = oidcCfg.ClientID
clientCmdAuthProvider.Config["client-secret"] = oidcCfg.ClientSecret
clientCmdAuth.AuthProvider = clientCmdAuthProvider
oidcKubeCfg.AuthInfos[claims.Email] = clientCmdAuth
// create default ctx
clientCmdCtx := clientcmdapi.NewContext()
clientCmdCtx.Cluster = req.ClusterID
clientCmdCtx.AuthInfo = claims.Email
oidcKubeCfg.Contexts["default"] = clientCmdCtx
oidcKubeCfg.CurrentContext = "default"
}
// prepare final rsp that holds kubeconfig
rsp := createOIDCKubeconfigRsp{}
rsp.phase = kubeconfigGenerated
rsp.oidcKubeConfig = oidcKubeCfg
rsp.secureCookieMode = oidcCfg.CookieSecureMode
return rsp, nil
}
// PHASE initial handles request from the end-user that wants to authenticate
// and kicksoff the process of kubeconfig generation
if req.phase != initialPhase {
return nil, kcerrors.NewBadRequest(fmt.Sprintf("bad request unexpected phase = %d, expected phase = %d, did you forget to set the phase while decoding the request ?", req.phase, initialPhase))
}
rsp := createOIDCKubeconfigRsp{}
scopes := []string{"openid", "email"}
if oidcCfg.OfflineAccessAsScope {
scopes = append(scopes, "offline_access")
}
// pass nonce
nonce := rand.String(rand.IntnRange(10, 15))
rsp.nonce = nonce
rsp.secureCookieMode = oidcCfg.CookieSecureMode
oidcState := OIDCState{
Nonce: nonce,
ClusterID: req.ClusterID,
ProjectID: req.ProjectID,
UserID: req.UserID,
Datacenter: req.Datacenter,
}
rawState, err := json.Marshal(oidcState)
if err != nil {
return nil, err
}
encodedState := base64.StdEncoding.EncodeToString(rawState)
urlSafeState := url.QueryEscape(encodedState)
rsp.authCodeURL = oidcIssuer.AuthCodeURL(urlSafeState, oidcCfg.OfflineAccessAsScope, scopes...)
return rsp, nil
}
}
type encodeKubeConifgResponse struct {
clientCfg *clientcmdapi.Config
filePrefix string
}
func EncodeKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(*encodeKubeConifgResponse)
cfg := rsp.clientCfg
filename := "kubeconfig"
if len(rsp.filePrefix) > 0 {
filename = fmt.Sprintf("%s-%s", filename, rsp.filePrefix)
}
if len(cfg.Contexts) > 0 {
filename = fmt.Sprintf("%s-%s", filename, cfg.Contexts[cfg.CurrentContext].Cluster)
}
w.Header().Set("Content-Type", "application/yaml")
w.Header().Set("Content-disposition", fmt.Sprintf("attachment; filename=%s", filename))
w.Header().Add("Cache-Control", "no-cache")
b, err := clientcmd.Write(*cfg)
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
type createOIDCKubeconfigRsp struct {
// authCodeURL holds a URL to OpenID provider's consent page that asks for permissions for the required scopes explicitly.
authCodeURL string
// phase tells encoding function how to handle response
phase int
// oidcKubeConfig holds not serialized kubeconfig
oidcKubeConfig *clientcmdapi.Config
// nonce holds an arbitrary number storied in cookie to prevent Cross-site Request Forgery attack.
nonce string
// cookie received only with HTTPS, never with HTTP.
secureCookieMode bool
}
func EncodeOIDCKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(createOIDCKubeconfigRsp)
// handles kubeconfig Generated PHASE
// it means that kubeconfig was generated and we need to properly encode it.
if rsp.phase == kubeconfigGenerated {
// clear cookie by setting MaxAge<0
err = setCookie(w, "", rsp.secureCookieMode, -1)
if err != nil {
return fmt.Errorf("the cookie can't be removed, err = %v", err)
}
return EncodeKubeconfig(c, w, &encodeKubeConifgResponse{clientCfg: rsp.oidcKubeConfig})
}
// handles initialPhase
// redirects request to OpenID provider's consent page
// and set cookie with nonce
err = setCookie(w, rsp.nonce, rsp.secureCookieMode, cookieMaxAge)
if err != nil {
return fmt.Errorf("the cookie can't be created, err = %v", err)
}
w.Header().Add("Location", rsp.authCodeURL)
w.Header().Add("Cache-Control", "no-cache")
w.WriteHeader(http.StatusSeeOther)
return nil
}
func DecodeGetAdminKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req, err := common.DecodeGetClusterReq(c, r)
if err != nil {
return nil, err
}
return req, nil
}
const (
initialPhase = iota
exchangeCodePhase = iota
kubeconfigGenerated = iota
)
// OIDCState holds data that are send and retrieved from OIDC provider
type OIDCState struct {
// nonce a random string that binds requests / responses of API server and OIDC provider
// see https://tools.ietf.org/html/rfc6749#section-10.12
Nonce string `json:"nonce"`
ClusterID string `json:"cluster_id"`
ProjectID string `json:"project_id"`
// UserID holds the ID of the user on behalf of which the request is being handled.
UserID string `json:"user_id"`
Datacenter string `json:"datacenter"`
}
// CreateOIDCKubeconfigReq represent a request for creating kubeconfig for a cluster with OIDC credentials
// swagger:parameters createOIDCKubeconfig
type CreateOIDCKubeconfigReq struct {
// in: query
ClusterID string `json:"cluster_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
UserID string `json:"user_id,omitempty"`
Datacenter string `json:"datacenter,omitempty"`
// not exported so that they don't leak to swagger spec.
code string
encodedState string
decodedState OIDCState
phase int
cookieNonceValue string
}
func | (c context.Context, r *http.Request) (interface{}, error) {
req := CreateOIDCKubeconfigReq{}
// handle OIDC errors
{
errType := r.URL.Query().Get("error")
errMessage := r.URL.Query().Get("error_description")
if len(errMessage) != 0 {
return nil, fmt.Errorf("OIDC provider error type = %s, description = %s", errType, errMessage)
}
}
// if true - then this is a callback from OIDC provider and the next step is
// to exchange the given code and generate kubeconfig
// note: state is decoded here so that the middlewares can load providers (cluster) into the ctx.
req.code = r.URL.Query().Get("code")
req.encodedState = r.URL.Query().Get("state")
if len(req.code) != 0 && len(req.encodedState) != 0 {
unescapedState, err := url.QueryUnescape(req.encodedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected url encoded value, err = %v", err)
}
rawState, err := base64.StdEncoding.DecodeString(unescapedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected base64 encoded value, err = %v", err)
}
oidcState := OIDCState{}
if err := json.Unmarshal(rawState, &oidcState); err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected json encoded value, err = %v", err)
}
// handle cookie when new endpoint is created and secureCookie was initialized
if secureCookie != nil {
// cookie should be set in initial code phase
if cookie, err := r.Cookie(csrfCookieName); err == nil {
var value string
if err = secureCookie.Decode(csrfCookieName, cookie.Value, &value); err == nil {
req.cookieNonceValue = value
}
} else {
return nil, kcerrors.NewBadRequest("incorrect value of cookie or cookie not set, err = %v", err)
}
}
req.phase = exchangeCodePhase
req.Datacenter = oidcState.Datacenter
req.ProjectID = oidcState.ProjectID
req.UserID = oidcState.UserID
req.ClusterID = oidcState.ClusterID
req.decodedState = oidcState
return req, nil
}
// initial flow an end-user wants to authenticate using OIDC provider
req.ClusterID = r.URL.Query().Get("cluster_id")
req.ProjectID = r.URL.Query().Get("project_id")
req.UserID = r.URL.Query().Get("user_id")
req.Datacenter = r.URL.Query().Get("datacenter")
if len(req.ClusterID) == 0 || len(req.ProjectID) == 0 || len(req.UserID) == 0 || len(req.Datacenter) == 0 {
return nil, errors.New("the following query parameters cluster_id, project_id, user_id and datacenter are mandatory, please make sure that all are set")
}
req.phase = initialPhase
return req, nil
}
// GetUserID implements UserGetter interface
func (r CreateOIDCKubeconfigReq) GetUserID() string {
return r.UserID
}
// GetDC implements DCGetter interface
func (r CreateOIDCKubeconfigReq) GetDC() string {
return r.Datacenter
}
// GetProjectID implements ProjectGetter interface
func (r CreateOIDCKubeconfigReq) GetProjectID() string {
return r.ProjectID
}
// setCookie add cookie with random string value
func setCookie(w http.ResponseWriter, nonce string, secureMode bool, maxAge int) error {
encoded, err := secureCookie.Encode(csrfCookieName, nonce)
if err != nil {
return fmt.Errorf("the encode cookie failed, err = %v", err)
}
cookie := &http.Cookie{
Name: csrfCookieName,
Value: encoded,
MaxAge: maxAge,
HttpOnly: true,
Secure: secureMode,
SameSite: http.SameSiteLaxMode,
}
http.SetCookie(w, cookie)
return nil
}
| DecodeCreateOIDCKubeconfig | identifier_name |
kubeconfig.go | package cluster
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/go-kit/kit/endpoint"
"github.com/gorilla/securecookie"
"github.com/kubermatic/kubermatic/api/pkg/handler/auth"
"github.com/kubermatic/kubermatic/api/pkg/handler/middleware"
"github.com/kubermatic/kubermatic/api/pkg/handler/v1/common"
"github.com/kubermatic/kubermatic/api/pkg/provider"
kcerrors "github.com/kubermatic/kubermatic/api/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
const (
csrfCookieName = "csrf_token"
cookieMaxAge = 180
)
var secureCookie *securecookie.SecureCookie
func GetAdminKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
filePrefix := "admin"
var adminClientCfg *clientcmdapi.Config
if strings.HasPrefix(userInfo.Group, "viewers") {
filePrefix = "viewer"
adminClientCfg, err = clusterProvider.GetViewerKubeconfigForCustomerCluster(cluster)
} else {
adminClientCfg, err = clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
}
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: filePrefix}, nil
}
}
func GetOidcKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
adminClientCfg, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["idp-issuer-url"] = cluster.Spec.OIDC.IssuerURL
clientCmdAuthProvider.Config["client-id"] = cluster.Spec.OIDC.ClientID
if cluster.Spec.OIDC.ClientSecret != "" {
clientCmdAuthProvider.Config["client-secret"] = cluster.Spec.OIDC.ClientSecret
}
if cluster.Spec.OIDC.ExtraScopes != "" {
clientCmdAuthProvider.Config["extra-scopes"] = cluster.Spec.OIDC.ExtraScopes
}
clientCmdAuth.AuthProvider = clientCmdAuthProvider
adminClientCfg.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
adminClientCfg.AuthInfos["default"] = clientCmdAuth
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: "oidc"}, nil
}
}
func CreateOIDCKubeconfigEndpoint(projectProvider provider.ProjectProvider, oidcIssuerVerifier auth.OIDCIssuerVerifier, oidcCfg common.OIDCConfiguration) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
oidcIssuer := oidcIssuerVerifier.(auth.OIDCIssuer)
oidcVerifier := oidcIssuerVerifier.(auth.TokenVerifier)
req := request.(CreateOIDCKubeconfigReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo := ctx.Value(middleware.UserInfoContextKey).(*provider.UserInfo)
if secureCookie == nil {
secureCookie = securecookie.New([]byte(oidcCfg.CookieHashKey), nil)
}
_, err := projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// PHASE exchangeCode handles callback response from OIDC provider
// and generates kubeconfig
if req.phase == exchangeCodePhase {
// validate the state
if req.decodedState.Nonce != req.cookieNonceValue {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter = %s", req.decodedState.Nonce)
}
oidcTokens, err := oidcIssuer.Exchange(ctx, req.code)
if err != nil {
return nil, kcerrors.NewBadRequest("error while exchaning oidc code for token = %v", err)
}
if len(oidcTokens.RefreshToken) == 0 {
return nil, kcerrors.NewBadRequest("the refresh token is missing but required, try setting/unsetting \"oidc-offline-access-as-scope\" command line flag")
}
claims, err := oidcVerifier.Verify(ctx, oidcTokens.IDToken)
if err != nil {
return nil, kcerrors.New(http.StatusUnauthorized, err.Error())
}
if len(claims.Email) == 0 {
return nil, kcerrors.NewBadRequest("the token doesn't contain the mandatory \"email\" claim")
}
adminKubeConfig, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// create a kubeconfig that contains OIDC tokens
oidcKubeCfg := clientcmdapi.NewConfig()
{
// grab admin kubeconfig to read the cluster info
var clusterFromAdminKubeCfg *clientcmdapi.Cluster
for clusterName, cluster := range adminKubeConfig.Clusters {
if clusterName == req.ClusterID {
clusterFromAdminKubeCfg = cluster
}
}
if clusterFromAdminKubeCfg == nil {
return nil, kcerrors.New(http.StatusInternalServerError, fmt.Sprintf("unable to construct kubeconfig because couldn't find %s cluster enty in existing kubecfg", req.ClusterID))
}
// create cluster entry
clientCmdCluster := clientcmdapi.NewCluster()
clientCmdCluster.Server = clusterFromAdminKubeCfg.Server
clientCmdCluster.CertificateAuthorityData = clusterFromAdminKubeCfg.CertificateAuthorityData
oidcKubeCfg.Clusters[req.ClusterID] = clientCmdCluster
// create auth entry
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["id-token"] = oidcTokens.IDToken
clientCmdAuthProvider.Config["refresh-token"] = oidcTokens.RefreshToken
clientCmdAuthProvider.Config["idp-issuer-url"] = oidcCfg.URL
clientCmdAuthProvider.Config["client-id"] = oidcCfg.ClientID
clientCmdAuthProvider.Config["client-secret"] = oidcCfg.ClientSecret
clientCmdAuth.AuthProvider = clientCmdAuthProvider
oidcKubeCfg.AuthInfos[claims.Email] = clientCmdAuth
// create default ctx
clientCmdCtx := clientcmdapi.NewContext()
clientCmdCtx.Cluster = req.ClusterID
clientCmdCtx.AuthInfo = claims.Email
oidcKubeCfg.Contexts["default"] = clientCmdCtx
oidcKubeCfg.CurrentContext = "default"
}
// prepare final rsp that holds kubeconfig
rsp := createOIDCKubeconfigRsp{}
rsp.phase = kubeconfigGenerated
rsp.oidcKubeConfig = oidcKubeCfg
rsp.secureCookieMode = oidcCfg.CookieSecureMode
return rsp, nil
}
// PHASE initial handles request from the end-user that wants to authenticate
// and kicksoff the process of kubeconfig generation
if req.phase != initialPhase {
return nil, kcerrors.NewBadRequest(fmt.Sprintf("bad request unexpected phase = %d, expected phase = %d, did you forget to set the phase while decoding the request ?", req.phase, initialPhase))
}
rsp := createOIDCKubeconfigRsp{}
scopes := []string{"openid", "email"}
if oidcCfg.OfflineAccessAsScope {
scopes = append(scopes, "offline_access")
}
// pass nonce
nonce := rand.String(rand.IntnRange(10, 15))
rsp.nonce = nonce
rsp.secureCookieMode = oidcCfg.CookieSecureMode
oidcState := OIDCState{
Nonce: nonce,
ClusterID: req.ClusterID,
ProjectID: req.ProjectID,
UserID: req.UserID,
Datacenter: req.Datacenter,
}
rawState, err := json.Marshal(oidcState)
if err != nil {
return nil, err
}
encodedState := base64.StdEncoding.EncodeToString(rawState)
urlSafeState := url.QueryEscape(encodedState)
rsp.authCodeURL = oidcIssuer.AuthCodeURL(urlSafeState, oidcCfg.OfflineAccessAsScope, scopes...)
return rsp, nil
}
}
type encodeKubeConifgResponse struct {
clientCfg *clientcmdapi.Config
filePrefix string
}
func EncodeKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(*encodeKubeConifgResponse)
cfg := rsp.clientCfg
filename := "kubeconfig"
if len(rsp.filePrefix) > 0 {
filename = fmt.Sprintf("%s-%s", filename, rsp.filePrefix)
}
if len(cfg.Contexts) > 0 {
filename = fmt.Sprintf("%s-%s", filename, cfg.Contexts[cfg.CurrentContext].Cluster)
}
w.Header().Set("Content-Type", "application/yaml")
w.Header().Set("Content-disposition", fmt.Sprintf("attachment; filename=%s", filename))
w.Header().Add("Cache-Control", "no-cache")
b, err := clientcmd.Write(*cfg)
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
type createOIDCKubeconfigRsp struct {
// authCodeURL holds a URL to OpenID provider's consent page that asks for permissions for the required scopes explicitly.
authCodeURL string
// phase tells encoding function how to handle response
phase int
// oidcKubeConfig holds not serialized kubeconfig
oidcKubeConfig *clientcmdapi.Config
// nonce holds an arbitrary number storied in cookie to prevent Cross-site Request Forgery attack.
nonce string
// cookie received only with HTTPS, never with HTTP.
secureCookieMode bool
}
func EncodeOIDCKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) |
func DecodeGetAdminKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req, err := common.DecodeGetClusterReq(c, r)
if err != nil {
return nil, err
}
return req, nil
}
const (
initialPhase = iota
exchangeCodePhase = iota
kubeconfigGenerated = iota
)
// OIDCState holds data that are send and retrieved from OIDC provider
type OIDCState struct {
// nonce a random string that binds requests / responses of API server and OIDC provider
// see https://tools.ietf.org/html/rfc6749#section-10.12
Nonce string `json:"nonce"`
ClusterID string `json:"cluster_id"`
ProjectID string `json:"project_id"`
// UserID holds the ID of the user on behalf of which the request is being handled.
UserID string `json:"user_id"`
Datacenter string `json:"datacenter"`
}
// CreateOIDCKubeconfigReq represent a request for creating kubeconfig for a cluster with OIDC credentials
// swagger:parameters createOIDCKubeconfig
type CreateOIDCKubeconfigReq struct {
// in: query
ClusterID string `json:"cluster_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
UserID string `json:"user_id,omitempty"`
Datacenter string `json:"datacenter,omitempty"`
// not exported so that they don't leak to swagger spec.
code string
encodedState string
decodedState OIDCState
phase int
cookieNonceValue string
}
func DecodeCreateOIDCKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req := CreateOIDCKubeconfigReq{}
// handle OIDC errors
{
errType := r.URL.Query().Get("error")
errMessage := r.URL.Query().Get("error_description")
if len(errMessage) != 0 {
return nil, fmt.Errorf("OIDC provider error type = %s, description = %s", errType, errMessage)
}
}
// if true - then this is a callback from OIDC provider and the next step is
// to exchange the given code and generate kubeconfig
// note: state is decoded here so that the middlewares can load providers (cluster) into the ctx.
req.code = r.URL.Query().Get("code")
req.encodedState = r.URL.Query().Get("state")
if len(req.code) != 0 && len(req.encodedState) != 0 {
unescapedState, err := url.QueryUnescape(req.encodedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected url encoded value, err = %v", err)
}
rawState, err := base64.StdEncoding.DecodeString(unescapedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected base64 encoded value, err = %v", err)
}
oidcState := OIDCState{}
if err := json.Unmarshal(rawState, &oidcState); err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected json encoded value, err = %v", err)
}
// handle cookie when new endpoint is created and secureCookie was initialized
if secureCookie != nil {
// cookie should be set in initial code phase
if cookie, err := r.Cookie(csrfCookieName); err == nil {
var value string
if err = secureCookie.Decode(csrfCookieName, cookie.Value, &value); err == nil {
req.cookieNonceValue = value
}
} else {
return nil, kcerrors.NewBadRequest("incorrect value of cookie or cookie not set, err = %v", err)
}
}
req.phase = exchangeCodePhase
req.Datacenter = oidcState.Datacenter
req.ProjectID = oidcState.ProjectID
req.UserID = oidcState.UserID
req.ClusterID = oidcState.ClusterID
req.decodedState = oidcState
return req, nil
}
// initial flow an end-user wants to authenticate using OIDC provider
req.ClusterID = r.URL.Query().Get("cluster_id")
req.ProjectID = r.URL.Query().Get("project_id")
req.UserID = r.URL.Query().Get("user_id")
req.Datacenter = r.URL.Query().Get("datacenter")
if len(req.ClusterID) == 0 || len(req.ProjectID) == 0 || len(req.UserID) == 0 || len(req.Datacenter) == 0 {
return nil, errors.New("the following query parameters cluster_id, project_id, user_id and datacenter are mandatory, please make sure that all are set")
}
req.phase = initialPhase
return req, nil
}
// GetUserID implements UserGetter interface
func (r CreateOIDCKubeconfigReq) GetUserID() string {
return r.UserID
}
// GetDC implements DCGetter interface
func (r CreateOIDCKubeconfigReq) GetDC() string {
return r.Datacenter
}
// GetProjectID implements ProjectGetter interface
func (r CreateOIDCKubeconfigReq) GetProjectID() string {
return r.ProjectID
}
// setCookie add cookie with random string value
func setCookie(w http.ResponseWriter, nonce string, secureMode bool, maxAge int) error {
encoded, err := secureCookie.Encode(csrfCookieName, nonce)
if err != nil {
return fmt.Errorf("the encode cookie failed, err = %v", err)
}
cookie := &http.Cookie{
Name: csrfCookieName,
Value: encoded,
MaxAge: maxAge,
HttpOnly: true,
Secure: secureMode,
SameSite: http.SameSiteLaxMode,
}
http.SetCookie(w, cookie)
return nil
}
| {
rsp := response.(createOIDCKubeconfigRsp)
// handles kubeconfig Generated PHASE
// it means that kubeconfig was generated and we need to properly encode it.
if rsp.phase == kubeconfigGenerated {
// clear cookie by setting MaxAge<0
err = setCookie(w, "", rsp.secureCookieMode, -1)
if err != nil {
return fmt.Errorf("the cookie can't be removed, err = %v", err)
}
return EncodeKubeconfig(c, w, &encodeKubeConifgResponse{clientCfg: rsp.oidcKubeConfig})
}
// handles initialPhase
// redirects request to OpenID provider's consent page
// and set cookie with nonce
err = setCookie(w, rsp.nonce, rsp.secureCookieMode, cookieMaxAge)
if err != nil {
return fmt.Errorf("the cookie can't be created, err = %v", err)
}
w.Header().Add("Location", rsp.authCodeURL)
w.Header().Add("Cache-Control", "no-cache")
w.WriteHeader(http.StatusSeeOther)
return nil
} | identifier_body |
kubeconfig.go | package cluster
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/go-kit/kit/endpoint"
"github.com/gorilla/securecookie"
"github.com/kubermatic/kubermatic/api/pkg/handler/auth"
"github.com/kubermatic/kubermatic/api/pkg/handler/middleware"
"github.com/kubermatic/kubermatic/api/pkg/handler/v1/common"
"github.com/kubermatic/kubermatic/api/pkg/provider"
kcerrors "github.com/kubermatic/kubermatic/api/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
const (
csrfCookieName = "csrf_token"
cookieMaxAge = 180
)
var secureCookie *securecookie.SecureCookie
func GetAdminKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
filePrefix := "admin"
var adminClientCfg *clientcmdapi.Config
if strings.HasPrefix(userInfo.Group, "viewers") {
filePrefix = "viewer"
adminClientCfg, err = clusterProvider.GetViewerKubeconfigForCustomerCluster(cluster)
} else {
adminClientCfg, err = clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
}
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: filePrefix}, nil
}
}
func GetOidcKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
adminClientCfg, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["idp-issuer-url"] = cluster.Spec.OIDC.IssuerURL
clientCmdAuthProvider.Config["client-id"] = cluster.Spec.OIDC.ClientID
if cluster.Spec.OIDC.ClientSecret != "" {
clientCmdAuthProvider.Config["client-secret"] = cluster.Spec.OIDC.ClientSecret
}
if cluster.Spec.OIDC.ExtraScopes != "" {
clientCmdAuthProvider.Config["extra-scopes"] = cluster.Spec.OIDC.ExtraScopes
}
clientCmdAuth.AuthProvider = clientCmdAuthProvider
adminClientCfg.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
adminClientCfg.AuthInfos["default"] = clientCmdAuth
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: "oidc"}, nil
}
}
func CreateOIDCKubeconfigEndpoint(projectProvider provider.ProjectProvider, oidcIssuerVerifier auth.OIDCIssuerVerifier, oidcCfg common.OIDCConfiguration) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
oidcIssuer := oidcIssuerVerifier.(auth.OIDCIssuer)
oidcVerifier := oidcIssuerVerifier.(auth.TokenVerifier)
req := request.(CreateOIDCKubeconfigReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo := ctx.Value(middleware.UserInfoContextKey).(*provider.UserInfo)
if secureCookie == nil {
secureCookie = securecookie.New([]byte(oidcCfg.CookieHashKey), nil)
}
_, err := projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// PHASE exchangeCode handles callback response from OIDC provider
// and generates kubeconfig
if req.phase == exchangeCodePhase {
// validate the state
if req.decodedState.Nonce != req.cookieNonceValue {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter = %s", req.decodedState.Nonce)
}
oidcTokens, err := oidcIssuer.Exchange(ctx, req.code)
if err != nil {
return nil, kcerrors.NewBadRequest("error while exchaning oidc code for token = %v", err)
}
if len(oidcTokens.RefreshToken) == 0 {
return nil, kcerrors.NewBadRequest("the refresh token is missing but required, try setting/unsetting \"oidc-offline-access-as-scope\" command line flag")
}
claims, err := oidcVerifier.Verify(ctx, oidcTokens.IDToken)
if err != nil {
return nil, kcerrors.New(http.StatusUnauthorized, err.Error())
}
if len(claims.Email) == 0 {
return nil, kcerrors.NewBadRequest("the token doesn't contain the mandatory \"email\" claim")
}
adminKubeConfig, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// create a kubeconfig that contains OIDC tokens
oidcKubeCfg := clientcmdapi.NewConfig()
{
// grab admin kubeconfig to read the cluster info
var clusterFromAdminKubeCfg *clientcmdapi.Cluster
for clusterName, cluster := range adminKubeConfig.Clusters {
if clusterName == req.ClusterID {
clusterFromAdminKubeCfg = cluster
}
}
if clusterFromAdminKubeCfg == nil {
return nil, kcerrors.New(http.StatusInternalServerError, fmt.Sprintf("unable to construct kubeconfig because couldn't find %s cluster enty in existing kubecfg", req.ClusterID))
}
// create cluster entry
clientCmdCluster := clientcmdapi.NewCluster()
clientCmdCluster.Server = clusterFromAdminKubeCfg.Server
clientCmdCluster.CertificateAuthorityData = clusterFromAdminKubeCfg.CertificateAuthorityData
oidcKubeCfg.Clusters[req.ClusterID] = clientCmdCluster
// create auth entry
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["id-token"] = oidcTokens.IDToken
clientCmdAuthProvider.Config["refresh-token"] = oidcTokens.RefreshToken
clientCmdAuthProvider.Config["idp-issuer-url"] = oidcCfg.URL
clientCmdAuthProvider.Config["client-id"] = oidcCfg.ClientID
clientCmdAuthProvider.Config["client-secret"] = oidcCfg.ClientSecret
clientCmdAuth.AuthProvider = clientCmdAuthProvider
oidcKubeCfg.AuthInfos[claims.Email] = clientCmdAuth
// create default ctx
clientCmdCtx := clientcmdapi.NewContext()
clientCmdCtx.Cluster = req.ClusterID
clientCmdCtx.AuthInfo = claims.Email
oidcKubeCfg.Contexts["default"] = clientCmdCtx
oidcKubeCfg.CurrentContext = "default"
}
// prepare final rsp that holds kubeconfig
rsp := createOIDCKubeconfigRsp{}
rsp.phase = kubeconfigGenerated
rsp.oidcKubeConfig = oidcKubeCfg
rsp.secureCookieMode = oidcCfg.CookieSecureMode
return rsp, nil
}
// PHASE initial handles request from the end-user that wants to authenticate
// and kicksoff the process of kubeconfig generation
if req.phase != initialPhase {
return nil, kcerrors.NewBadRequest(fmt.Sprintf("bad request unexpected phase = %d, expected phase = %d, did you forget to set the phase while decoding the request ?", req.phase, initialPhase))
}
rsp := createOIDCKubeconfigRsp{}
scopes := []string{"openid", "email"}
if oidcCfg.OfflineAccessAsScope {
scopes = append(scopes, "offline_access")
}
// pass nonce
nonce := rand.String(rand.IntnRange(10, 15))
rsp.nonce = nonce
rsp.secureCookieMode = oidcCfg.CookieSecureMode
oidcState := OIDCState{
Nonce: nonce,
ClusterID: req.ClusterID,
ProjectID: req.ProjectID,
UserID: req.UserID,
Datacenter: req.Datacenter,
}
rawState, err := json.Marshal(oidcState)
if err != nil {
return nil, err
}
encodedState := base64.StdEncoding.EncodeToString(rawState)
urlSafeState := url.QueryEscape(encodedState)
rsp.authCodeURL = oidcIssuer.AuthCodeURL(urlSafeState, oidcCfg.OfflineAccessAsScope, scopes...)
return rsp, nil
}
}
type encodeKubeConifgResponse struct {
clientCfg *clientcmdapi.Config
filePrefix string
}
func EncodeKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(*encodeKubeConifgResponse)
cfg := rsp.clientCfg
filename := "kubeconfig"
if len(rsp.filePrefix) > 0 {
filename = fmt.Sprintf("%s-%s", filename, rsp.filePrefix)
}
if len(cfg.Contexts) > 0 {
filename = fmt.Sprintf("%s-%s", filename, cfg.Contexts[cfg.CurrentContext].Cluster)
}
w.Header().Set("Content-Type", "application/yaml")
w.Header().Set("Content-disposition", fmt.Sprintf("attachment; filename=%s", filename))
w.Header().Add("Cache-Control", "no-cache")
b, err := clientcmd.Write(*cfg)
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
type createOIDCKubeconfigRsp struct {
// authCodeURL holds a URL to OpenID provider's consent page that asks for permissions for the required scopes explicitly.
authCodeURL string
// phase tells encoding function how to handle response
phase int
// oidcKubeConfig holds not serialized kubeconfig
oidcKubeConfig *clientcmdapi.Config
// nonce holds an arbitrary number storied in cookie to prevent Cross-site Request Forgery attack.
nonce string
// cookie received only with HTTPS, never with HTTP.
secureCookieMode bool
}
func EncodeOIDCKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(createOIDCKubeconfigRsp)
// handles kubeconfig Generated PHASE
// it means that kubeconfig was generated and we need to properly encode it.
if rsp.phase == kubeconfigGenerated {
// clear cookie by setting MaxAge<0
err = setCookie(w, "", rsp.secureCookieMode, -1)
if err != nil {
return fmt.Errorf("the cookie can't be removed, err = %v", err)
}
return EncodeKubeconfig(c, w, &encodeKubeConifgResponse{clientCfg: rsp.oidcKubeConfig})
}
// handles initialPhase
// redirects request to OpenID provider's consent page
// and set cookie with nonce
err = setCookie(w, rsp.nonce, rsp.secureCookieMode, cookieMaxAge)
if err != nil {
return fmt.Errorf("the cookie can't be created, err = %v", err)
}
w.Header().Add("Location", rsp.authCodeURL)
w.Header().Add("Cache-Control", "no-cache")
w.WriteHeader(http.StatusSeeOther)
return nil
}
func DecodeGetAdminKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req, err := common.DecodeGetClusterReq(c, r)
if err != nil |
return req, nil
}
const (
initialPhase = iota
exchangeCodePhase = iota
kubeconfigGenerated = iota
)
// OIDCState holds data that are send and retrieved from OIDC provider
type OIDCState struct {
// nonce a random string that binds requests / responses of API server and OIDC provider
// see https://tools.ietf.org/html/rfc6749#section-10.12
Nonce string `json:"nonce"`
ClusterID string `json:"cluster_id"`
ProjectID string `json:"project_id"`
// UserID holds the ID of the user on behalf of which the request is being handled.
UserID string `json:"user_id"`
Datacenter string `json:"datacenter"`
}
// CreateOIDCKubeconfigReq represent a request for creating kubeconfig for a cluster with OIDC credentials
// swagger:parameters createOIDCKubeconfig
type CreateOIDCKubeconfigReq struct {
// in: query
ClusterID string `json:"cluster_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
UserID string `json:"user_id,omitempty"`
Datacenter string `json:"datacenter,omitempty"`
// not exported so that they don't leak to swagger spec.
code string
encodedState string
decodedState OIDCState
phase int
cookieNonceValue string
}
func DecodeCreateOIDCKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req := CreateOIDCKubeconfigReq{}
// handle OIDC errors
{
errType := r.URL.Query().Get("error")
errMessage := r.URL.Query().Get("error_description")
if len(errMessage) != 0 {
return nil, fmt.Errorf("OIDC provider error type = %s, description = %s", errType, errMessage)
}
}
// if true - then this is a callback from OIDC provider and the next step is
// to exchange the given code and generate kubeconfig
// note: state is decoded here so that the middlewares can load providers (cluster) into the ctx.
req.code = r.URL.Query().Get("code")
req.encodedState = r.URL.Query().Get("state")
if len(req.code) != 0 && len(req.encodedState) != 0 {
unescapedState, err := url.QueryUnescape(req.encodedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected url encoded value, err = %v", err)
}
rawState, err := base64.StdEncoding.DecodeString(unescapedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected base64 encoded value, err = %v", err)
}
oidcState := OIDCState{}
if err := json.Unmarshal(rawState, &oidcState); err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected json encoded value, err = %v", err)
}
// handle cookie when new endpoint is created and secureCookie was initialized
if secureCookie != nil {
// cookie should be set in initial code phase
if cookie, err := r.Cookie(csrfCookieName); err == nil {
var value string
if err = secureCookie.Decode(csrfCookieName, cookie.Value, &value); err == nil {
req.cookieNonceValue = value
}
} else {
return nil, kcerrors.NewBadRequest("incorrect value of cookie or cookie not set, err = %v", err)
}
}
req.phase = exchangeCodePhase
req.Datacenter = oidcState.Datacenter
req.ProjectID = oidcState.ProjectID
req.UserID = oidcState.UserID
req.ClusterID = oidcState.ClusterID
req.decodedState = oidcState
return req, nil
}
// initial flow an end-user wants to authenticate using OIDC provider
req.ClusterID = r.URL.Query().Get("cluster_id")
req.ProjectID = r.URL.Query().Get("project_id")
req.UserID = r.URL.Query().Get("user_id")
req.Datacenter = r.URL.Query().Get("datacenter")
if len(req.ClusterID) == 0 || len(req.ProjectID) == 0 || len(req.UserID) == 0 || len(req.Datacenter) == 0 {
return nil, errors.New("the following query parameters cluster_id, project_id, user_id and datacenter are mandatory, please make sure that all are set")
}
req.phase = initialPhase
return req, nil
}
// GetUserID implements UserGetter interface
func (r CreateOIDCKubeconfigReq) GetUserID() string {
return r.UserID
}
// GetDC implements DCGetter interface
func (r CreateOIDCKubeconfigReq) GetDC() string {
return r.Datacenter
}
// GetProjectID implements ProjectGetter interface
func (r CreateOIDCKubeconfigReq) GetProjectID() string {
return r.ProjectID
}
// setCookie add cookie with random string value
func setCookie(w http.ResponseWriter, nonce string, secureMode bool, maxAge int) error {
encoded, err := secureCookie.Encode(csrfCookieName, nonce)
if err != nil {
return fmt.Errorf("the encode cookie failed, err = %v", err)
}
cookie := &http.Cookie{
Name: csrfCookieName,
Value: encoded,
MaxAge: maxAge,
HttpOnly: true,
Secure: secureMode,
SameSite: http.SameSiteLaxMode,
}
http.SetCookie(w, cookie)
return nil
}
| {
return nil, err
} | conditional_block |
kubeconfig.go | package cluster
import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/go-kit/kit/endpoint"
"github.com/gorilla/securecookie"
"github.com/kubermatic/kubermatic/api/pkg/handler/auth"
"github.com/kubermatic/kubermatic/api/pkg/handler/middleware"
"github.com/kubermatic/kubermatic/api/pkg/handler/v1/common"
"github.com/kubermatic/kubermatic/api/pkg/provider"
kcerrors "github.com/kubermatic/kubermatic/api/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
const (
csrfCookieName = "csrf_token"
cookieMaxAge = 180
)
var secureCookie *securecookie.SecureCookie
func GetAdminKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
filePrefix := "admin"
var adminClientCfg *clientcmdapi.Config
if strings.HasPrefix(userInfo.Group, "viewers") {
filePrefix = "viewer"
adminClientCfg, err = clusterProvider.GetViewerKubeconfigForCustomerCluster(cluster)
} else {
adminClientCfg, err = clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
}
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: filePrefix}, nil
}
}
func GetOidcKubeconfigEndpoint(projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
req := request.(common.GetClusterReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo, err := userInfoGetter(ctx, req.ProjectID)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
_, err = projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
adminClientCfg, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["idp-issuer-url"] = cluster.Spec.OIDC.IssuerURL
clientCmdAuthProvider.Config["client-id"] = cluster.Spec.OIDC.ClientID
if cluster.Spec.OIDC.ClientSecret != "" {
clientCmdAuthProvider.Config["client-secret"] = cluster.Spec.OIDC.ClientSecret
}
if cluster.Spec.OIDC.ExtraScopes != "" {
clientCmdAuthProvider.Config["extra-scopes"] = cluster.Spec.OIDC.ExtraScopes
}
clientCmdAuth.AuthProvider = clientCmdAuthProvider
adminClientCfg.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
adminClientCfg.AuthInfos["default"] = clientCmdAuth
return &encodeKubeConifgResponse{clientCfg: adminClientCfg, filePrefix: "oidc"}, nil
}
}
func CreateOIDCKubeconfigEndpoint(projectProvider provider.ProjectProvider, oidcIssuerVerifier auth.OIDCIssuerVerifier, oidcCfg common.OIDCConfiguration) endpoint.Endpoint {
return func(ctx context.Context, request interface{}) (interface{}, error) {
oidcIssuer := oidcIssuerVerifier.(auth.OIDCIssuer)
oidcVerifier := oidcIssuerVerifier.(auth.TokenVerifier)
req := request.(CreateOIDCKubeconfigReq)
clusterProvider := ctx.Value(middleware.ClusterProviderContextKey).(provider.ClusterProvider)
userInfo := ctx.Value(middleware.UserInfoContextKey).(*provider.UserInfo)
if secureCookie == nil {
secureCookie = securecookie.New([]byte(oidcCfg.CookieHashKey), nil)
}
_, err := projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
cluster, err := clusterProvider.Get(userInfo, req.ClusterID, &provider.ClusterGetOptions{})
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// PHASE exchangeCode handles callback response from OIDC provider
// and generates kubeconfig
if req.phase == exchangeCodePhase {
// validate the state
if req.decodedState.Nonce != req.cookieNonceValue {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter = %s", req.decodedState.Nonce)
}
oidcTokens, err := oidcIssuer.Exchange(ctx, req.code)
if err != nil {
return nil, kcerrors.NewBadRequest("error while exchaning oidc code for token = %v", err)
}
if len(oidcTokens.RefreshToken) == 0 {
return nil, kcerrors.NewBadRequest("the refresh token is missing but required, try setting/unsetting \"oidc-offline-access-as-scope\" command line flag")
}
claims, err := oidcVerifier.Verify(ctx, oidcTokens.IDToken)
if err != nil {
return nil, kcerrors.New(http.StatusUnauthorized, err.Error())
}
if len(claims.Email) == 0 {
return nil, kcerrors.NewBadRequest("the token doesn't contain the mandatory \"email\" claim")
}
adminKubeConfig, err := clusterProvider.GetAdminKubeconfigForCustomerCluster(cluster)
if err != nil {
return nil, common.KubernetesErrorToHTTPError(err)
}
// create a kubeconfig that contains OIDC tokens
oidcKubeCfg := clientcmdapi.NewConfig()
{
// grab admin kubeconfig to read the cluster info
var clusterFromAdminKubeCfg *clientcmdapi.Cluster
for clusterName, cluster := range adminKubeConfig.Clusters {
if clusterName == req.ClusterID {
clusterFromAdminKubeCfg = cluster
}
}
if clusterFromAdminKubeCfg == nil {
return nil, kcerrors.New(http.StatusInternalServerError, fmt.Sprintf("unable to construct kubeconfig because couldn't find %s cluster enty in existing kubecfg", req.ClusterID))
}
// create cluster entry
clientCmdCluster := clientcmdapi.NewCluster()
clientCmdCluster.Server = clusterFromAdminKubeCfg.Server
clientCmdCluster.CertificateAuthorityData = clusterFromAdminKubeCfg.CertificateAuthorityData
oidcKubeCfg.Clusters[req.ClusterID] = clientCmdCluster
// create auth entry
clientCmdAuth := clientcmdapi.NewAuthInfo()
clientCmdAuthProvider := &clientcmdapi.AuthProviderConfig{Config: map[string]string{}}
clientCmdAuthProvider.Name = "oidc"
clientCmdAuthProvider.Config["id-token"] = oidcTokens.IDToken
clientCmdAuthProvider.Config["refresh-token"] = oidcTokens.RefreshToken
clientCmdAuthProvider.Config["idp-issuer-url"] = oidcCfg.URL
clientCmdAuthProvider.Config["client-id"] = oidcCfg.ClientID
clientCmdAuthProvider.Config["client-secret"] = oidcCfg.ClientSecret
clientCmdAuth.AuthProvider = clientCmdAuthProvider
oidcKubeCfg.AuthInfos[claims.Email] = clientCmdAuth
// create default ctx
clientCmdCtx := clientcmdapi.NewContext()
clientCmdCtx.Cluster = req.ClusterID
clientCmdCtx.AuthInfo = claims.Email
oidcKubeCfg.Contexts["default"] = clientCmdCtx
oidcKubeCfg.CurrentContext = "default"
}
// prepare final rsp that holds kubeconfig
rsp := createOIDCKubeconfigRsp{}
rsp.phase = kubeconfigGenerated
rsp.oidcKubeConfig = oidcKubeCfg
rsp.secureCookieMode = oidcCfg.CookieSecureMode
return rsp, nil
}
// PHASE initial handles request from the end-user that wants to authenticate
// and kicksoff the process of kubeconfig generation
if req.phase != initialPhase {
return nil, kcerrors.NewBadRequest(fmt.Sprintf("bad request unexpected phase = %d, expected phase = %d, did you forget to set the phase while decoding the request ?", req.phase, initialPhase))
}
rsp := createOIDCKubeconfigRsp{}
scopes := []string{"openid", "email"}
if oidcCfg.OfflineAccessAsScope {
scopes = append(scopes, "offline_access")
}
// pass nonce
nonce := rand.String(rand.IntnRange(10, 15))
rsp.nonce = nonce
rsp.secureCookieMode = oidcCfg.CookieSecureMode
oidcState := OIDCState{
Nonce: nonce,
ClusterID: req.ClusterID,
ProjectID: req.ProjectID,
UserID: req.UserID,
Datacenter: req.Datacenter,
}
rawState, err := json.Marshal(oidcState)
if err != nil {
return nil, err
}
encodedState := base64.StdEncoding.EncodeToString(rawState)
urlSafeState := url.QueryEscape(encodedState) | return rsp, nil
}
}
type encodeKubeConifgResponse struct {
clientCfg *clientcmdapi.Config
filePrefix string
}
func EncodeKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(*encodeKubeConifgResponse)
cfg := rsp.clientCfg
filename := "kubeconfig"
if len(rsp.filePrefix) > 0 {
filename = fmt.Sprintf("%s-%s", filename, rsp.filePrefix)
}
if len(cfg.Contexts) > 0 {
filename = fmt.Sprintf("%s-%s", filename, cfg.Contexts[cfg.CurrentContext].Cluster)
}
w.Header().Set("Content-Type", "application/yaml")
w.Header().Set("Content-disposition", fmt.Sprintf("attachment; filename=%s", filename))
w.Header().Add("Cache-Control", "no-cache")
b, err := clientcmd.Write(*cfg)
if err != nil {
return err
}
_, err = w.Write(b)
return err
}
type createOIDCKubeconfigRsp struct {
// authCodeURL holds a URL to OpenID provider's consent page that asks for permissions for the required scopes explicitly.
authCodeURL string
// phase tells encoding function how to handle response
phase int
// oidcKubeConfig holds not serialized kubeconfig
oidcKubeConfig *clientcmdapi.Config
// nonce holds an arbitrary number storied in cookie to prevent Cross-site Request Forgery attack.
nonce string
// cookie received only with HTTPS, never with HTTP.
secureCookieMode bool
}
func EncodeOIDCKubeconfig(c context.Context, w http.ResponseWriter, response interface{}) (err error) {
rsp := response.(createOIDCKubeconfigRsp)
// handles kubeconfig Generated PHASE
// it means that kubeconfig was generated and we need to properly encode it.
if rsp.phase == kubeconfigGenerated {
// clear cookie by setting MaxAge<0
err = setCookie(w, "", rsp.secureCookieMode, -1)
if err != nil {
return fmt.Errorf("the cookie can't be removed, err = %v", err)
}
return EncodeKubeconfig(c, w, &encodeKubeConifgResponse{clientCfg: rsp.oidcKubeConfig})
}
// handles initialPhase
// redirects request to OpenID provider's consent page
// and set cookie with nonce
err = setCookie(w, rsp.nonce, rsp.secureCookieMode, cookieMaxAge)
if err != nil {
return fmt.Errorf("the cookie can't be created, err = %v", err)
}
w.Header().Add("Location", rsp.authCodeURL)
w.Header().Add("Cache-Control", "no-cache")
w.WriteHeader(http.StatusSeeOther)
return nil
}
func DecodeGetAdminKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req, err := common.DecodeGetClusterReq(c, r)
if err != nil {
return nil, err
}
return req, nil
}
const (
initialPhase = iota
exchangeCodePhase = iota
kubeconfigGenerated = iota
)
// OIDCState holds data that are send and retrieved from OIDC provider
type OIDCState struct {
// nonce a random string that binds requests / responses of API server and OIDC provider
// see https://tools.ietf.org/html/rfc6749#section-10.12
Nonce string `json:"nonce"`
ClusterID string `json:"cluster_id"`
ProjectID string `json:"project_id"`
// UserID holds the ID of the user on behalf of which the request is being handled.
UserID string `json:"user_id"`
Datacenter string `json:"datacenter"`
}
// CreateOIDCKubeconfigReq represent a request for creating kubeconfig for a cluster with OIDC credentials
// swagger:parameters createOIDCKubeconfig
type CreateOIDCKubeconfigReq struct {
// in: query
ClusterID string `json:"cluster_id,omitempty"`
ProjectID string `json:"project_id,omitempty"`
UserID string `json:"user_id,omitempty"`
Datacenter string `json:"datacenter,omitempty"`
// not exported so that they don't leak to swagger spec.
code string
encodedState string
decodedState OIDCState
phase int
cookieNonceValue string
}
func DecodeCreateOIDCKubeconfig(c context.Context, r *http.Request) (interface{}, error) {
req := CreateOIDCKubeconfigReq{}
// handle OIDC errors
{
errType := r.URL.Query().Get("error")
errMessage := r.URL.Query().Get("error_description")
if len(errMessage) != 0 {
return nil, fmt.Errorf("OIDC provider error type = %s, description = %s", errType, errMessage)
}
}
// if true - then this is a callback from OIDC provider and the next step is
// to exchange the given code and generate kubeconfig
// note: state is decoded here so that the middlewares can load providers (cluster) into the ctx.
req.code = r.URL.Query().Get("code")
req.encodedState = r.URL.Query().Get("state")
if len(req.code) != 0 && len(req.encodedState) != 0 {
unescapedState, err := url.QueryUnescape(req.encodedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected url encoded value, err = %v", err)
}
rawState, err := base64.StdEncoding.DecodeString(unescapedState)
if err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected base64 encoded value, err = %v", err)
}
oidcState := OIDCState{}
if err := json.Unmarshal(rawState, &oidcState); err != nil {
return nil, kcerrors.NewBadRequest("incorrect value of state parameter, expected json encoded value, err = %v", err)
}
// handle cookie when new endpoint is created and secureCookie was initialized
if secureCookie != nil {
// cookie should be set in initial code phase
if cookie, err := r.Cookie(csrfCookieName); err == nil {
var value string
if err = secureCookie.Decode(csrfCookieName, cookie.Value, &value); err == nil {
req.cookieNonceValue = value
}
} else {
return nil, kcerrors.NewBadRequest("incorrect value of cookie or cookie not set, err = %v", err)
}
}
req.phase = exchangeCodePhase
req.Datacenter = oidcState.Datacenter
req.ProjectID = oidcState.ProjectID
req.UserID = oidcState.UserID
req.ClusterID = oidcState.ClusterID
req.decodedState = oidcState
return req, nil
}
// initial flow an end-user wants to authenticate using OIDC provider
req.ClusterID = r.URL.Query().Get("cluster_id")
req.ProjectID = r.URL.Query().Get("project_id")
req.UserID = r.URL.Query().Get("user_id")
req.Datacenter = r.URL.Query().Get("datacenter")
if len(req.ClusterID) == 0 || len(req.ProjectID) == 0 || len(req.UserID) == 0 || len(req.Datacenter) == 0 {
return nil, errors.New("the following query parameters cluster_id, project_id, user_id and datacenter are mandatory, please make sure that all are set")
}
req.phase = initialPhase
return req, nil
}
// GetUserID implements UserGetter interface
func (r CreateOIDCKubeconfigReq) GetUserID() string {
return r.UserID
}
// GetDC implements DCGetter interface
func (r CreateOIDCKubeconfigReq) GetDC() string {
return r.Datacenter
}
// GetProjectID implements ProjectGetter interface
func (r CreateOIDCKubeconfigReq) GetProjectID() string {
return r.ProjectID
}
// setCookie add cookie with random string value
func setCookie(w http.ResponseWriter, nonce string, secureMode bool, maxAge int) error {
encoded, err := secureCookie.Encode(csrfCookieName, nonce)
if err != nil {
return fmt.Errorf("the encode cookie failed, err = %v", err)
}
cookie := &http.Cookie{
Name: csrfCookieName,
Value: encoded,
MaxAge: maxAge,
HttpOnly: true,
Secure: secureMode,
SameSite: http.SameSiteLaxMode,
}
http.SetCookie(w, cookie)
return nil
} | rsp.authCodeURL = oidcIssuer.AuthCodeURL(urlSafeState, oidcCfg.OfflineAccessAsScope, scopes...)
| random_line_split |
corrector.py | # coding=utf-8
# TODO must
# beam search
# 3to2 쓸만한지 보기
# remove exactly same code
# feed source length as sequence_length
# attention
# 1.0 -> 1.3
# 연속되는 동일한 코드 제거하기
# TODO hyperparams
# dropout
# bucketing
# optimizer
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import time
import math
from datetime import datetime
import logging
import msgpack as pickle
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import code_loader
import code_model
import source_filter
tf.app.flags.DEFINE_float("learning_rate", 0.002, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99, "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 64, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("vocab_size", 0, "Token vocabulary size. (0: set by data total vocab)")
tf.app.flags.DEFINE_integer("epoch", 0, "How many epochs (0: no limit)")
tf.app.flags.DEFINE_string("data_dir", "./code-data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./code-data", "Training directory.")
tf.app.flags.DEFINE_string("train_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("dev_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("out_tag", "", "A tag for certain output.")
tf.app.flags.DEFINE_string("data_path", "1000-6." + pickle.__name__, "path to data")
tf.app.flags.DEFINE_integer("max_train_data_size", 0, "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 0, "How many training steps to do per checkpoint. (0: same with epoch)")
tf.app.flags.DEFINE_boolean("decode", False, "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False, "Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False, "Train using fp16 instead of fp32.")
tf.app.flags.DEFINE_boolean("cache", True, "Train using cached data.")
# tf.app.flags.DEFINE_boolean("decode_while_training", False, "Do test decode while training.")
tf.app.flags.DEFINE_boolean("decode_whole", False, "decode whole code or just a set.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# encoding 이 두개니까 버킷을 어떻게 잡을지 생각좀 해봐야
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
# _buckets = [(10, 15), (40, 50)]
_buckets = [(50, 50)]
def read_data(train_id_set, max_size=None):
data_set = [[] for _ in _buckets]
counter = 0
saved = 0
for code i | ""Create translation model and initialize or load parameters in session."""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = code_model.CodeModel(
vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
def get_perplexity(loss):
return math.exp(float(loss)) if loss < 300 else float("inf")
def save_checkpoint(model, sess, vocab_size):
# Save checkpoint and zero timer and loss.
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="w") as vocab_size_file:
pickle.dump(vocab_size, vocab_size_file)
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.data_path.split(".")[0] + ".ckpt.size." + str(FLAGS.size) + ".nl." + str(FLAGS.num_layers) + ".vs." + str(vocab_size))
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
def train():
train_id_data, id_to_vocab, vocab_to_id = code_loader.prepare_data(FLAGS.data_dir, FLAGS.vocab_size, data_path=FLAGS.data_path, cache=FLAGS.cache)
vocab_size = FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)
# Create decode model
# print("Creating %d layers of %d units for decode." % (FLAGS.num_layers, FLAGS.size))
# decode_sess = tf.Session()
# decode_model = create_model(decode_sess, True, vocab_size=(FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)))
# Create model.
with tf.Session() as sess:
print("Creating %d layers of %d units with %d vocab." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, False, vocab_size=vocab_size)
# Read data into buckets and compute their sizes.
print("Reading development and training data (limit: %d)." % FLAGS.max_train_data_size)
# dev_set_path = FLAGS.train_dir + "/dev_set." + str(FLAGS.from_vocab_size) + "." + pickle.__name__
train_set_path = FLAGS.train_dir + "/" + FLAGS.data_path.split(".")[0] + ".train_set.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size) + "." + pickle.__name__
if not tf.gfile.Exists(train_set_path) or not FLAGS.cache:
print("Reading training data (limit: %d)." % FLAGS.max_train_data_size)
train_set = read_data(train_id_data, FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, "w") as f:
pickle.dump(train_set, f)
else:
print("Loading training data (limit: %d)." % FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, mode="r") as f:
train_set = pickle.load(f)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
print("Total train %d" % train_total_size)
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
# This is the training loop.
print("Running the training loop")
epoch_step_time, epoch_loss, ckpt_step_time, ckpt_loss = 0.0, 0.0, 0.0, 0.0
# current_step = 0
previous_losses = []
std_out = "Error: not enough steps to run with checkpoint_step."
test_out = ""
steps_per_epoch = round(int(train_total_size) / FLAGS.batch_size)
steps_per_checkpoint = FLAGS.steps_per_checkpoint if FLAGS.steps_per_checkpoint != 0 else steps_per_epoch # batch * total data size
epoch_step = model.global_step.eval() // steps_per_epoch
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs,
target_weights, bucket_id, False)
ckpt_step_time += (time.time() - start_time) / steps_per_checkpoint
ckpt_loss += step_loss / steps_per_checkpoint
epoch_step_time += (time.time() - start_time) / steps_per_epoch
epoch_loss += step_loss / steps_per_epoch
# current_step += 1
# print condition
if model.global_step.eval() % 1 == 0:
print(" global step %d learning rate %.4f step-time %.2f loss %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
(time.time() - start_time), step_loss, get_perplexity(step_loss)))
# per epoch
if model.global_step.eval() % steps_per_epoch == 0:
epoch_step += 1
print("epoch %d" % epoch_step)
# Print statistics for the previous epoch.
std_out = "epoch: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), epoch_step_time, epoch_loss, get_perplexity(epoch_loss))
print(std_out)
# Decrease learning rate if no improvement was seen over last 3 epoch times.
if len(previous_losses) > 2 and epoch_loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(epoch_loss)
# with tf.variable_scope("decoding") as scope:
# decode()
epoch_step_time, epoch_loss = 0.0, 0.0
# escape if all epoch is done
if FLAGS.epoch != 0 and epoch_step >= FLAGS.epoch:
# Save checkpoint.
save_checkpoint(model, sess, vocab_size)
with open(FLAGS.train_dir + "/result.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size), "a") as output_file:
output_file.write(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ": " + std_out + " // Tag: " + FLAGS.out_tag + "\n" +
"\ttest_out: " + test_out + "\n"
)
break
# per checkpoint jobs
if model.global_step.eval() % steps_per_checkpoint == 0:
# Print statistics for the previous checkpoint.
std_out = "checkpoint: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), ckpt_step_time, ckpt_loss, get_perplexity(ckpt_loss))
print(std_out)
# Save checkpoint and zero timer and loss.
save_checkpoint(model, sess, vocab_size)
ckpt_step_time, ckpt_loss = 0.0, 0.0
def decode_with_session_and_model(sess, model):
# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, FLAGS.data_path.split(".")[0] + ".vocab%d.%s" % (FLAGS.vocab_size, pickle.__name__))
with gfile.GFile(vocab_path, mode="r") as vocab_file:
id_to_vocab, vocab_to_id, vocab_freq = pickle.load(vocab_file)
source = code_loader._START_LINE + '\n' + 'print(a+b)'
# source = "b = 2\n" + code_loader._END_LINE
data = [{
"source": source
}]
data = source_filter.filter_danger(data)
source_filter.remove_redundent_newlines_and_set_line_length(data)
data = source_filter.set_token(data)
source_data = code_loader.data_to_tokens_list(data)
model.batch_size = 1 if not FLAGS.decode_whole else len(source_data[0]) * 2 + 1 # We decode one sentence at a time.
id_data = []
for src in source_data:
id_source = [[code_loader.START_LINE_ID]]
for line in src:
id_line = [vocab_to_id.get(word[1], code_loader.UNK_ID) for word in line]
id_source.append(id_line)
id_source.append([code_loader.END_LINE_ID])
id_data.append(id_source)
bucket_id = -1
data_set = [[] for _ in _buckets]
for code in id_data:
if FLAGS.decode_whole:
for line_idx in xrange(len(code) - 2):
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
for line_idx in xrange(len(code) - 1):
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
else:
source_ids = [code[1], code[2]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
if bucket_id == -1:
logging.warning("Sentence truncated: %s", source)
return
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(data_set, bucket_id, random_set=False)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits] # logit 이 원래 벡턴데 5 row 행렬이 되서 그런 것 바꿔주면 됨.
# If there is an EOS symbol in outputs, cut them at that point.
if code_loader.EOS_ID in outputs:
outputs = outputs[:outputs.index(code_loader.EOS_ID)]
# Print out French sentence corresponding to outputs.
vocab_output = " ".join([tf.compat.as_str(id_to_vocab[output]) for output in outputs])
print("\toutput: " + vocab_output)
return vocab_output
def decode(sess=None, model=None):
vocab_size = FLAGS.vocab_size
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="r") as vocab_size_file:
vocab_size = pickle.load(vocab_size_file)
if not sess:
sess = tf.Session()
if not model:
# Create model and load parameters.
print("Creating %d layers of %d units with %d vocab. for decode." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, True, vocab_size=vocab_size)
decode_with_session_and_model(sess, model)
sess.close()
def main(_):
if FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
tf.app.run()
| n train_id_set:
for line_idx in xrange(len(code) - 2):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
for line_idx in xrange(len(code) - 1):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
print(" not saved: %d" % (counter - saved))
print(" read data line total %d" % counter)
return data_set
def create_model(session, forward_only, vocab_size=FLAGS.vocab_size):
" | identifier_body |
corrector.py | # coding=utf-8
# TODO must
# beam search
# 3to2 쓸만한지 보기
# remove exactly same code
# feed source length as sequence_length
# attention
# 1.0 -> 1.3
# 연속되는 동일한 코드 제거하기
# TODO hyperparams
# dropout
# bucketing
# optimizer
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import time
import math
from datetime import datetime
import logging
import msgpack as pickle
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import code_loader
import code_model
import source_filter
tf.app.flags.DEFINE_float("learning_rate", 0.002, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99, "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 64, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("vocab_size", 0, "Token vocabulary size. (0: set by data total vocab)")
tf.app.flags.DEFINE_integer("epoch", 0, "How many epochs (0: no limit)")
tf.app.flags.DEFINE_string("data_dir", "./code-data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./code-data", "Training directory.")
tf.app.flags.DEFINE_string("train_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("dev_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("out_tag", "", "A tag for certain output.")
tf.app.flags.DEFINE_string("data_path", "1000-6." + pickle.__name__, "path to data")
tf.app.flags.DEFINE_integer("max_train_data_size", 0, "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 0, "How many training steps to do per checkpoint. (0: same with epoch)")
tf.app.flags.DEFINE_boolean("decode", False, "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False, "Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False, "Train using fp16 instead of fp32.")
tf.app.flags.DEFINE_boolean("cache", True, "Train using cached data.")
# tf.app.flags.DEFINE_boolean("decode_while_training", False, "Do test decode while training.")
tf.app.flags.DEFINE_boolean("decode_whole", False, "decode whole code or just a set.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# encoding 이 두개니까 버킷을 어떻게 잡을지 생각좀 해봐야
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
# _buckets = [(10, 15), (40, 50)]
_buckets = [(50, 50)]
def read_data(train_id_set, max_size=None):
data_set = [[] for _ in _buckets]
counter = 0
saved = 0
for code in train_id_set:
for line_idx in xrange(len(code) - 2):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
for line_idx in xrange(len(code) - 1):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
print(" not saved: %d" % (counter - saved))
print(" read data line total %d" % counter)
return data_set
def create_model(session, forward_only, vocab_size=FLAGS.vocab_size):
"""Create translation model and initialize or load parameters in session.""" | _buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
def get_perplexity(loss):
return math.exp(float(loss)) if loss < 300 else float("inf")
def save_checkpoint(model, sess, vocab_size):
# Save checkpoint and zero timer and loss.
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="w") as vocab_size_file:
pickle.dump(vocab_size, vocab_size_file)
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.data_path.split(".")[0] + ".ckpt.size." + str(FLAGS.size) + ".nl." + str(FLAGS.num_layers) + ".vs." + str(vocab_size))
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
def train():
train_id_data, id_to_vocab, vocab_to_id = code_loader.prepare_data(FLAGS.data_dir, FLAGS.vocab_size, data_path=FLAGS.data_path, cache=FLAGS.cache)
vocab_size = FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)
# Create decode model
# print("Creating %d layers of %d units for decode." % (FLAGS.num_layers, FLAGS.size))
# decode_sess = tf.Session()
# decode_model = create_model(decode_sess, True, vocab_size=(FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)))
# Create model.
with tf.Session() as sess:
print("Creating %d layers of %d units with %d vocab." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, False, vocab_size=vocab_size)
# Read data into buckets and compute their sizes.
print("Reading development and training data (limit: %d)." % FLAGS.max_train_data_size)
# dev_set_path = FLAGS.train_dir + "/dev_set." + str(FLAGS.from_vocab_size) + "." + pickle.__name__
train_set_path = FLAGS.train_dir + "/" + FLAGS.data_path.split(".")[0] + ".train_set.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size) + "." + pickle.__name__
if not tf.gfile.Exists(train_set_path) or not FLAGS.cache:
print("Reading training data (limit: %d)." % FLAGS.max_train_data_size)
train_set = read_data(train_id_data, FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, "w") as f:
pickle.dump(train_set, f)
else:
print("Loading training data (limit: %d)." % FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, mode="r") as f:
train_set = pickle.load(f)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
print("Total train %d" % train_total_size)
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
# This is the training loop.
print("Running the training loop")
epoch_step_time, epoch_loss, ckpt_step_time, ckpt_loss = 0.0, 0.0, 0.0, 0.0
# current_step = 0
previous_losses = []
std_out = "Error: not enough steps to run with checkpoint_step."
test_out = ""
steps_per_epoch = round(int(train_total_size) / FLAGS.batch_size)
steps_per_checkpoint = FLAGS.steps_per_checkpoint if FLAGS.steps_per_checkpoint != 0 else steps_per_epoch # batch * total data size
epoch_step = model.global_step.eval() // steps_per_epoch
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs,
target_weights, bucket_id, False)
ckpt_step_time += (time.time() - start_time) / steps_per_checkpoint
ckpt_loss += step_loss / steps_per_checkpoint
epoch_step_time += (time.time() - start_time) / steps_per_epoch
epoch_loss += step_loss / steps_per_epoch
# current_step += 1
# print condition
if model.global_step.eval() % 1 == 0:
print(" global step %d learning rate %.4f step-time %.2f loss %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
(time.time() - start_time), step_loss, get_perplexity(step_loss)))
# per epoch
if model.global_step.eval() % steps_per_epoch == 0:
epoch_step += 1
print("epoch %d" % epoch_step)
# Print statistics for the previous epoch.
std_out = "epoch: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), epoch_step_time, epoch_loss, get_perplexity(epoch_loss))
print(std_out)
# Decrease learning rate if no improvement was seen over last 3 epoch times.
if len(previous_losses) > 2 and epoch_loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(epoch_loss)
# with tf.variable_scope("decoding") as scope:
# decode()
epoch_step_time, epoch_loss = 0.0, 0.0
# escape if all epoch is done
if FLAGS.epoch != 0 and epoch_step >= FLAGS.epoch:
# Save checkpoint.
save_checkpoint(model, sess, vocab_size)
with open(FLAGS.train_dir + "/result.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size), "a") as output_file:
output_file.write(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ": " + std_out + " // Tag: " + FLAGS.out_tag + "\n" +
"\ttest_out: " + test_out + "\n"
)
break
# per checkpoint jobs
if model.global_step.eval() % steps_per_checkpoint == 0:
# Print statistics for the previous checkpoint.
std_out = "checkpoint: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), ckpt_step_time, ckpt_loss, get_perplexity(ckpt_loss))
print(std_out)
# Save checkpoint and zero timer and loss.
save_checkpoint(model, sess, vocab_size)
ckpt_step_time, ckpt_loss = 0.0, 0.0
def decode_with_session_and_model(sess, model):
# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, FLAGS.data_path.split(".")[0] + ".vocab%d.%s" % (FLAGS.vocab_size, pickle.__name__))
with gfile.GFile(vocab_path, mode="r") as vocab_file:
id_to_vocab, vocab_to_id, vocab_freq = pickle.load(vocab_file)
source = code_loader._START_LINE + '\n' + 'print(a+b)'
# source = "b = 2\n" + code_loader._END_LINE
data = [{
"source": source
}]
data = source_filter.filter_danger(data)
source_filter.remove_redundent_newlines_and_set_line_length(data)
data = source_filter.set_token(data)
source_data = code_loader.data_to_tokens_list(data)
model.batch_size = 1 if not FLAGS.decode_whole else len(source_data[0]) * 2 + 1 # We decode one sentence at a time.
id_data = []
for src in source_data:
id_source = [[code_loader.START_LINE_ID]]
for line in src:
id_line = [vocab_to_id.get(word[1], code_loader.UNK_ID) for word in line]
id_source.append(id_line)
id_source.append([code_loader.END_LINE_ID])
id_data.append(id_source)
bucket_id = -1
data_set = [[] for _ in _buckets]
for code in id_data:
if FLAGS.decode_whole:
for line_idx in xrange(len(code) - 2):
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
for line_idx in xrange(len(code) - 1):
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
else:
source_ids = [code[1], code[2]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
if bucket_id == -1:
logging.warning("Sentence truncated: %s", source)
return
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(data_set, bucket_id, random_set=False)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits] # logit 이 원래 벡턴데 5 row 행렬이 되서 그런 것 바꿔주면 됨.
# If there is an EOS symbol in outputs, cut them at that point.
if code_loader.EOS_ID in outputs:
outputs = outputs[:outputs.index(code_loader.EOS_ID)]
# Print out French sentence corresponding to outputs.
vocab_output = " ".join([tf.compat.as_str(id_to_vocab[output]) for output in outputs])
print("\toutput: " + vocab_output)
return vocab_output
def decode(sess=None, model=None):
vocab_size = FLAGS.vocab_size
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="r") as vocab_size_file:
vocab_size = pickle.load(vocab_size_file)
if not sess:
sess = tf.Session()
if not model:
# Create model and load parameters.
print("Creating %d layers of %d units with %d vocab. for decode." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, True, vocab_size=vocab_size)
decode_with_session_and_model(sess, model)
sess.close()
def main(_):
if FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
tf.app.run() | dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = code_model.CodeModel(
vocab_size, | random_line_split |
corrector.py | # coding=utf-8
# TODO must
# beam search
# 3to2 쓸만한지 보기
# remove exactly same code
# feed source length as sequence_length
# attention
# 1.0 -> 1.3
# 연속되는 동일한 코드 제거하기
# TODO hyperparams
# dropout
# bucketing
# optimizer
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import time
import math
from datetime import datetime
import logging
import msgpack as pickle
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import code_loader
import code_model
import source_filter
tf.app.flags.DEFINE_float("learning_rate", 0.002, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99, "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 64, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("vocab_size", 0, "Token vocabulary size. (0: set by data total vocab)")
tf.app.flags.DEFINE_integer("epoch", 0, "How many epochs (0: no limit)")
tf.app.flags.DEFINE_string("data_dir", "./code-data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./code-data", "Training directory.")
tf.app.flags.DEFINE_string("train_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("dev_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("out_tag", "", "A tag for certain output.")
tf.app.flags.DEFINE_string("data_path", "1000-6." + pickle.__name__, "path to data")
tf.app.flags.DEFINE_integer("max_train_data_size", 0, "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 0, "How many training steps to do per checkpoint. (0: same with epoch)")
tf.app.flags.DEFINE_boolean("decode", False, "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False, "Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False, "Train using fp16 instead of fp32.")
tf.app.flags.DEFINE_boolean("cache", True, "Train using cached data.")
# tf.app.flags.DEFINE_boolean("decode_while_training", False, "Do test decode while training.")
tf.app.flags.DEFINE_boolean("decode_whole", False, "decode whole code or just a set.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# encoding 이 두개니까 버킷을 어떻게 잡을지 생각좀 해봐야
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
# _buckets = [(10, 15), (40, 50)]
_buckets = [(50, 50)]
def read_data(train_id_set, max_size=None):
data_set = [[] for _ in _buckets]
counter = 0
saved = 0
for code in train_id_set:
for line_idx in xrange(len(code) - 2):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
for line_idx in xrange(len(code) - 1):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
print(" not saved: %d" % (counter - saved))
print(" read data line total %d" % counter)
return data_set
def create_model(session, forward_only, vocab_size=FLAGS.vocab_size):
"""Create translation model and initialize or load parameters in session."""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = code_model.CodeModel(
vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
def get_perplexity(loss):
return math.exp(float(loss)) if loss < 300 else float("inf")
def save_checkpoint(model, sess, vocab_size):
# Save checkpoint and zero timer | with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="w") as vocab_size_file:
pickle.dump(vocab_size, vocab_size_file)
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.data_path.split(".")[0] + ".ckpt.size." + str(FLAGS.size) + ".nl." + str(FLAGS.num_layers) + ".vs." + str(vocab_size))
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
def train():
train_id_data, id_to_vocab, vocab_to_id = code_loader.prepare_data(FLAGS.data_dir, FLAGS.vocab_size, data_path=FLAGS.data_path, cache=FLAGS.cache)
vocab_size = FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)
# Create decode model
# print("Creating %d layers of %d units for decode." % (FLAGS.num_layers, FLAGS.size))
# decode_sess = tf.Session()
# decode_model = create_model(decode_sess, True, vocab_size=(FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)))
# Create model.
with tf.Session() as sess:
print("Creating %d layers of %d units with %d vocab." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, False, vocab_size=vocab_size)
# Read data into buckets and compute their sizes.
print("Reading development and training data (limit: %d)." % FLAGS.max_train_data_size)
# dev_set_path = FLAGS.train_dir + "/dev_set." + str(FLAGS.from_vocab_size) + "." + pickle.__name__
train_set_path = FLAGS.train_dir + "/" + FLAGS.data_path.split(".")[0] + ".train_set.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size) + "." + pickle.__name__
if not tf.gfile.Exists(train_set_path) or not FLAGS.cache:
print("Reading training data (limit: %d)." % FLAGS.max_train_data_size)
train_set = read_data(train_id_data, FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, "w") as f:
pickle.dump(train_set, f)
else:
print("Loading training data (limit: %d)." % FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, mode="r") as f:
train_set = pickle.load(f)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
print("Total train %d" % train_total_size)
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
# This is the training loop.
print("Running the training loop")
epoch_step_time, epoch_loss, ckpt_step_time, ckpt_loss = 0.0, 0.0, 0.0, 0.0
# current_step = 0
previous_losses = []
std_out = "Error: not enough steps to run with checkpoint_step."
test_out = ""
steps_per_epoch = round(int(train_total_size) / FLAGS.batch_size)
steps_per_checkpoint = FLAGS.steps_per_checkpoint if FLAGS.steps_per_checkpoint != 0 else steps_per_epoch # batch * total data size
epoch_step = model.global_step.eval() // steps_per_epoch
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs,
target_weights, bucket_id, False)
ckpt_step_time += (time.time() - start_time) / steps_per_checkpoint
ckpt_loss += step_loss / steps_per_checkpoint
epoch_step_time += (time.time() - start_time) / steps_per_epoch
epoch_loss += step_loss / steps_per_epoch
# current_step += 1
# print condition
if model.global_step.eval() % 1 == 0:
print(" global step %d learning rate %.4f step-time %.2f loss %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
(time.time() - start_time), step_loss, get_perplexity(step_loss)))
# per epoch
if model.global_step.eval() % steps_per_epoch == 0:
epoch_step += 1
print("epoch %d" % epoch_step)
# Print statistics for the previous epoch.
std_out = "epoch: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), epoch_step_time, epoch_loss, get_perplexity(epoch_loss))
print(std_out)
# Decrease learning rate if no improvement was seen over last 3 epoch times.
if len(previous_losses) > 2 and epoch_loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(epoch_loss)
# with tf.variable_scope("decoding") as scope:
# decode()
epoch_step_time, epoch_loss = 0.0, 0.0
# escape if all epoch is done
if FLAGS.epoch != 0 and epoch_step >= FLAGS.epoch:
# Save checkpoint.
save_checkpoint(model, sess, vocab_size)
with open(FLAGS.train_dir + "/result.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size), "a") as output_file:
output_file.write(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ": " + std_out + " // Tag: " + FLAGS.out_tag + "\n" +
"\ttest_out: " + test_out + "\n"
)
break
# per checkpoint jobs
if model.global_step.eval() % steps_per_checkpoint == 0:
# Print statistics for the previous checkpoint.
std_out = "checkpoint: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), ckpt_step_time, ckpt_loss, get_perplexity(ckpt_loss))
print(std_out)
# Save checkpoint and zero timer and loss.
save_checkpoint(model, sess, vocab_size)
ckpt_step_time, ckpt_loss = 0.0, 0.0
def decode_with_session_and_model(sess, model):
# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, FLAGS.data_path.split(".")[0] + ".vocab%d.%s" % (FLAGS.vocab_size, pickle.__name__))
with gfile.GFile(vocab_path, mode="r") as vocab_file:
id_to_vocab, vocab_to_id, vocab_freq = pickle.load(vocab_file)
source = code_loader._START_LINE + '\n' + 'print(a+b)'
# source = "b = 2\n" + code_loader._END_LINE
data = [{
"source": source
}]
data = source_filter.filter_danger(data)
source_filter.remove_redundent_newlines_and_set_line_length(data)
data = source_filter.set_token(data)
source_data = code_loader.data_to_tokens_list(data)
model.batch_size = 1 if not FLAGS.decode_whole else len(source_data[0]) * 2 + 1 # We decode one sentence at a time.
id_data = []
for src in source_data:
id_source = [[code_loader.START_LINE_ID]]
for line in src:
id_line = [vocab_to_id.get(word[1], code_loader.UNK_ID) for word in line]
id_source.append(id_line)
id_source.append([code_loader.END_LINE_ID])
id_data.append(id_source)
bucket_id = -1
data_set = [[] for _ in _buckets]
for code in id_data:
if FLAGS.decode_whole:
for line_idx in xrange(len(code) - 2):
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
for line_idx in xrange(len(code) - 1):
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
else:
source_ids = [code[1], code[2]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
if bucket_id == -1:
logging.warning("Sentence truncated: %s", source)
return
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(data_set, bucket_id, random_set=False)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits] # logit 이 원래 벡턴데 5 row 행렬이 되서 그런 것 바꿔주면 됨.
# If there is an EOS symbol in outputs, cut them at that point.
if code_loader.EOS_ID in outputs:
outputs = outputs[:outputs.index(code_loader.EOS_ID)]
# Print out French sentence corresponding to outputs.
vocab_output = " ".join([tf.compat.as_str(id_to_vocab[output]) for output in outputs])
print("\toutput: " + vocab_output)
return vocab_output
def decode(sess=None, model=None):
vocab_size = FLAGS.vocab_size
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="r") as vocab_size_file:
vocab_size = pickle.load(vocab_size_file)
if not sess:
sess = tf.Session()
if not model:
# Create model and load parameters.
print("Creating %d layers of %d units with %d vocab. for decode." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, True, vocab_size=vocab_size)
decode_with_session_and_model(sess, model)
sess.close()
def main(_):
if FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
tf.app.run()
| and loss.
| identifier_name |
corrector.py | # coding=utf-8
# TODO must
# beam search
# 3to2 쓸만한지 보기
# remove exactly same code
# feed source length as sequence_length
# attention
# 1.0 -> 1.3
# 연속되는 동일한 코드 제거하기
# TODO hyperparams
# dropout
# bucketing
# optimizer
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import time
import math
from datetime import datetime
import logging
import msgpack as pickle
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import code_loader
import code_model
import source_filter
tf.app.flags.DEFINE_float("learning_rate", 0.002, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99, "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 32, "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 64, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("vocab_size", 0, "Token vocabulary size. (0: set by data total vocab)")
tf.app.flags.DEFINE_integer("epoch", 0, "How many epochs (0: no limit)")
tf.app.flags.DEFINE_string("data_dir", "./code-data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./code-data", "Training directory.")
tf.app.flags.DEFINE_string("train_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("dev_data_path", None, "Training data.")
tf.app.flags.DEFINE_string("out_tag", "", "A tag for certain output.")
tf.app.flags.DEFINE_string("data_path", "1000-6." + pickle.__name__, "path to data")
tf.app.flags.DEFINE_integer("max_train_data_size", 0, "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 0, "How many training steps to do per checkpoint. (0: same with epoch)")
tf.app.flags.DEFINE_boolean("decode", False, "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False, "Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False, "Train using fp16 instead of fp32.")
tf.app.flags.DEFINE_boolean("cache", True, "Train using cached data.")
# tf.app.flags.DEFINE_boolean("decode_while_training", False, "Do test decode while training.")
tf.app.flags.DEFINE_boolean("decode_whole", False, "decode whole code or just a set.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
# encoding 이 두개니까 버킷을 어떻게 잡을지 생각좀 해봐야
# _buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
# _buckets = [(10, 15), (40, 50)]
_buckets = [(50, 50)]
def read_data(train_id_set, max_size=None):
data_set = [[] for _ in _buckets]
counter = 0
saved = 0
for code in train_id_set:
for line_idx in xrange(len(code) - 2):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
for line_idx in xrange(len(code) - 1):
if max_size and counter >= max_size:
break
counter += 1
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size and len(target_ids[0]) < target_size:
saved += 1
data_set[bucket_id].append([source_ids, target_ids])
break
print(" not saved: %d" % (counter - saved))
print(" read data line total %d" % counter)
return data_set
def create_model(session, forward_only, vocab_size=FLAGS.vocab_size):
"""Create translation model and initialize or load parameters in session."""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = code_model.CodeModel(
vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
def get_perplexity(loss):
return math.exp(float(loss)) if loss < 300 else float("inf")
def save_checkpoint(model, sess, vocab_size):
# Save checkpoint and zero timer and loss.
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="w") as vocab_size_file:
pickle.dump(vocab_size, vocab_size_file)
checkpoint_path = os.path.join(FLAGS.train_dir,
FLAGS.data_path.split(".")[0] + ".ckpt.size." + str(FLAGS.size) + ".nl." + str(FLAGS.num_layers) + ".vs." + str(vocab_size))
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
def train():
train_id_data, id_to_vocab, vocab_to_id = code_loader.prepare_data(FLAGS.data_dir, FLAGS.vocab_size, data_path=FLAGS.data_path, cache=FLAGS.cache)
vocab_size = FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)
# Create decode model
# print("Creating %d layers of %d units for decode." % (FLAGS.num_layers, FLAGS.size))
# decode_sess = tf.Session()
# decode_model = create_model(decode_sess, True, vocab_size=(FLAGS.vocab_size if FLAGS.vocab_size != 0 else len(id_to_vocab)))
# Create model.
with tf.Session() as sess:
print("Creating %d layers of %d units with %d vocab." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, False, vocab_size=vocab_size)
# Read data into buckets and compute their sizes.
print("Reading development and training data (limit: %d)." % FLAGS.max_train_data_size)
# dev_set_path = FLAGS.train_dir + "/dev_set." + str(FLAGS.from_vocab_size) + "." + pickle.__name__
train_set_path = FLAGS.train_dir + "/" + FLAGS.data_path.split(".")[0] + ".train_set.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size) + "." + pickle.__name__
if not tf.gfile.Exists(train_set_path) or not FLAGS.cache:
print("Reading training data (limit: %d)." % FLAGS.max_train_data_size)
train_set = read_data(train_id_data, FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, "w") as f:
pickle.dump(train_set, f)
else:
print("Loading training data (limit: %d)." % FLAGS.max_train_data_size)
with tf.gfile.GFile(train_set_path, mode="r") as f:
train_set = pickle.load(f)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
print("Total train %d" % train_total_size)
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
# the size if i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
# This is the training loop.
print("Running the training loop")
epoch_step_time, epoch_loss, ckpt_step_time, ckpt_loss = 0.0, 0.0, 0.0, 0.0
# current_step = 0
previous_losses = []
std_out = "Error: not enough steps to run with checkpoint_step."
test_out = ""
steps_per_epoch = round(int(train_total_size) / FLAGS.batch_size)
steps_per_checkpoint = FLAGS.steps_per_checkpoint if FLAGS.steps_per_checkpoint != 0 else steps_per_epoch # batch * total data size
epoch_step = model.global_step.eval() // steps_per_epoch
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs,
target_weights, bucket_id, False)
ckpt_step_time += (time.time() - start_time) / steps_per_checkpoint
ckpt_loss += step_loss / steps_per_checkpoint
epoch_step_time += (time.time() - start_time) / steps_per_epoch
epoch_loss += step_loss / steps_per_epoch
# current_step += 1
# print condition
if model.global_step.eval() % 1 == 0:
print(" global step %d learning rate %.4f step-time %.2f loss %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
(time.time() - start_time), step_loss, get_perplexity(step_loss)))
# per epoch
if model.global_step.eval() % steps_per_epoch == 0:
epoch_step += 1
print("epoch %d" % epoch_step)
# Print statistics for the previous epoch.
std_out = "epoch: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), epoch_step_time, epoch_loss, get_perplexity(epoch_loss))
print(std_out)
# Decrease learning rate if no improvement was seen over last 3 epoch times.
if len(previous_losses) > 2 and epoch_loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(epoch_loss)
# with tf.variable_scope("decoding") as scope:
# decode()
epoch_step_time, epoch_loss = 0.0, 0.0
# escape if all epoch is done
if FLAGS.epoch != 0 and epoch_step >= FLAGS.epoch:
# Save checkpoint.
save_checkpoint(model, sess, vocab_size)
with open(FLAGS.train_dir + "/result.ids" + str(FLAGS.vocab_size) + ".ds" + str(FLAGS.max_train_data_size), "a") as output_file:
output_file.write(
datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ": " + std_out + " // Tag: " + FLAGS.out_tag + "\n" +
"\ttest_out: " + test_out + "\n"
)
break
# per checkpoint jobs
if model.global_step.eval() % steps_per_checkpoint == 0:
# Print statistics for the previous checkpoint.
std_out = "checkpoint: global step %d learning rate %.4f step-time %.2f loss %.2f perplexity %.2f" % (
model.global_step.eval(), model.learning_rate.eval(), ckpt_step_time, ckpt_loss, get_perplexity(ckpt_loss))
print(std_out)
# Save checkpoint and zero timer and loss.
save_checkpoint(model, sess, vocab_size)
ckpt_step_time, ckpt_loss = 0.0, 0.0
def decode_with_session_and_model(sess, model):
# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, FLAGS.data_path.split(".")[0] + ".vocab%d.%s" % (FLAGS.vocab_size, pickle.__name__))
with gfile.GFile(vocab_path, mode="r") as vocab_file:
id_to_vocab, vocab_to_id, vocab_freq = pickle.load(vocab_file)
source = code_loader._START_LINE + '\n' + 'print(a+b)'
# source = "b = 2\n" + code_loader._END_LINE
data = [{
"source": source
}]
data = source_filter.filter_danger(data)
source_filter.remove_redundent_newlines_and_set_line_length(data)
data = source_filter.set_token(data)
source_data = code_loader.data_to_tokens_list(data)
model.batch_size = 1 if not FLAGS.decode_whole else len(source_data[0]) * 2 + 1 # We decode one sentence at a time.
id_data = []
for src in source_data:
id_source = [[code_loader.START_LINE_ID]]
for line in src:
id_line = [vocab_to_id.get(word[1], code_loader.UNK_ID) for word in line]
id_source.append(id_line)
id_source.append([code_loader.END_LINE_ID])
id_data.append(id_source)
bucket_id = -1
data_set = [[] for _ in _buckets]
for code in id_data:
if FLAGS.decode_whole:
for line_idx in xrange(len(code) - 2):
source_ids = [code[line_idx], code[line_idx + 2]]
target_ids = [code[line_idx + 1]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
for line_idx in xrange(len(code) - 1):
source_ids = [code[line_idx], code[line_idx + 1]]
target_ids = | ids = [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
if bucket_id == -1:
logging.warning("Sentence truncated: %s", source)
return
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights = model.get_batch(data_set, bucket_id, random_set=False)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs_front, encoder_inputs_back, decoder_inputs, target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits] # logit 이 원래 벡턴데 5 row 행렬이 되서 그런 것 바꿔주면 됨.
# If there is an EOS symbol in outputs, cut them at that point.
if code_loader.EOS_ID in outputs:
outputs = outputs[:outputs.index(code_loader.EOS_ID)]
# Print out French sentence corresponding to outputs.
vocab_output = " ".join([tf.compat.as_str(id_to_vocab[output]) for output in outputs])
print("\toutput: " + vocab_output)
return vocab_output
def decode(sess=None, model=None):
vocab_size = FLAGS.vocab_size
with open(FLAGS.train_dir + "/" + os.path.basename(__file__) + ".ckpt.vb", mode="r") as vocab_size_file:
vocab_size = pickle.load(vocab_size_file)
if not sess:
sess = tf.Session()
if not model:
# Create model and load parameters.
print("Creating %d layers of %d units with %d vocab. for decode." % (FLAGS.num_layers, FLAGS.size, vocab_size))
model = create_model(sess, True, vocab_size=vocab_size)
decode_with_session_and_model(sess, model)
sess.close()
def main(_):
if FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
tf.app.run()
| [[]]
target_ids[0].append(code_loader.EOS_ID)
for idx, (source_size, target_size) in enumerate(_buckets):
if len(source_ids[0]) < source_size and len(source_ids[1]) < source_size:
data_set[idx].append([source_ids, target_ids])
bucket_id = idx
break
else:
source_ids = [code[1], code[2]]
target_ | conditional_block |
trajectory_multiple.py | import math
import numpy as np
import cv2 as cv
# 1. Read TEST image.
img = cv.imread("image.jpg", cv.IMREAD_COLOR)
# 2. Initialize known information.
# - Table (corner coordinates).
# - Cue ball (location, radius).
# - Cue (2 points on the cue, including the coordinates of the tip).
table = [(163, 117), (1710, 120), (1836, 888), (63, 939)]
cue_ball = (590, 490)
cue_ball_radius = 30
cue_start = (203, 193) # Random point within cue
cue_tip = (533, 400)
balls = [
(180, 160),
(860, 373),
(963, 593),
(893, 693),
(1213, 783),
(1103, 643),
(1256, 556),
(1176, 243),
(1523, 206),
(1516, 253),
(1586, 503),
(1673, 716)
]
# 3. Get the warp matrix of the table angle and warp the image.
dst_shape = (table[1][0], table[2][1]-table[1][1])
src_pt = np.float32(table[:3])
dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape])
warp_matrix = cv.getAffineTransform(src_pt, dst_pt)
img = cv.warpAffine(img, warp_matrix, dst_shape)
# src_pt = np.float32(table)
# dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape, (0, dst_shape[1])])
# warp_matrix = cv.getPerspectiveTransform(src_pt, dst_pt)
# img = cv.warpPerspective(img, warp_matrix, dst_shape)
# 4. Correct known coordinates with the warp matrix.
def transform_point(pt, matrix):
is_list = type(pt) is list
if not is_list:
pt = [[pt]]
else:
pt = [[p] for p in pt]
array = np.array(pt)
transformed = cv.transform(array, matrix)
squeezed = np.squeeze(np.squeeze(transformed))
if not is_list:
return tuple(squeezed[:2])
return [tuple(x[:2]) for x in squeezed]
# table = transform_point(table, warp_matrix)
table = [(0, 0), (1710, 0), (1710, 768), (0, 768)]
cue_ball = transform_point(cue_ball, warp_matrix)
cue_start = transform_point(cue_start, warp_matrix)
cue_tip = transform_point(cue_tip, warp_matrix)
balls = transform_point(balls, warp_matrix)
pockets = table.copy()
x_center = int(img.shape[1] / 2)
pockets.append((x_center, 0))
pockets.append((x_center, img.shape[0]))
def update():
global img, table, cue_tip, cue_start, cue_ball, cue_ball_radius
hit_balls = []
shown_image = img.copy()
def points_to_angle(point1, point2):
return math.atan2(point2[1] - point1[1], point2[0] - point1[0])
# 5. Draw the trajectory
# 5.1 Get the cue angle based on 2 known points.
# - To get the angle in Radian measure use the atan2(y1-y2, x1-x2) function.
cue_angle = points_to_angle(cue_start, cue_tip)
# 5.2 Check if the cue angle overlaps with the location of the cue ball.
# - Check if the distance of the cue ball to the line is not more than the radius of the cue ball.
# - Distance is using: abs(ax - by + c) / sqrt(aa + bb)
# - a = y2-y1
# - b = x2-x1
# - c = x2y1 + y2x1
# - x, y = point of cue ball center.
# - Extra check if the cue ball is not behind the cue tip.
# http://www.pygame.org/wiki/IntersectingLineDetection
def line_intersect(line1, line2):
def gradient(points):
if points[0][0] == points[1][0]:
return None
return (points[0][1] - points[1][1]) / (points[0][0] - points[1][0])
def y_intersect(p, m):
return p[1] - (m * p[0])
m1, m2 = gradient(line1), gradient(line2)
if m1 == m2:
return None
elif m1 is not None and m2 is not None:
b1 = y_intersect(line1[0], m1)
b2 = y_intersect(line2[0], m2)
x = (b2 - b1) / (m1 - m2)
pt = x, (m1 * x) + b1
elif m1 is None:
b2 = y_intersect(line2[0], m2)
pt = line2[0][0], (m2 * line1[0][0]) + b2
else:
b1 = y_intersect(line1[0], m1)
pt = line2[0][0], (m1 * line2[0][0]) + b1
return tuple(int(x) for x in pt)
def line_circle_collision(pt1, pt2, center, circle_radius):
global img
# Point opposite of circle
if (min(pt2[0], img.shape[1]) - pt1[0]) < 0 == (max(pt2[0], 0) - center[0] < 0) or (pt2[1] - pt1[1]) < 0 == (
pt2[1] - center[1]) < 0:
return False
a = (pt2[1] - pt1[1])
b = (pt2[0] - pt1[0])
c = (pt2[0] * pt1[1]) - (pt2[1] * pt1[0])
x, y = center
dist = abs(a * x - b * y + c) / math.sqrt(a * a + b * b)
if circle_radius >= dist:
return True
else:
return False
# https://stackoverflow.com/questions/29384494/the-intersection-between-a-trajectory-and-the-circles-in-the-same-area
def line_circle_intersection(pt1, pt2, center, circle_radius):
x1, y1 = [int(x) for x in pt1]
x2, y2 = [int(x) for x in pt2]
xc, yc = [int(x) for x in center]
r = circle_radius
dx = x1 - x2
dy = y1 - y2
rx = xc - x1
ry = yc - y1
a = dx * dx + dy * dy
b = dx * rx + dy * ry
c = rx * rx + ry * ry - r * r
# Now solve a*t^2 + 2*b*t + c = 0
d = b * b - a * c
if d < 0.:
# no real intersection
return
s = math.sqrt(d)
t1 = (- b - s) / a
t2 = (- b + s) / a
points = []
if 0. <= t1 <= 1.:
points.append(tuple([round((1 - t1) * x1 + t1 * x2), round((1 - t1) * y1 + t1 * y2)]))
if 0. <= t2 <= 1.:
points.append(tuple([round((1 - t2) * x1 + t2 * x2), round((1 - t2) * y1 + t2 * y2)]))
return points
def invert_angle(angle):
return (angle + math.pi) % (2 * math.pi)
if line_circle_collision(cue_start, cue_tip, cue_ball, cue_ball_radius):
# 5.3 Get the angle of the cue ball trajectory.
trj_angle = cue_angle
start_point = cue_ball
collisions = 1
while collisions <= 5:
collisions += 1
# 5.4 Use the angle, center and radius of the cue ball to calculate at which point the line starts.
# - The point is: x = (x1 + r + cos(radians)), y = (y1 + r + sin(radians))
end_point = (int(start_point[0] + 2000 * np.cos(trj_angle)), int(start_point[1] + 2000 * np.sin(trj_angle)))
# 5.5 Draw the trajectory.
# - When the edge of the image is released then continue on a new angle or stop after 5 collision.
line = np.array([start_point, end_point])
# Filter out balls that are possible to hit
selected_balls = []
for i in range(0, len(balls)):
if i not in hit_balls:
selected_balls.append(balls[i])
# Sort the balls based on distance
def | (pt1, pt2):
return math.sqrt(math.pow(pt2[0]-pt1[0], 2)+math.pow(pt2[1]-pt1[1], 2))
def point_by_angle(pt, angle, distance):
x = pt[0] + (distance * math.cos(angle))
y = pt[1] + (distance * math.sin(angle))
return tuple([round(x), round(y)])
selected_balls.sort(key=lambda ball: point_distance(start_point, ball))
ball_hit = False
for ball in selected_balls:
if ball in hit_balls:
continue
if line_circle_collision(start_point, end_point, ball, cue_ball_radius*2):
points = line_circle_intersection(start_point, end_point, ball, cue_ball_radius*2)
if len(points) <= 0 or start_point == points[0]:
continue
end_point = points[0]
cv.circle(shown_image, end_point, cue_ball_radius, (0, 255,255), thickness=2)
ball_hit = True
trj_angle = invert_angle(points_to_angle(ball, end_point))
cv.line(shown_image, end_point, point_by_angle(end_point, trj_angle, img.shape[1]*2), (255, 100, 255), thickness=3)
if cue_angle > points_to_angle(start_point, ball):
trj_angle += math.pi / 2
else:
trj_angle -= math.pi / 2
hit_balls.append(ball)
break
if ball_hit:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
start_point = end_point
continue
# Added check so trajectory stops at pocket
in_pocket = False
for pocket in pockets:
if line_circle_collision(start_point, end_point, pocket, 40): # approximate pocket size in this example
points = line_circle_intersection(start_point, end_point, pocket, 40)
if len(points) <= 0:
continue
in_pocket = True
end_point = points[0]
break
if in_pocket:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
break
sides = [0, 0]
if trj_angle < 0:
sides[0] = 0
else:
sides[0] = 2
if abs(trj_angle) < (math.pi / 2):
sides[1] = 1
else:
sides[1] = 3
found = False
for i in sides:
boundary = np.array([table[i], table[0 if i + 1 > 3 else i + 1]], dtype=float)
point = line_intersect(line.astype(np.float), boundary)
if point is None:
continue
if 0 <= point[0] <= img.shape[1] and 0 <= point[1] <= img.shape[0]:
cv.circle(shown_image, point, 10, (0, 0, 255), thickness=3)
cv.line(shown_image, start_point, point, (100, 100, 255), thickness=3)
start_point = point
if i == 1 or i == 3:
if trj_angle > 0:
trj_angle = math.pi - trj_angle
else:
trj_angle = -(trj_angle + math.pi)
else:
trj_angle = -trj_angle
if trj_angle > math.pi:
trj_angle = math.pi - trj_angle
elif trj_angle < -math.pi:
trj_angle = math.pi + trj_angle
hit_x = int(start_point[0] + 2000 * np.cos(trj_angle))
hit_y = int(start_point[1] + 2000 * np.sin(trj_angle))
end_point = (hit_x, hit_y)
found = True
break
if not found:
break
# DEBUG OPTIONS:
# - Draw circles where the points are on the cue
cv.line(shown_image, cue_tip, cue_start, (255, 255, 0), thickness=6)
cv.circle(shown_image, cue_tip, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_start, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_ball, 6, (0, 255, 0), thickness=-1)
for pocket in pockets:
cv.circle(shown_image, pocket, 40, (0, 255, 0), thickness=2)
# - Let the cue be determined by mouse positions.
# - Clicking outputs the coordinates of the mouse.
def mouse_event(event, x, y, flags, param):
global cue_start, cue_tip
if event == cv.EVENT_LBUTTONDOWN:
cue_start = (x, y)
if event == cv.EVENT_RBUTTONDOWN:
cue_tip = (x, y)
update()
cv.namedWindow("img", cv.WINDOW_NORMAL)
cv.setMouseCallback("img", mouse_event)
cv.imshow("img", shown_image)
if(cv.waitKey(0) == 27):
exit(200)
update()
| point_distance | identifier_name |
trajectory_multiple.py | import math
import numpy as np
import cv2 as cv
# 1. Read TEST image.
img = cv.imread("image.jpg", cv.IMREAD_COLOR)
# 2. Initialize known information.
# - Table (corner coordinates).
# - Cue ball (location, radius).
# - Cue (2 points on the cue, including the coordinates of the tip).
table = [(163, 117), (1710, 120), (1836, 888), (63, 939)]
cue_ball = (590, 490)
cue_ball_radius = 30
cue_start = (203, 193) # Random point within cue
cue_tip = (533, 400)
balls = [
(180, 160),
(860, 373),
(963, 593),
(893, 693),
(1213, 783),
(1103, 643),
(1256, 556),
(1176, 243),
(1523, 206),
(1516, 253),
(1586, 503),
(1673, 716)
]
# 3. Get the warp matrix of the table angle and warp the image.
dst_shape = (table[1][0], table[2][1]-table[1][1])
src_pt = np.float32(table[:3])
dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape])
warp_matrix = cv.getAffineTransform(src_pt, dst_pt)
img = cv.warpAffine(img, warp_matrix, dst_shape)
# src_pt = np.float32(table)
# dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape, (0, dst_shape[1])])
# warp_matrix = cv.getPerspectiveTransform(src_pt, dst_pt)
# img = cv.warpPerspective(img, warp_matrix, dst_shape)
# 4. Correct known coordinates with the warp matrix.
def transform_point(pt, matrix):
is_list = type(pt) is list
if not is_list:
pt = [[pt]]
else:
pt = [[p] for p in pt]
array = np.array(pt)
transformed = cv.transform(array, matrix)
squeezed = np.squeeze(np.squeeze(transformed))
if not is_list:
|
return [tuple(x[:2]) for x in squeezed]
# table = transform_point(table, warp_matrix)
table = [(0, 0), (1710, 0), (1710, 768), (0, 768)]
cue_ball = transform_point(cue_ball, warp_matrix)
cue_start = transform_point(cue_start, warp_matrix)
cue_tip = transform_point(cue_tip, warp_matrix)
balls = transform_point(balls, warp_matrix)
pockets = table.copy()
x_center = int(img.shape[1] / 2)
pockets.append((x_center, 0))
pockets.append((x_center, img.shape[0]))
def update():
global img, table, cue_tip, cue_start, cue_ball, cue_ball_radius
hit_balls = []
shown_image = img.copy()
def points_to_angle(point1, point2):
return math.atan2(point2[1] - point1[1], point2[0] - point1[0])
# 5. Draw the trajectory
# 5.1 Get the cue angle based on 2 known points.
# - To get the angle in Radian measure use the atan2(y1-y2, x1-x2) function.
cue_angle = points_to_angle(cue_start, cue_tip)
# 5.2 Check if the cue angle overlaps with the location of the cue ball.
# - Check if the distance of the cue ball to the line is not more than the radius of the cue ball.
# - Distance is using: abs(ax - by + c) / sqrt(aa + bb)
# - a = y2-y1
# - b = x2-x1
# - c = x2y1 + y2x1
# - x, y = point of cue ball center.
# - Extra check if the cue ball is not behind the cue tip.
# http://www.pygame.org/wiki/IntersectingLineDetection
def line_intersect(line1, line2):
def gradient(points):
if points[0][0] == points[1][0]:
return None
return (points[0][1] - points[1][1]) / (points[0][0] - points[1][0])
def y_intersect(p, m):
return p[1] - (m * p[0])
m1, m2 = gradient(line1), gradient(line2)
if m1 == m2:
return None
elif m1 is not None and m2 is not None:
b1 = y_intersect(line1[0], m1)
b2 = y_intersect(line2[0], m2)
x = (b2 - b1) / (m1 - m2)
pt = x, (m1 * x) + b1
elif m1 is None:
b2 = y_intersect(line2[0], m2)
pt = line2[0][0], (m2 * line1[0][0]) + b2
else:
b1 = y_intersect(line1[0], m1)
pt = line2[0][0], (m1 * line2[0][0]) + b1
return tuple(int(x) for x in pt)
def line_circle_collision(pt1, pt2, center, circle_radius):
global img
# Point opposite of circle
if (min(pt2[0], img.shape[1]) - pt1[0]) < 0 == (max(pt2[0], 0) - center[0] < 0) or (pt2[1] - pt1[1]) < 0 == (
pt2[1] - center[1]) < 0:
return False
a = (pt2[1] - pt1[1])
b = (pt2[0] - pt1[0])
c = (pt2[0] * pt1[1]) - (pt2[1] * pt1[0])
x, y = center
dist = abs(a * x - b * y + c) / math.sqrt(a * a + b * b)
if circle_radius >= dist:
return True
else:
return False
# https://stackoverflow.com/questions/29384494/the-intersection-between-a-trajectory-and-the-circles-in-the-same-area
def line_circle_intersection(pt1, pt2, center, circle_radius):
x1, y1 = [int(x) for x in pt1]
x2, y2 = [int(x) for x in pt2]
xc, yc = [int(x) for x in center]
r = circle_radius
dx = x1 - x2
dy = y1 - y2
rx = xc - x1
ry = yc - y1
a = dx * dx + dy * dy
b = dx * rx + dy * ry
c = rx * rx + ry * ry - r * r
# Now solve a*t^2 + 2*b*t + c = 0
d = b * b - a * c
if d < 0.:
# no real intersection
return
s = math.sqrt(d)
t1 = (- b - s) / a
t2 = (- b + s) / a
points = []
if 0. <= t1 <= 1.:
points.append(tuple([round((1 - t1) * x1 + t1 * x2), round((1 - t1) * y1 + t1 * y2)]))
if 0. <= t2 <= 1.:
points.append(tuple([round((1 - t2) * x1 + t2 * x2), round((1 - t2) * y1 + t2 * y2)]))
return points
def invert_angle(angle):
return (angle + math.pi) % (2 * math.pi)
if line_circle_collision(cue_start, cue_tip, cue_ball, cue_ball_radius):
# 5.3 Get the angle of the cue ball trajectory.
trj_angle = cue_angle
start_point = cue_ball
collisions = 1
while collisions <= 5:
collisions += 1
# 5.4 Use the angle, center and radius of the cue ball to calculate at which point the line starts.
# - The point is: x = (x1 + r + cos(radians)), y = (y1 + r + sin(radians))
end_point = (int(start_point[0] + 2000 * np.cos(trj_angle)), int(start_point[1] + 2000 * np.sin(trj_angle)))
# 5.5 Draw the trajectory.
# - When the edge of the image is released then continue on a new angle or stop after 5 collision.
line = np.array([start_point, end_point])
# Filter out balls that are possible to hit
selected_balls = []
for i in range(0, len(balls)):
if i not in hit_balls:
selected_balls.append(balls[i])
# Sort the balls based on distance
def point_distance(pt1, pt2):
return math.sqrt(math.pow(pt2[0]-pt1[0], 2)+math.pow(pt2[1]-pt1[1], 2))
def point_by_angle(pt, angle, distance):
x = pt[0] + (distance * math.cos(angle))
y = pt[1] + (distance * math.sin(angle))
return tuple([round(x), round(y)])
selected_balls.sort(key=lambda ball: point_distance(start_point, ball))
ball_hit = False
for ball in selected_balls:
if ball in hit_balls:
continue
if line_circle_collision(start_point, end_point, ball, cue_ball_radius*2):
points = line_circle_intersection(start_point, end_point, ball, cue_ball_radius*2)
if len(points) <= 0 or start_point == points[0]:
continue
end_point = points[0]
cv.circle(shown_image, end_point, cue_ball_radius, (0, 255,255), thickness=2)
ball_hit = True
trj_angle = invert_angle(points_to_angle(ball, end_point))
cv.line(shown_image, end_point, point_by_angle(end_point, trj_angle, img.shape[1]*2), (255, 100, 255), thickness=3)
if cue_angle > points_to_angle(start_point, ball):
trj_angle += math.pi / 2
else:
trj_angle -= math.pi / 2
hit_balls.append(ball)
break
if ball_hit:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
start_point = end_point
continue
# Added check so trajectory stops at pocket
in_pocket = False
for pocket in pockets:
if line_circle_collision(start_point, end_point, pocket, 40): # approximate pocket size in this example
points = line_circle_intersection(start_point, end_point, pocket, 40)
if len(points) <= 0:
continue
in_pocket = True
end_point = points[0]
break
if in_pocket:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
break
sides = [0, 0]
if trj_angle < 0:
sides[0] = 0
else:
sides[0] = 2
if abs(trj_angle) < (math.pi / 2):
sides[1] = 1
else:
sides[1] = 3
found = False
for i in sides:
boundary = np.array([table[i], table[0 if i + 1 > 3 else i + 1]], dtype=float)
point = line_intersect(line.astype(np.float), boundary)
if point is None:
continue
if 0 <= point[0] <= img.shape[1] and 0 <= point[1] <= img.shape[0]:
cv.circle(shown_image, point, 10, (0, 0, 255), thickness=3)
cv.line(shown_image, start_point, point, (100, 100, 255), thickness=3)
start_point = point
if i == 1 or i == 3:
if trj_angle > 0:
trj_angle = math.pi - trj_angle
else:
trj_angle = -(trj_angle + math.pi)
else:
trj_angle = -trj_angle
if trj_angle > math.pi:
trj_angle = math.pi - trj_angle
elif trj_angle < -math.pi:
trj_angle = math.pi + trj_angle
hit_x = int(start_point[0] + 2000 * np.cos(trj_angle))
hit_y = int(start_point[1] + 2000 * np.sin(trj_angle))
end_point = (hit_x, hit_y)
found = True
break
if not found:
break
# DEBUG OPTIONS:
# - Draw circles where the points are on the cue
cv.line(shown_image, cue_tip, cue_start, (255, 255, 0), thickness=6)
cv.circle(shown_image, cue_tip, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_start, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_ball, 6, (0, 255, 0), thickness=-1)
for pocket in pockets:
cv.circle(shown_image, pocket, 40, (0, 255, 0), thickness=2)
# - Let the cue be determined by mouse positions.
# - Clicking outputs the coordinates of the mouse.
def mouse_event(event, x, y, flags, param):
global cue_start, cue_tip
if event == cv.EVENT_LBUTTONDOWN:
cue_start = (x, y)
if event == cv.EVENT_RBUTTONDOWN:
cue_tip = (x, y)
update()
cv.namedWindow("img", cv.WINDOW_NORMAL)
cv.setMouseCallback("img", mouse_event)
cv.imshow("img", shown_image)
if(cv.waitKey(0) == 27):
exit(200)
update()
| return tuple(squeezed[:2]) | conditional_block |
trajectory_multiple.py | import math
import numpy as np
import cv2 as cv
# 1. Read TEST image.
img = cv.imread("image.jpg", cv.IMREAD_COLOR)
# 2. Initialize known information.
# - Table (corner coordinates).
# - Cue ball (location, radius).
# - Cue (2 points on the cue, including the coordinates of the tip).
table = [(163, 117), (1710, 120), (1836, 888), (63, 939)]
cue_ball = (590, 490)
cue_ball_radius = 30
cue_start = (203, 193) # Random point within cue
cue_tip = (533, 400)
balls = [
(180, 160),
(860, 373),
(963, 593),
(893, 693),
(1213, 783),
(1103, 643),
(1256, 556),
(1176, 243),
(1523, 206),
(1516, 253),
(1586, 503),
(1673, 716)
]
# 3. Get the warp matrix of the table angle and warp the image.
dst_shape = (table[1][0], table[2][1]-table[1][1])
src_pt = np.float32(table[:3])
dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape])
warp_matrix = cv.getAffineTransform(src_pt, dst_pt)
img = cv.warpAffine(img, warp_matrix, dst_shape)
# src_pt = np.float32(table)
# dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape, (0, dst_shape[1])])
# warp_matrix = cv.getPerspectiveTransform(src_pt, dst_pt)
# img = cv.warpPerspective(img, warp_matrix, dst_shape)
# 4. Correct known coordinates with the warp matrix.
def transform_point(pt, matrix):
is_list = type(pt) is list
if not is_list:
pt = [[pt]]
else:
pt = [[p] for p in pt]
array = np.array(pt)
transformed = cv.transform(array, matrix)
squeezed = np.squeeze(np.squeeze(transformed))
if not is_list:
return tuple(squeezed[:2])
return [tuple(x[:2]) for x in squeezed]
# table = transform_point(table, warp_matrix)
table = [(0, 0), (1710, 0), (1710, 768), (0, 768)]
cue_ball = transform_point(cue_ball, warp_matrix)
cue_start = transform_point(cue_start, warp_matrix)
cue_tip = transform_point(cue_tip, warp_matrix)
balls = transform_point(balls, warp_matrix)
pockets = table.copy()
x_center = int(img.shape[1] / 2)
pockets.append((x_center, 0))
pockets.append((x_center, img.shape[0]))
| def update():
global img, table, cue_tip, cue_start, cue_ball, cue_ball_radius
hit_balls = []
shown_image = img.copy()
def points_to_angle(point1, point2):
return math.atan2(point2[1] - point1[1], point2[0] - point1[0])
# 5. Draw the trajectory
# 5.1 Get the cue angle based on 2 known points.
# - To get the angle in Radian measure use the atan2(y1-y2, x1-x2) function.
cue_angle = points_to_angle(cue_start, cue_tip)
# 5.2 Check if the cue angle overlaps with the location of the cue ball.
# - Check if the distance of the cue ball to the line is not more than the radius of the cue ball.
# - Distance is using: abs(ax - by + c) / sqrt(aa + bb)
# - a = y2-y1
# - b = x2-x1
# - c = x2y1 + y2x1
# - x, y = point of cue ball center.
# - Extra check if the cue ball is not behind the cue tip.
# http://www.pygame.org/wiki/IntersectingLineDetection
def line_intersect(line1, line2):
def gradient(points):
if points[0][0] == points[1][0]:
return None
return (points[0][1] - points[1][1]) / (points[0][0] - points[1][0])
def y_intersect(p, m):
return p[1] - (m * p[0])
m1, m2 = gradient(line1), gradient(line2)
if m1 == m2:
return None
elif m1 is not None and m2 is not None:
b1 = y_intersect(line1[0], m1)
b2 = y_intersect(line2[0], m2)
x = (b2 - b1) / (m1 - m2)
pt = x, (m1 * x) + b1
elif m1 is None:
b2 = y_intersect(line2[0], m2)
pt = line2[0][0], (m2 * line1[0][0]) + b2
else:
b1 = y_intersect(line1[0], m1)
pt = line2[0][0], (m1 * line2[0][0]) + b1
return tuple(int(x) for x in pt)
def line_circle_collision(pt1, pt2, center, circle_radius):
global img
# Point opposite of circle
if (min(pt2[0], img.shape[1]) - pt1[0]) < 0 == (max(pt2[0], 0) - center[0] < 0) or (pt2[1] - pt1[1]) < 0 == (
pt2[1] - center[1]) < 0:
return False
a = (pt2[1] - pt1[1])
b = (pt2[0] - pt1[0])
c = (pt2[0] * pt1[1]) - (pt2[1] * pt1[0])
x, y = center
dist = abs(a * x - b * y + c) / math.sqrt(a * a + b * b)
if circle_radius >= dist:
return True
else:
return False
# https://stackoverflow.com/questions/29384494/the-intersection-between-a-trajectory-and-the-circles-in-the-same-area
def line_circle_intersection(pt1, pt2, center, circle_radius):
x1, y1 = [int(x) for x in pt1]
x2, y2 = [int(x) for x in pt2]
xc, yc = [int(x) for x in center]
r = circle_radius
dx = x1 - x2
dy = y1 - y2
rx = xc - x1
ry = yc - y1
a = dx * dx + dy * dy
b = dx * rx + dy * ry
c = rx * rx + ry * ry - r * r
# Now solve a*t^2 + 2*b*t + c = 0
d = b * b - a * c
if d < 0.:
# no real intersection
return
s = math.sqrt(d)
t1 = (- b - s) / a
t2 = (- b + s) / a
points = []
if 0. <= t1 <= 1.:
points.append(tuple([round((1 - t1) * x1 + t1 * x2), round((1 - t1) * y1 + t1 * y2)]))
if 0. <= t2 <= 1.:
points.append(tuple([round((1 - t2) * x1 + t2 * x2), round((1 - t2) * y1 + t2 * y2)]))
return points
def invert_angle(angle):
return (angle + math.pi) % (2 * math.pi)
if line_circle_collision(cue_start, cue_tip, cue_ball, cue_ball_radius):
# 5.3 Get the angle of the cue ball trajectory.
trj_angle = cue_angle
start_point = cue_ball
collisions = 1
while collisions <= 5:
collisions += 1
# 5.4 Use the angle, center and radius of the cue ball to calculate at which point the line starts.
# - The point is: x = (x1 + r + cos(radians)), y = (y1 + r + sin(radians))
end_point = (int(start_point[0] + 2000 * np.cos(trj_angle)), int(start_point[1] + 2000 * np.sin(trj_angle)))
# 5.5 Draw the trajectory.
# - When the edge of the image is released then continue on a new angle or stop after 5 collision.
line = np.array([start_point, end_point])
# Filter out balls that are possible to hit
selected_balls = []
for i in range(0, len(balls)):
if i not in hit_balls:
selected_balls.append(balls[i])
# Sort the balls based on distance
def point_distance(pt1, pt2):
return math.sqrt(math.pow(pt2[0]-pt1[0], 2)+math.pow(pt2[1]-pt1[1], 2))
def point_by_angle(pt, angle, distance):
x = pt[0] + (distance * math.cos(angle))
y = pt[1] + (distance * math.sin(angle))
return tuple([round(x), round(y)])
selected_balls.sort(key=lambda ball: point_distance(start_point, ball))
ball_hit = False
for ball in selected_balls:
if ball in hit_balls:
continue
if line_circle_collision(start_point, end_point, ball, cue_ball_radius*2):
points = line_circle_intersection(start_point, end_point, ball, cue_ball_radius*2)
if len(points) <= 0 or start_point == points[0]:
continue
end_point = points[0]
cv.circle(shown_image, end_point, cue_ball_radius, (0, 255,255), thickness=2)
ball_hit = True
trj_angle = invert_angle(points_to_angle(ball, end_point))
cv.line(shown_image, end_point, point_by_angle(end_point, trj_angle, img.shape[1]*2), (255, 100, 255), thickness=3)
if cue_angle > points_to_angle(start_point, ball):
trj_angle += math.pi / 2
else:
trj_angle -= math.pi / 2
hit_balls.append(ball)
break
if ball_hit:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
start_point = end_point
continue
# Added check so trajectory stops at pocket
in_pocket = False
for pocket in pockets:
if line_circle_collision(start_point, end_point, pocket, 40): # approximate pocket size in this example
points = line_circle_intersection(start_point, end_point, pocket, 40)
if len(points) <= 0:
continue
in_pocket = True
end_point = points[0]
break
if in_pocket:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
break
sides = [0, 0]
if trj_angle < 0:
sides[0] = 0
else:
sides[0] = 2
if abs(trj_angle) < (math.pi / 2):
sides[1] = 1
else:
sides[1] = 3
found = False
for i in sides:
boundary = np.array([table[i], table[0 if i + 1 > 3 else i + 1]], dtype=float)
point = line_intersect(line.astype(np.float), boundary)
if point is None:
continue
if 0 <= point[0] <= img.shape[1] and 0 <= point[1] <= img.shape[0]:
cv.circle(shown_image, point, 10, (0, 0, 255), thickness=3)
cv.line(shown_image, start_point, point, (100, 100, 255), thickness=3)
start_point = point
if i == 1 or i == 3:
if trj_angle > 0:
trj_angle = math.pi - trj_angle
else:
trj_angle = -(trj_angle + math.pi)
else:
trj_angle = -trj_angle
if trj_angle > math.pi:
trj_angle = math.pi - trj_angle
elif trj_angle < -math.pi:
trj_angle = math.pi + trj_angle
hit_x = int(start_point[0] + 2000 * np.cos(trj_angle))
hit_y = int(start_point[1] + 2000 * np.sin(trj_angle))
end_point = (hit_x, hit_y)
found = True
break
if not found:
break
# DEBUG OPTIONS:
# - Draw circles where the points are on the cue
cv.line(shown_image, cue_tip, cue_start, (255, 255, 0), thickness=6)
cv.circle(shown_image, cue_tip, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_start, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_ball, 6, (0, 255, 0), thickness=-1)
for pocket in pockets:
cv.circle(shown_image, pocket, 40, (0, 255, 0), thickness=2)
# - Let the cue be determined by mouse positions.
# - Clicking outputs the coordinates of the mouse.
def mouse_event(event, x, y, flags, param):
global cue_start, cue_tip
if event == cv.EVENT_LBUTTONDOWN:
cue_start = (x, y)
if event == cv.EVENT_RBUTTONDOWN:
cue_tip = (x, y)
update()
cv.namedWindow("img", cv.WINDOW_NORMAL)
cv.setMouseCallback("img", mouse_event)
cv.imshow("img", shown_image)
if(cv.waitKey(0) == 27):
exit(200)
update() | random_line_split | |
trajectory_multiple.py | import math
import numpy as np
import cv2 as cv
# 1. Read TEST image.
img = cv.imread("image.jpg", cv.IMREAD_COLOR)
# 2. Initialize known information.
# - Table (corner coordinates).
# - Cue ball (location, radius).
# - Cue (2 points on the cue, including the coordinates of the tip).
table = [(163, 117), (1710, 120), (1836, 888), (63, 939)]
cue_ball = (590, 490)
cue_ball_radius = 30
cue_start = (203, 193) # Random point within cue
cue_tip = (533, 400)
balls = [
(180, 160),
(860, 373),
(963, 593),
(893, 693),
(1213, 783),
(1103, 643),
(1256, 556),
(1176, 243),
(1523, 206),
(1516, 253),
(1586, 503),
(1673, 716)
]
# 3. Get the warp matrix of the table angle and warp the image.
dst_shape = (table[1][0], table[2][1]-table[1][1])
src_pt = np.float32(table[:3])
dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape])
warp_matrix = cv.getAffineTransform(src_pt, dst_pt)
img = cv.warpAffine(img, warp_matrix, dst_shape)
# src_pt = np.float32(table)
# dst_pt = np.float32([(0,0), (dst_shape[0],0), dst_shape, (0, dst_shape[1])])
# warp_matrix = cv.getPerspectiveTransform(src_pt, dst_pt)
# img = cv.warpPerspective(img, warp_matrix, dst_shape)
# 4. Correct known coordinates with the warp matrix.
def transform_point(pt, matrix):
is_list = type(pt) is list
if not is_list:
pt = [[pt]]
else:
pt = [[p] for p in pt]
array = np.array(pt)
transformed = cv.transform(array, matrix)
squeezed = np.squeeze(np.squeeze(transformed))
if not is_list:
return tuple(squeezed[:2])
return [tuple(x[:2]) for x in squeezed]
# table = transform_point(table, warp_matrix)
table = [(0, 0), (1710, 0), (1710, 768), (0, 768)]
cue_ball = transform_point(cue_ball, warp_matrix)
cue_start = transform_point(cue_start, warp_matrix)
cue_tip = transform_point(cue_tip, warp_matrix)
balls = transform_point(balls, warp_matrix)
pockets = table.copy()
x_center = int(img.shape[1] / 2)
pockets.append((x_center, 0))
pockets.append((x_center, img.shape[0]))
def update():
global img, table, cue_tip, cue_start, cue_ball, cue_ball_radius
hit_balls = []
shown_image = img.copy()
def points_to_angle(point1, point2):
return math.atan2(point2[1] - point1[1], point2[0] - point1[0])
# 5. Draw the trajectory
# 5.1 Get the cue angle based on 2 known points.
# - To get the angle in Radian measure use the atan2(y1-y2, x1-x2) function.
cue_angle = points_to_angle(cue_start, cue_tip)
# 5.2 Check if the cue angle overlaps with the location of the cue ball.
# - Check if the distance of the cue ball to the line is not more than the radius of the cue ball.
# - Distance is using: abs(ax - by + c) / sqrt(aa + bb)
# - a = y2-y1
# - b = x2-x1
# - c = x2y1 + y2x1
# - x, y = point of cue ball center.
# - Extra check if the cue ball is not behind the cue tip.
# http://www.pygame.org/wiki/IntersectingLineDetection
def line_intersect(line1, line2):
def gradient(points):
if points[0][0] == points[1][0]:
return None
return (points[0][1] - points[1][1]) / (points[0][0] - points[1][0])
def y_intersect(p, m):
return p[1] - (m * p[0])
m1, m2 = gradient(line1), gradient(line2)
if m1 == m2:
return None
elif m1 is not None and m2 is not None:
b1 = y_intersect(line1[0], m1)
b2 = y_intersect(line2[0], m2)
x = (b2 - b1) / (m1 - m2)
pt = x, (m1 * x) + b1
elif m1 is None:
b2 = y_intersect(line2[0], m2)
pt = line2[0][0], (m2 * line1[0][0]) + b2
else:
b1 = y_intersect(line1[0], m1)
pt = line2[0][0], (m1 * line2[0][0]) + b1
return tuple(int(x) for x in pt)
def line_circle_collision(pt1, pt2, center, circle_radius):
global img
# Point opposite of circle
if (min(pt2[0], img.shape[1]) - pt1[0]) < 0 == (max(pt2[0], 0) - center[0] < 0) or (pt2[1] - pt1[1]) < 0 == (
pt2[1] - center[1]) < 0:
return False
a = (pt2[1] - pt1[1])
b = (pt2[0] - pt1[0])
c = (pt2[0] * pt1[1]) - (pt2[1] * pt1[0])
x, y = center
dist = abs(a * x - b * y + c) / math.sqrt(a * a + b * b)
if circle_radius >= dist:
return True
else:
return False
# https://stackoverflow.com/questions/29384494/the-intersection-between-a-trajectory-and-the-circles-in-the-same-area
def line_circle_intersection(pt1, pt2, center, circle_radius):
x1, y1 = [int(x) for x in pt1]
x2, y2 = [int(x) for x in pt2]
xc, yc = [int(x) for x in center]
r = circle_radius
dx = x1 - x2
dy = y1 - y2
rx = xc - x1
ry = yc - y1
a = dx * dx + dy * dy
b = dx * rx + dy * ry
c = rx * rx + ry * ry - r * r
# Now solve a*t^2 + 2*b*t + c = 0
d = b * b - a * c
if d < 0.:
# no real intersection
return
s = math.sqrt(d)
t1 = (- b - s) / a
t2 = (- b + s) / a
points = []
if 0. <= t1 <= 1.:
points.append(tuple([round((1 - t1) * x1 + t1 * x2), round((1 - t1) * y1 + t1 * y2)]))
if 0. <= t2 <= 1.:
points.append(tuple([round((1 - t2) * x1 + t2 * x2), round((1 - t2) * y1 + t2 * y2)]))
return points
def invert_angle(angle):
|
if line_circle_collision(cue_start, cue_tip, cue_ball, cue_ball_radius):
# 5.3 Get the angle of the cue ball trajectory.
trj_angle = cue_angle
start_point = cue_ball
collisions = 1
while collisions <= 5:
collisions += 1
# 5.4 Use the angle, center and radius of the cue ball to calculate at which point the line starts.
# - The point is: x = (x1 + r + cos(radians)), y = (y1 + r + sin(radians))
end_point = (int(start_point[0] + 2000 * np.cos(trj_angle)), int(start_point[1] + 2000 * np.sin(trj_angle)))
# 5.5 Draw the trajectory.
# - When the edge of the image is released then continue on a new angle or stop after 5 collision.
line = np.array([start_point, end_point])
# Filter out balls that are possible to hit
selected_balls = []
for i in range(0, len(balls)):
if i not in hit_balls:
selected_balls.append(balls[i])
# Sort the balls based on distance
def point_distance(pt1, pt2):
return math.sqrt(math.pow(pt2[0]-pt1[0], 2)+math.pow(pt2[1]-pt1[1], 2))
def point_by_angle(pt, angle, distance):
x = pt[0] + (distance * math.cos(angle))
y = pt[1] + (distance * math.sin(angle))
return tuple([round(x), round(y)])
selected_balls.sort(key=lambda ball: point_distance(start_point, ball))
ball_hit = False
for ball in selected_balls:
if ball in hit_balls:
continue
if line_circle_collision(start_point, end_point, ball, cue_ball_radius*2):
points = line_circle_intersection(start_point, end_point, ball, cue_ball_radius*2)
if len(points) <= 0 or start_point == points[0]:
continue
end_point = points[0]
cv.circle(shown_image, end_point, cue_ball_radius, (0, 255,255), thickness=2)
ball_hit = True
trj_angle = invert_angle(points_to_angle(ball, end_point))
cv.line(shown_image, end_point, point_by_angle(end_point, trj_angle, img.shape[1]*2), (255, 100, 255), thickness=3)
if cue_angle > points_to_angle(start_point, ball):
trj_angle += math.pi / 2
else:
trj_angle -= math.pi / 2
hit_balls.append(ball)
break
if ball_hit:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
start_point = end_point
continue
# Added check so trajectory stops at pocket
in_pocket = False
for pocket in pockets:
if line_circle_collision(start_point, end_point, pocket, 40): # approximate pocket size in this example
points = line_circle_intersection(start_point, end_point, pocket, 40)
if len(points) <= 0:
continue
in_pocket = True
end_point = points[0]
break
if in_pocket:
cv.line(shown_image, start_point, end_point, (100, 100, 255), thickness=3)
break
sides = [0, 0]
if trj_angle < 0:
sides[0] = 0
else:
sides[0] = 2
if abs(trj_angle) < (math.pi / 2):
sides[1] = 1
else:
sides[1] = 3
found = False
for i in sides:
boundary = np.array([table[i], table[0 if i + 1 > 3 else i + 1]], dtype=float)
point = line_intersect(line.astype(np.float), boundary)
if point is None:
continue
if 0 <= point[0] <= img.shape[1] and 0 <= point[1] <= img.shape[0]:
cv.circle(shown_image, point, 10, (0, 0, 255), thickness=3)
cv.line(shown_image, start_point, point, (100, 100, 255), thickness=3)
start_point = point
if i == 1 or i == 3:
if trj_angle > 0:
trj_angle = math.pi - trj_angle
else:
trj_angle = -(trj_angle + math.pi)
else:
trj_angle = -trj_angle
if trj_angle > math.pi:
trj_angle = math.pi - trj_angle
elif trj_angle < -math.pi:
trj_angle = math.pi + trj_angle
hit_x = int(start_point[0] + 2000 * np.cos(trj_angle))
hit_y = int(start_point[1] + 2000 * np.sin(trj_angle))
end_point = (hit_x, hit_y)
found = True
break
if not found:
break
# DEBUG OPTIONS:
# - Draw circles where the points are on the cue
cv.line(shown_image, cue_tip, cue_start, (255, 255, 0), thickness=6)
cv.circle(shown_image, cue_tip, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_start, 6, (0, 0, 0), thickness=-1)
cv.circle(shown_image, cue_ball, 6, (0, 255, 0), thickness=-1)
for pocket in pockets:
cv.circle(shown_image, pocket, 40, (0, 255, 0), thickness=2)
# - Let the cue be determined by mouse positions.
# - Clicking outputs the coordinates of the mouse.
def mouse_event(event, x, y, flags, param):
global cue_start, cue_tip
if event == cv.EVENT_LBUTTONDOWN:
cue_start = (x, y)
if event == cv.EVENT_RBUTTONDOWN:
cue_tip = (x, y)
update()
cv.namedWindow("img", cv.WINDOW_NORMAL)
cv.setMouseCallback("img", mouse_event)
cv.imshow("img", shown_image)
if(cv.waitKey(0) == 27):
exit(200)
update()
| return (angle + math.pi) % (2 * math.pi) | identifier_body |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
/// function is sent to the remote server.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self |
}
impl<T: Mitm + Sync + Send + 'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send + 'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// reponse. Returns a future that resolves to the response or error.
///
/// This function is called for plain http requests, and for https requests
/// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn proxy_connect<T: Mitm + Sync + Send + 'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
}
| {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
} | identifier_body |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
/// function is sent to the remote server.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
}
}
impl<T: Mitm + Sync + Send + 'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send + 'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// reponse. Returns a future that resolves to the response or error.
///
/// This function is called for plain http requests, and for https requests
/// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn proxy_connect<T: Mitm + Sync + Send + 'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} | else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
}
| {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} | conditional_block |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
/// function is sent to the remote server.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
}
}
impl<T: Mitm + Sync + Send + 'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send + 'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// reponse. Returns a future that resolves to the response or error.
///
/// This function is called for plain http requests, and for https requests
/// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn | <T: Mitm + Sync + Send + 'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
}
| proxy_connect | identifier_name |
lib.rs | //! Monie-in-the-middle http(s) proxy library
//!
//! Observe and manipulate requests by implementing `monie::Mitm`.
//!
//! Here is a skeleton to help get started:
//!
//! ```
//! use futures::future::Future;
//! use hyper::{Body, Chunk, Request, Response, Server};
//! use monie::{Mitm, MitmProxyService};
//!
//! struct MyMitm;
//!
//! impl Mitm for MyMitm {
//! fn new(_: http::uri::Uri) -> MyMitm { MyMitm {} }
//! fn request_headers(&self, req: Request<Body>) -> Request<Body> { req }
//! fn response_headers(&self, res: Response<Body>) -> Response<Body> { res }
//! fn request_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! fn response_body_chunk(&self, chunk: Chunk) -> Chunk { chunk }
//! }
//!
//! fn main() {
//! let addr = ([127, 0, 0, 1], 8000).into();
//! let svc = MitmProxyService::<MyMitm>::new();
//! let server = Server::bind(&addr)
//! .serve(svc)
//! .map_err(|e| eprintln!("server error: {}", e));
//! println!("noop mitm proxy listening on http://{}", addr);
//! hyper::rt::run(server);
//! }
//! ```
//!
//! Other examples can be found at
//! <https://github.com/nlevitt/monie/tree/master/examples>.
#![deny(warnings)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
pub mod certauth;
use std::error::Error;
use std::sync::Arc;
use bytes::Bytes;
use futures::future::{self, Future, FutureResult};
use futures::stream::Stream;
use http::method::Method;
use http::uri::{Authority, Uri};
use hyper::client::pool::Pooled;
use hyper::client::{HttpConnector, PoolClient};
use hyper::server::conn::Http;
use hyper::service::{service_fn, NewService, Service};
use hyper::upgrade::Upgraded;
use hyper::{Body, Chunk, Client, Request, Response};
use hyper_rustls::HttpsConnector;
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
/// Represents the interception of a single request. Users of the library must
/// implement this trait. With it you can observe and manipulate the request
/// and response payload and headers.
pub trait Mitm {
/// Create a new instance of this `Mitm` implementation. The argument `uri`
/// is the uri being proxied. Implementations may do with this what they
/// wish (log it, stash it, ignore it, etc).
fn new(uri: Uri) -> Self;
/// Observe and manipulate the request headers. The `req` argument contains
/// the original request headers received from the proxy client. The
/// request headers returned by this function are sent to the remote
/// server.
fn request_headers(&self, req: Request<Body>) -> Request<Body>;
/// Observe and manipulate a chunk of the request payload. This function
/// may be called zero or more times, depending on the size of the request
/// payload. It will not be called at all in the common case of a GET
/// request with no payload. The `chunk` argument contains an original
/// chunk of the request payload as received from the proxy client. The
/// return value of this function is sent to the remote server.
fn request_body_chunk(&self, chunk: Chunk) -> Chunk;
/// Observe and manipulate the response headers. The `res` argument
/// contains the original response headers received from the remote server.
/// The response headers returned by this function are sent to the proxy
/// client.
fn response_headers(&self, res: Response<Body>) -> Response<Body>;
/// Observe and manipulate a chunk of the response payload. This function
/// may be called zero or more times, depending on the size of the payload.
/// The `chunk` argument represents an unaltered chunk of the response
/// payload as received from the remote server. The return value of this
/// function is sent to the remote server.
fn response_body_chunk(&self, chunk: Chunk) -> Chunk;
}
lazy_static! {
static ref CLIENT: Client<HttpsConnector<HttpConnector>, Body> =
Client::builder().build(HttpsConnector::new(4));
static ref HTTP: Http = Http::new();
}
/// The `hyper::service::Service` that does the proxying and calls your `Mitm`
/// implementation.
#[derive(Debug)]
pub struct MitmProxyService<T: Mitm + Sync> {
_phantom: std::marker::PhantomData<T>,
}
impl<T: Mitm + Sync> MitmProxyService<T> {
/// Creates a new `MitmProxyService`.
#[inline]
pub fn new() -> Self {
MitmProxyService::<T> {
_phantom: std::marker::PhantomData,
}
}
}
impl<T: Mitm + Sync + Send + 'static> NewService for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Service = MitmProxyService<T>;
type InitError = std::io::Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
future::ok(MitmProxyService::new())
}
}
impl<T: Mitm + Sync + Send + 'static> Service for MitmProxyService<T> {
type ReqBody = Body;
type ResBody = Body;
type Error = std::io::Error;
type Future =
Box<dyn Future<Item = Response<Body>, Error = std::io::Error> + Send>;
fn call(&mut self, req: Request<Body>) -> Self::Future {
info!("MitmProxyService::call() handling {:?}", req);
if *req.method() == Method::CONNECT {
Box::new(proxy_connect::<T>(req))
} else {
Box::new(proxy_request::<T>(req))
}
}
}
/// Obtains a connection to the scheme://authority of `uri` from the connection
/// pool.
fn obtain_connection(
uri: Uri,
) -> impl Future<Item = Pooled<PoolClient<Body>>, Error = std::io::Error> {
let key1 = Arc::new(format!(
"{}://{}",
uri.scheme_part().unwrap(),
uri.authority_part().unwrap()
));
let key2 = Arc::clone(&key1);
let result = CLIENT.connection_for(uri, key1).map_err(move |e| {
std::io::Error::new(
std::io::ErrorKind::Other,
format!("error obtaining connection to {}: {:?}", key2, e),
)
});
result
}
/// Obtains a connection to the remote server and proxies the request, calling
/// the `Mitm` implementation functions, which may manipulate the request and
/// reponse. Returns a future that resolves to the response or error. | /// received "inside" the fake, tapped `CONNECT` tunnel.
fn proxy_request<T: Mitm + Sync + Send + 'static>(
req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
obtain_connection(req.uri().to_owned())
.map(|mut connection| {
let mitm1 = Arc::new(T::new(req.uri().to_owned()));
let mitm2 = Arc::clone(&mitm1);
let req = mitm1.request_headers(req);
let (parts, body) = req.into_parts();
let body = Body::wrap_stream(
body.map(move |chunk| mitm1.request_body_chunk(chunk)),
);
let req = Request::from_parts(parts, body);
info!("proxy_request() sending request {:?}", req);
connection
.send_request_retryable(req)
.map(|response| {
let response = mitm2.response_headers(response);
let (parts, body) = response.into_parts();
let body =
Body::wrap_stream(body.map(move |chunk| {
mitm2.response_body_chunk(chunk)
}));
Response::from_parts(parts, body)
})
.map_err(|(e, _f)| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})
})
.flatten()
.or_else(|e| {
info!("proxy_request() returning 502 ({})", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Handles a CONNECT request. Tries to obtain an https connection to the
/// remote server. If that fails, returns 502 Bad Gateway. Otherwise returns
/// 200 OK, then attempts to establish a TLS connection with the proxy client,
/// masquerading as the remote server.
fn proxy_connect<T: Mitm + Sync + Send + 'static>(
connect_req: Request<Body>,
) -> impl Future<Item = Response<Body>, Error = std::io::Error> {
info!("proxy_connect() impersonating {:?}", connect_req.uri());
let authority =
Authority::from_shared(Bytes::from(connect_req.uri().to_string()))
.unwrap();
let tls_cfg = certauth::tls_config(&authority);
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query("/")
.build()
.unwrap();
obtain_connection(uri)
.map(move |_pooled| {
let inner = connect_req.into_body().on_upgrade().map_err(|e| {
info!("proxy_connect() on_upgrade error: {:?}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
.and_then(|upgraded: Upgraded| -> Accept<Upgraded> {
TlsAcceptor::from(tls_cfg).accept(upgraded)
})
.map(move |stream: TlsStream<Upgraded, rustls::ServerSession>| {
info!("proxy_connect() tls connection established with proxy \
client: {:?}", stream);
service_inner_requests::<T>(stream)
})
.map_err(|e: std::io::Error| {
error!("proxy_connect() error from somewhere: {}", e);
})
.flatten();
hyper::rt::spawn(inner);
Response::builder().status(200).body(Body::empty()).unwrap()
})
.or_else(|e| {
info!("proxy_connect() returning 502, failed to connect: {:?}", e);
future::ok(
Response::builder().status(502).body(Body::empty()).unwrap(),
)
})
}
/// Called by `proxy_connect()` once the TLS session has been established with
/// the proxy client. Proxies requests received on the TLS connection.
fn service_inner_requests<T: Mitm + Sync + Send + 'static>(
stream: TlsStream<Upgraded, rustls::ServerSession>,
) -> impl Future<Item = (), Error = ()> {
let svc = service_fn(move |req: Request<Body>| {
// "host" header is required for http 1.1
// XXX but we could fall back on authority
let authority = req.headers().get("host").unwrap().to_str().unwrap();
let uri = http::uri::Builder::new()
.scheme("https")
.authority(authority)
.path_and_query(&req.uri().to_string() as &str)
.build()
.unwrap();
let (mut parts, body) = req.into_parts();
parts.uri = uri;
let req = Request::from_parts(parts, body);
proxy_request::<T>(req)
});
HTTP.serve_connection(stream, svc)
.map_err(|e: hyper::Error| {
if match e.source() {
Some(source) => source
.to_string()
.find("Connection reset by peer")
.is_some(),
None => false,
} {
info!(
"service_inner_requests() serve_connection: \
client closed connection"
);
} else {
error!("service_inner_requests() serve_connection: {}", e);
};
})
} | ///
/// This function is called for plain http requests, and for https requests | random_line_split |
stereo_calibration.py | import cv2
import numpy as np
from glob import glob
import math
import matplotlib.pyplot as plt
import struct
from mpl_toolkits.mplot3d import Axes3D
def StereoCalib(imageList, boardSize, squareSize, displayCorners = True, useCalibrated = True, showRectified = True):
nImages = int(len(imageList)/2)
goodImageList = []
imageSize = None
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
topImagePoints = []
sideImagePoints = []
imagePoints = []
imagePoints.append([])
imagePoints.append([])
for n in range(0, nImages):
imagePoints[0].append(None)
imagePoints[1].append(None)
pattern_points = np.zeros((np.prod(boardSize), 3), np.float32)
pattern_points[:, :2] = np.indices(boardSize).T.reshape(-1, 2)
pattern_points *= squareSize
objectPoints = []
j = 0
tempTop = None
for i in range(0, nImages):
for k in range(0,2):
filename = imageList[i*2+k]
print('processsing %s... ' % filename)
img = cv2.imread(filename, 0)
if img is None:
break
if imageSize is None:
imageSize = img.shape[:2]
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, boardSize)
if not found:
print('chessboard not found') | else:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if displayCorners is True:
# print(filename)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, boardSize, corners, found)
cv2.imshow("corners", vis)
# cv2.waitKey(50)
if k is 1:
# Image from side camera
goodImageList.append(imageList[i*2])
goodImageList.append(imageList[i*2+1])
j = j + 1
sideImagePoints.append(corners.reshape(-1,2))
topImagePoints.append(tempTop)
objectPoints.append(pattern_points)
print('Added left and right points')
else:
# Image from top camera
# rightImagePoints.append(corners.reshape(-1,2))
tempTop= corners.reshape(-1,2)
# imagePoints[k].append(corners.reshape(-1,2))
# objectPoints.append(pattern_points)
print('OK')
# print(corners)
print(str(j) + " chessboard pairs have been detected\n")
nImages = j
if nImages < 2:
print("Too few pairs to run calibration\n")
return
# print(imagePoints[1])
# print(objectPoints)
print("Img count: " + str(len(topImagePoints)))
print("Obj count: " + str(len(objectPoints)))
# print(np.array(imagePoints[0]))
top_calibration = np.load('top_calibration.npz')
side_calibration = np.load('side_calibration.npz')
top_rms = top_calibration['rms']
top_camera_matrix = top_calibration['camera_matrix']
top_dist_coefs = top_calibration['dist_coefs']
side_rms = side_calibration['rms']
side_camera_matrix = side_calibration['camera_matrix']
side_dist_coefs = side_calibration['dist_coefs']
# camera_transform = np.load('camera_transform.npz')
# R = camera_transform['R']
# T = camera_transform['T']
# print('Calibrating top...')
# top_rms, top_camera_matrix, top_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, topImagePoints, imageSize, None, None)
print("Top Camera\nRMS:" + str(top_rms))
print("camera matrix: " + str(top_camera_matrix))
print("distortion coefficients: " + str(top_dist_coefs.ravel()))
# print('Calibrating side...')
# side_rms, side_camera_matrix, side_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, sideImagePoints, imageSize, None, None)
print("Side Camera\nRMS:", side_rms)
print("camera matrix:\n", side_camera_matrix)
print("distortion coefficients: ", side_dist_coefs.ravel())
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), top_camera_matrix, top_dist_coefs)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), side_camera_matrix, side_dist_coefs)
# cv2.imshow("Side Undistorted", side_undistorted)
# cameraMatrix[0] = cv2.initCameraMatrix2D(np.array(objectPoints), np.array(imagePoints[0]), imageSize, 0)
# cameraMatrix[1] = cv2.initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0)
# print(objectPoints[0])
# ret, rvec_top, tvec_top = cv2.solvePnP(objectPoints[0], topImagePoints[0], top_camera_matrix, top_dist_coefs)
# ret, rvec_side, tvec_side = cv2.solvePnP(objectPoints[0], sideImagePoints[0], side_camera_matrix, side_dist_coefs)
# print('\n')
# print('rvec top', rvec_top)
# print('tvec top', tvec_top)
# print('rvec side', rvec_side)
# print('tvec side', tvec_side)
# print(topImagePoints[0])
print('\n')
print('Stereo calibration (cv2)...')
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, topImagePoints, sideImagePoints, top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, (cv2.CALIB_FIX_INTRINSIC))
print("Rotation: ", R)
r_vec, jac = cv2.Rodrigues(R)
print("R_vec: ", np.multiply(r_vec, 180/math.pi))
print("Translation: ", T)
print("Essential: ", E)
print("Fundamental: ", F)
print('Stereo calibration (DIY)...')
topImagePointsConcat = topImagePoints[0]
sideImagePointsConcat = sideImagePoints[0]
for i in range(1, len(topImagePoints)):
topImagePointsConcat = np.concatenate((topImagePointsConcat, topImagePoints[i]))
sideImagePointsConcat = np.concatenate((sideImagePointsConcat, sideImagePoints[i]))
m1 = np.ones((len(topImagePointsConcat), 3))
m1[:,0:2] = topImagePointsConcat
m2 = np.ones((len(sideImagePointsConcat), 3))
m2[:,0:2] = sideImagePointsConcat
x1, T1 = normalizePoints(m1)
x2, T2 = normalizePoints(m2)
# print('Normalized', x1, T)
# Normalization matrix
# N = np.array([[2.0/w,0.0,-1.0],[0.0,2.0/h,-1.0],[0.0,0.0,1.0]], np.float64)
# print('N', N)
# x1 = np.dot(N,m1.T).T
# print('x1,',x1)
# x2 = np.dot(N,m2.T).T
# print('x2',x2)
A = np.ones((len(topImagePointsConcat),9))
A[:,0] = np.multiply(x1[:,0],x2[:,0])
A[:,1] = np.multiply(x1[:,1],x2[:,0])
A[:,2] = x2[:,0]
A[:,3] = np.multiply(x1[:,0],x2[:,1])
A[:,4] = np.multiply(x1[:,1],x2[:,1])
A[:,5] = x2[:,1]
A[:,6] = x1[:,0]
A[:,7] = x1[:,1]
# A[:,0] = np.multiply(x2[:,0],x1[:,0])
# A[:,1] = np.multiply(x2[:,0],x1[:,1])
# A[:,2] = x2[:,0]
# A[:,3] = np.multiply(x2[:,1],x1[:,0])
# A[:,4] = np.multiply(x2[:,1],x1[:,1])
# A[:,5] = x2[:,1]
# A[:,6] = x1[:,0]
# A[:,7] = x1[:,1]
print(A)
U, D, V = np.linalg.svd(A)
# print('U',U)
# print('D',D)
# print('V',V)
V = V.conj().T
F_new = V[:,8].reshape(3,3).copy()
# make rank 2
U, D, V = np.linalg.svd(F_new);
# print('U',U)
# print('D',D)
# print('V',V)
D_diag = np.diag([D[0], D[1], 0])
F_new = np.dot(np.dot(U, D_diag), V)
# F_new=np.dot(N.T,np.dot(F_new,N))
F_new = np.dot(np.dot(T2.T, F_new), T1)
print(F_new)
#
R, jac = cv2.Rodrigues(np.dot(np.array([[-90],[0],[0]], dtype = np.float64), math.pi/180))
T = np.array([[0], [-130], [130]], dtype=np.float64)
print("Rotation: ", R)
# r_vec, jac = cv2.Rodrigues(R)
# print("R_vec: ", r_vec)
print("Translation: ", T)
# print("Fundamental: ", F_new)
# F = F_new
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), cameraMatrix1, distCoeffs1)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), cameraMatrix2, distCoeffs2)
# cv2.imshow("Side Undistorted", side_undistorted)
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T)
# print('R', R)
# R_vec, jac = cv2.Rodrigues(R)
# print('R vec', R_vec)
# print('T', T)
print('\n')
# print('Stereo rectification (cv2)...')
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, R, T, alpha=1)
# print("R1: ", R1)
# R1_vec, jac = cv2.Rodrigues(R1)
# print("R1 vec: ", R1_vec)
# print("R2: ", R2)
# R2_vec, jac = cv2.Rodrigues(R2)
# print("P1: ", P1)
# print("P2: ", P2)
# print('Q: ', Q)
print('Stereo rectification (DIY)...')
P1 = np.concatenate((np.dot(side_camera_matrix,np.eye(3)),np.dot(side_camera_matrix,np.zeros((3,1)))), axis = 1)
P2 = np.concatenate((np.dot(side_camera_matrix,R),np.dot(side_camera_matrix,T)), axis = 1)
# print("R2 vec: ", R2_vec)
print("P1: ", P1)
print("P2: ", P2)
# np.savez_compressed('calibration.npz', R1=R1, R2=R2, P1=P1, P2=P2, CameraMatrix1=cameraMatrix1, CameraMatrix2=cameraMatrix2, DistCoeffs1=distCoeffs1, DistCoeffs2=distCoeffs2,R=R,T=T,E=E,F=F)
# np.savez_compressed('calibration.npz', CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs)
np.savez_compressed('calibration.npz', P1=P1, P2=P2, CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs,R=R,T=T,E=E,F=F)
# path = np.load("path.npz")
# top_path = path["top_path"]
# side_path = path["side_path"]
# tip3D_homogeneous = cv2.triangulatePoints(P1, P2, top_path.reshape(2,-1)[:,50:75], side_path.reshape(2,-1)[:,50:75])
# tip3D = (tip3D_homogeneous/tip3D_homogeneous[3])[0:3]
# # print("homogeneous coords: " , tip3D_homogeneous)
# print("3D coords: ", tip3D)
# ax.scatter(np.array(tip3D)[0,:],np.array(tip3D)[1,:],np.array(tip3D)[2,:])
# # plt.show()
# leftInputPoints = np.array(leftImagePoints[0]).reshape(2,-1)
# rightInputPoints = np.array(rightImagePoints[0]).reshape(2,-1)
# np.savez_compressed('points.npz',left=leftInputPoints,right=rightInputPoints)
# print("Left inputs: " + str(leftInputPoints))
# points = cv2.triangulatePoints(P1, P2, leftInputPoints[:,50:100], rightInputPoints[:,50:100])
# print('\n')
# testPoint = points[:,0]
# testPoint3D = testPoint/testPoint[3]
# point3D = points/points[3,:]
# print("3D points: " + str(point3D))
def main():
size = (9, 7)
squareSize = 6 # millimeters
sourcePath = '/home/jgschornak/NeedleGuidance/images_converging_cams/'
top_img_mask = sourcePath + 'top*.jpg'
top_img_names = glob(top_img_mask)
side_img_mask = sourcePath + 'side*.jpg'
side_img_names = glob(side_img_mask)
# print(left_img_names)
# print('\n')
# print(right_img_names)
numPairs = len(top_img_names)
imgList = []
for i in range(0, numPairs):
imgList.append(sourcePath + 'top%i' % i + '.jpg')
imgList.append(sourcePath + 'side%i' % i + '.jpg')
print(imgList)
StereoCalib(imgList, size, squareSize)
# while True:
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
def normalizePoints(pts):
centroid = np.mean(pts, axis=0)
# print('Centroid', centroid)
new_pts = np.array(pts - centroid)
# print('new_pts', new_pts)
mean_dist = np.mean(np.linalg.norm(new_pts, axis=1))
# print('mean dist', mean_dist)
scale = math.sqrt(2)/mean_dist
T = np.eye(3)
T[0,0] = scale
T[1,1] = scale
T[0,2] = -scale*centroid[0]
T[1,2] = -scale*centroid[1]
print(T)
return np.dot(T, pts.T).T, T
if __name__ == '__main__':
main() | break | random_line_split |
stereo_calibration.py | import cv2
import numpy as np
from glob import glob
import math
import matplotlib.pyplot as plt
import struct
from mpl_toolkits.mplot3d import Axes3D
def StereoCalib(imageList, boardSize, squareSize, displayCorners = True, useCalibrated = True, showRectified = True):
nImages = int(len(imageList)/2)
goodImageList = []
imageSize = None
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
topImagePoints = []
sideImagePoints = []
imagePoints = []
imagePoints.append([])
imagePoints.append([])
for n in range(0, nImages):
imagePoints[0].append(None)
imagePoints[1].append(None)
pattern_points = np.zeros((np.prod(boardSize), 3), np.float32)
pattern_points[:, :2] = np.indices(boardSize).T.reshape(-1, 2)
pattern_points *= squareSize
objectPoints = []
j = 0
tempTop = None
for i in range(0, nImages):
for k in range(0,2):
filename = imageList[i*2+k]
print('processsing %s... ' % filename)
img = cv2.imread(filename, 0)
if img is None:
break
if imageSize is None:
imageSize = img.shape[:2]
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, boardSize)
if not found:
print('chessboard not found')
break
else:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if displayCorners is True:
# print(filename)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, boardSize, corners, found)
cv2.imshow("corners", vis)
# cv2.waitKey(50)
if k is 1:
# Image from side camera
goodImageList.append(imageList[i*2])
goodImageList.append(imageList[i*2+1])
j = j + 1
sideImagePoints.append(corners.reshape(-1,2))
topImagePoints.append(tempTop)
objectPoints.append(pattern_points)
print('Added left and right points')
else:
# Image from top camera
# rightImagePoints.append(corners.reshape(-1,2))
tempTop= corners.reshape(-1,2)
# imagePoints[k].append(corners.reshape(-1,2))
# objectPoints.append(pattern_points)
print('OK')
# print(corners)
print(str(j) + " chessboard pairs have been detected\n")
nImages = j
if nImages < 2:
print("Too few pairs to run calibration\n")
return
# print(imagePoints[1])
# print(objectPoints)
print("Img count: " + str(len(topImagePoints)))
print("Obj count: " + str(len(objectPoints)))
# print(np.array(imagePoints[0]))
top_calibration = np.load('top_calibration.npz')
side_calibration = np.load('side_calibration.npz')
top_rms = top_calibration['rms']
top_camera_matrix = top_calibration['camera_matrix']
top_dist_coefs = top_calibration['dist_coefs']
side_rms = side_calibration['rms']
side_camera_matrix = side_calibration['camera_matrix']
side_dist_coefs = side_calibration['dist_coefs']
# camera_transform = np.load('camera_transform.npz')
# R = camera_transform['R']
# T = camera_transform['T']
# print('Calibrating top...')
# top_rms, top_camera_matrix, top_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, topImagePoints, imageSize, None, None)
print("Top Camera\nRMS:" + str(top_rms))
print("camera matrix: " + str(top_camera_matrix))
print("distortion coefficients: " + str(top_dist_coefs.ravel()))
# print('Calibrating side...')
# side_rms, side_camera_matrix, side_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, sideImagePoints, imageSize, None, None)
print("Side Camera\nRMS:", side_rms)
print("camera matrix:\n", side_camera_matrix)
print("distortion coefficients: ", side_dist_coefs.ravel())
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), top_camera_matrix, top_dist_coefs)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), side_camera_matrix, side_dist_coefs)
# cv2.imshow("Side Undistorted", side_undistorted)
# cameraMatrix[0] = cv2.initCameraMatrix2D(np.array(objectPoints), np.array(imagePoints[0]), imageSize, 0)
# cameraMatrix[1] = cv2.initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0)
# print(objectPoints[0])
# ret, rvec_top, tvec_top = cv2.solvePnP(objectPoints[0], topImagePoints[0], top_camera_matrix, top_dist_coefs)
# ret, rvec_side, tvec_side = cv2.solvePnP(objectPoints[0], sideImagePoints[0], side_camera_matrix, side_dist_coefs)
# print('\n')
# print('rvec top', rvec_top)
# print('tvec top', tvec_top)
# print('rvec side', rvec_side)
# print('tvec side', tvec_side)
# print(topImagePoints[0])
print('\n')
print('Stereo calibration (cv2)...')
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, topImagePoints, sideImagePoints, top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, (cv2.CALIB_FIX_INTRINSIC))
print("Rotation: ", R)
r_vec, jac = cv2.Rodrigues(R)
print("R_vec: ", np.multiply(r_vec, 180/math.pi))
print("Translation: ", T)
print("Essential: ", E)
print("Fundamental: ", F)
print('Stereo calibration (DIY)...')
topImagePointsConcat = topImagePoints[0]
sideImagePointsConcat = sideImagePoints[0]
for i in range(1, len(topImagePoints)):
topImagePointsConcat = np.concatenate((topImagePointsConcat, topImagePoints[i]))
sideImagePointsConcat = np.concatenate((sideImagePointsConcat, sideImagePoints[i]))
m1 = np.ones((len(topImagePointsConcat), 3))
m1[:,0:2] = topImagePointsConcat
m2 = np.ones((len(sideImagePointsConcat), 3))
m2[:,0:2] = sideImagePointsConcat
x1, T1 = normalizePoints(m1)
x2, T2 = normalizePoints(m2)
# print('Normalized', x1, T)
# Normalization matrix
# N = np.array([[2.0/w,0.0,-1.0],[0.0,2.0/h,-1.0],[0.0,0.0,1.0]], np.float64)
# print('N', N)
# x1 = np.dot(N,m1.T).T
# print('x1,',x1)
# x2 = np.dot(N,m2.T).T
# print('x2',x2)
A = np.ones((len(topImagePointsConcat),9))
A[:,0] = np.multiply(x1[:,0],x2[:,0])
A[:,1] = np.multiply(x1[:,1],x2[:,0])
A[:,2] = x2[:,0]
A[:,3] = np.multiply(x1[:,0],x2[:,1])
A[:,4] = np.multiply(x1[:,1],x2[:,1])
A[:,5] = x2[:,1]
A[:,6] = x1[:,0]
A[:,7] = x1[:,1]
# A[:,0] = np.multiply(x2[:,0],x1[:,0])
# A[:,1] = np.multiply(x2[:,0],x1[:,1])
# A[:,2] = x2[:,0]
# A[:,3] = np.multiply(x2[:,1],x1[:,0])
# A[:,4] = np.multiply(x2[:,1],x1[:,1])
# A[:,5] = x2[:,1]
# A[:,6] = x1[:,0]
# A[:,7] = x1[:,1]
print(A)
U, D, V = np.linalg.svd(A)
# print('U',U)
# print('D',D)
# print('V',V)
V = V.conj().T
F_new = V[:,8].reshape(3,3).copy()
# make rank 2
U, D, V = np.linalg.svd(F_new);
# print('U',U)
# print('D',D)
# print('V',V)
D_diag = np.diag([D[0], D[1], 0])
F_new = np.dot(np.dot(U, D_diag), V)
# F_new=np.dot(N.T,np.dot(F_new,N))
F_new = np.dot(np.dot(T2.T, F_new), T1)
print(F_new)
#
R, jac = cv2.Rodrigues(np.dot(np.array([[-90],[0],[0]], dtype = np.float64), math.pi/180))
T = np.array([[0], [-130], [130]], dtype=np.float64)
print("Rotation: ", R)
# r_vec, jac = cv2.Rodrigues(R)
# print("R_vec: ", r_vec)
print("Translation: ", T)
# print("Fundamental: ", F_new)
# F = F_new
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), cameraMatrix1, distCoeffs1)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), cameraMatrix2, distCoeffs2)
# cv2.imshow("Side Undistorted", side_undistorted)
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T)
# print('R', R)
# R_vec, jac = cv2.Rodrigues(R)
# print('R vec', R_vec)
# print('T', T)
print('\n')
# print('Stereo rectification (cv2)...')
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, R, T, alpha=1)
# print("R1: ", R1)
# R1_vec, jac = cv2.Rodrigues(R1)
# print("R1 vec: ", R1_vec)
# print("R2: ", R2)
# R2_vec, jac = cv2.Rodrigues(R2)
# print("P1: ", P1)
# print("P2: ", P2)
# print('Q: ', Q)
print('Stereo rectification (DIY)...')
P1 = np.concatenate((np.dot(side_camera_matrix,np.eye(3)),np.dot(side_camera_matrix,np.zeros((3,1)))), axis = 1)
P2 = np.concatenate((np.dot(side_camera_matrix,R),np.dot(side_camera_matrix,T)), axis = 1)
# print("R2 vec: ", R2_vec)
print("P1: ", P1)
print("P2: ", P2)
# np.savez_compressed('calibration.npz', R1=R1, R2=R2, P1=P1, P2=P2, CameraMatrix1=cameraMatrix1, CameraMatrix2=cameraMatrix2, DistCoeffs1=distCoeffs1, DistCoeffs2=distCoeffs2,R=R,T=T,E=E,F=F)
# np.savez_compressed('calibration.npz', CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs)
np.savez_compressed('calibration.npz', P1=P1, P2=P2, CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs,R=R,T=T,E=E,F=F)
# path = np.load("path.npz")
# top_path = path["top_path"]
# side_path = path["side_path"]
# tip3D_homogeneous = cv2.triangulatePoints(P1, P2, top_path.reshape(2,-1)[:,50:75], side_path.reshape(2,-1)[:,50:75])
# tip3D = (tip3D_homogeneous/tip3D_homogeneous[3])[0:3]
# # print("homogeneous coords: " , tip3D_homogeneous)
# print("3D coords: ", tip3D)
# ax.scatter(np.array(tip3D)[0,:],np.array(tip3D)[1,:],np.array(tip3D)[2,:])
# # plt.show()
# leftInputPoints = np.array(leftImagePoints[0]).reshape(2,-1)
# rightInputPoints = np.array(rightImagePoints[0]).reshape(2,-1)
# np.savez_compressed('points.npz',left=leftInputPoints,right=rightInputPoints)
# print("Left inputs: " + str(leftInputPoints))
# points = cv2.triangulatePoints(P1, P2, leftInputPoints[:,50:100], rightInputPoints[:,50:100])
# print('\n')
# testPoint = points[:,0]
# testPoint3D = testPoint/testPoint[3]
# point3D = points/points[3,:]
# print("3D points: " + str(point3D))
def main():
|
def normalizePoints(pts):
centroid = np.mean(pts, axis=0)
# print('Centroid', centroid)
new_pts = np.array(pts - centroid)
# print('new_pts', new_pts)
mean_dist = np.mean(np.linalg.norm(new_pts, axis=1))
# print('mean dist', mean_dist)
scale = math.sqrt(2)/mean_dist
T = np.eye(3)
T[0,0] = scale
T[1,1] = scale
T[0,2] = -scale*centroid[0]
T[1,2] = -scale*centroid[1]
print(T)
return np.dot(T, pts.T).T, T
if __name__ == '__main__':
main()
| size = (9, 7)
squareSize = 6 # millimeters
sourcePath = '/home/jgschornak/NeedleGuidance/images_converging_cams/'
top_img_mask = sourcePath + 'top*.jpg'
top_img_names = glob(top_img_mask)
side_img_mask = sourcePath + 'side*.jpg'
side_img_names = glob(side_img_mask)
# print(left_img_names)
# print('\n')
# print(right_img_names)
numPairs = len(top_img_names)
imgList = []
for i in range(0, numPairs):
imgList.append(sourcePath + 'top%i' % i + '.jpg')
imgList.append(sourcePath + 'side%i' % i + '.jpg')
print(imgList)
StereoCalib(imgList, size, squareSize)
# while True:
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break | identifier_body |
stereo_calibration.py | import cv2
import numpy as np
from glob import glob
import math
import matplotlib.pyplot as plt
import struct
from mpl_toolkits.mplot3d import Axes3D
def | (imageList, boardSize, squareSize, displayCorners = True, useCalibrated = True, showRectified = True):
nImages = int(len(imageList)/2)
goodImageList = []
imageSize = None
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
topImagePoints = []
sideImagePoints = []
imagePoints = []
imagePoints.append([])
imagePoints.append([])
for n in range(0, nImages):
imagePoints[0].append(None)
imagePoints[1].append(None)
pattern_points = np.zeros((np.prod(boardSize), 3), np.float32)
pattern_points[:, :2] = np.indices(boardSize).T.reshape(-1, 2)
pattern_points *= squareSize
objectPoints = []
j = 0
tempTop = None
for i in range(0, nImages):
for k in range(0,2):
filename = imageList[i*2+k]
print('processsing %s... ' % filename)
img = cv2.imread(filename, 0)
if img is None:
break
if imageSize is None:
imageSize = img.shape[:2]
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, boardSize)
if not found:
print('chessboard not found')
break
else:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if displayCorners is True:
# print(filename)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, boardSize, corners, found)
cv2.imshow("corners", vis)
# cv2.waitKey(50)
if k is 1:
# Image from side camera
goodImageList.append(imageList[i*2])
goodImageList.append(imageList[i*2+1])
j = j + 1
sideImagePoints.append(corners.reshape(-1,2))
topImagePoints.append(tempTop)
objectPoints.append(pattern_points)
print('Added left and right points')
else:
# Image from top camera
# rightImagePoints.append(corners.reshape(-1,2))
tempTop= corners.reshape(-1,2)
# imagePoints[k].append(corners.reshape(-1,2))
# objectPoints.append(pattern_points)
print('OK')
# print(corners)
print(str(j) + " chessboard pairs have been detected\n")
nImages = j
if nImages < 2:
print("Too few pairs to run calibration\n")
return
# print(imagePoints[1])
# print(objectPoints)
print("Img count: " + str(len(topImagePoints)))
print("Obj count: " + str(len(objectPoints)))
# print(np.array(imagePoints[0]))
top_calibration = np.load('top_calibration.npz')
side_calibration = np.load('side_calibration.npz')
top_rms = top_calibration['rms']
top_camera_matrix = top_calibration['camera_matrix']
top_dist_coefs = top_calibration['dist_coefs']
side_rms = side_calibration['rms']
side_camera_matrix = side_calibration['camera_matrix']
side_dist_coefs = side_calibration['dist_coefs']
# camera_transform = np.load('camera_transform.npz')
# R = camera_transform['R']
# T = camera_transform['T']
# print('Calibrating top...')
# top_rms, top_camera_matrix, top_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, topImagePoints, imageSize, None, None)
print("Top Camera\nRMS:" + str(top_rms))
print("camera matrix: " + str(top_camera_matrix))
print("distortion coefficients: " + str(top_dist_coefs.ravel()))
# print('Calibrating side...')
# side_rms, side_camera_matrix, side_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, sideImagePoints, imageSize, None, None)
print("Side Camera\nRMS:", side_rms)
print("camera matrix:\n", side_camera_matrix)
print("distortion coefficients: ", side_dist_coefs.ravel())
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), top_camera_matrix, top_dist_coefs)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), side_camera_matrix, side_dist_coefs)
# cv2.imshow("Side Undistorted", side_undistorted)
# cameraMatrix[0] = cv2.initCameraMatrix2D(np.array(objectPoints), np.array(imagePoints[0]), imageSize, 0)
# cameraMatrix[1] = cv2.initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0)
# print(objectPoints[0])
# ret, rvec_top, tvec_top = cv2.solvePnP(objectPoints[0], topImagePoints[0], top_camera_matrix, top_dist_coefs)
# ret, rvec_side, tvec_side = cv2.solvePnP(objectPoints[0], sideImagePoints[0], side_camera_matrix, side_dist_coefs)
# print('\n')
# print('rvec top', rvec_top)
# print('tvec top', tvec_top)
# print('rvec side', rvec_side)
# print('tvec side', tvec_side)
# print(topImagePoints[0])
print('\n')
print('Stereo calibration (cv2)...')
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, topImagePoints, sideImagePoints, top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, (cv2.CALIB_FIX_INTRINSIC))
print("Rotation: ", R)
r_vec, jac = cv2.Rodrigues(R)
print("R_vec: ", np.multiply(r_vec, 180/math.pi))
print("Translation: ", T)
print("Essential: ", E)
print("Fundamental: ", F)
print('Stereo calibration (DIY)...')
topImagePointsConcat = topImagePoints[0]
sideImagePointsConcat = sideImagePoints[0]
for i in range(1, len(topImagePoints)):
topImagePointsConcat = np.concatenate((topImagePointsConcat, topImagePoints[i]))
sideImagePointsConcat = np.concatenate((sideImagePointsConcat, sideImagePoints[i]))
m1 = np.ones((len(topImagePointsConcat), 3))
m1[:,0:2] = topImagePointsConcat
m2 = np.ones((len(sideImagePointsConcat), 3))
m2[:,0:2] = sideImagePointsConcat
x1, T1 = normalizePoints(m1)
x2, T2 = normalizePoints(m2)
# print('Normalized', x1, T)
# Normalization matrix
# N = np.array([[2.0/w,0.0,-1.0],[0.0,2.0/h,-1.0],[0.0,0.0,1.0]], np.float64)
# print('N', N)
# x1 = np.dot(N,m1.T).T
# print('x1,',x1)
# x2 = np.dot(N,m2.T).T
# print('x2',x2)
A = np.ones((len(topImagePointsConcat),9))
A[:,0] = np.multiply(x1[:,0],x2[:,0])
A[:,1] = np.multiply(x1[:,1],x2[:,0])
A[:,2] = x2[:,0]
A[:,3] = np.multiply(x1[:,0],x2[:,1])
A[:,4] = np.multiply(x1[:,1],x2[:,1])
A[:,5] = x2[:,1]
A[:,6] = x1[:,0]
A[:,7] = x1[:,1]
# A[:,0] = np.multiply(x2[:,0],x1[:,0])
# A[:,1] = np.multiply(x2[:,0],x1[:,1])
# A[:,2] = x2[:,0]
# A[:,3] = np.multiply(x2[:,1],x1[:,0])
# A[:,4] = np.multiply(x2[:,1],x1[:,1])
# A[:,5] = x2[:,1]
# A[:,6] = x1[:,0]
# A[:,7] = x1[:,1]
print(A)
U, D, V = np.linalg.svd(A)
# print('U',U)
# print('D',D)
# print('V',V)
V = V.conj().T
F_new = V[:,8].reshape(3,3).copy()
# make rank 2
U, D, V = np.linalg.svd(F_new);
# print('U',U)
# print('D',D)
# print('V',V)
D_diag = np.diag([D[0], D[1], 0])
F_new = np.dot(np.dot(U, D_diag), V)
# F_new=np.dot(N.T,np.dot(F_new,N))
F_new = np.dot(np.dot(T2.T, F_new), T1)
print(F_new)
#
R, jac = cv2.Rodrigues(np.dot(np.array([[-90],[0],[0]], dtype = np.float64), math.pi/180))
T = np.array([[0], [-130], [130]], dtype=np.float64)
print("Rotation: ", R)
# r_vec, jac = cv2.Rodrigues(R)
# print("R_vec: ", r_vec)
print("Translation: ", T)
# print("Fundamental: ", F_new)
# F = F_new
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), cameraMatrix1, distCoeffs1)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), cameraMatrix2, distCoeffs2)
# cv2.imshow("Side Undistorted", side_undistorted)
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T)
# print('R', R)
# R_vec, jac = cv2.Rodrigues(R)
# print('R vec', R_vec)
# print('T', T)
print('\n')
# print('Stereo rectification (cv2)...')
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, R, T, alpha=1)
# print("R1: ", R1)
# R1_vec, jac = cv2.Rodrigues(R1)
# print("R1 vec: ", R1_vec)
# print("R2: ", R2)
# R2_vec, jac = cv2.Rodrigues(R2)
# print("P1: ", P1)
# print("P2: ", P2)
# print('Q: ', Q)
print('Stereo rectification (DIY)...')
P1 = np.concatenate((np.dot(side_camera_matrix,np.eye(3)),np.dot(side_camera_matrix,np.zeros((3,1)))), axis = 1)
P2 = np.concatenate((np.dot(side_camera_matrix,R),np.dot(side_camera_matrix,T)), axis = 1)
# print("R2 vec: ", R2_vec)
print("P1: ", P1)
print("P2: ", P2)
# np.savez_compressed('calibration.npz', R1=R1, R2=R2, P1=P1, P2=P2, CameraMatrix1=cameraMatrix1, CameraMatrix2=cameraMatrix2, DistCoeffs1=distCoeffs1, DistCoeffs2=distCoeffs2,R=R,T=T,E=E,F=F)
# np.savez_compressed('calibration.npz', CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs)
np.savez_compressed('calibration.npz', P1=P1, P2=P2, CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs,R=R,T=T,E=E,F=F)
# path = np.load("path.npz")
# top_path = path["top_path"]
# side_path = path["side_path"]
# tip3D_homogeneous = cv2.triangulatePoints(P1, P2, top_path.reshape(2,-1)[:,50:75], side_path.reshape(2,-1)[:,50:75])
# tip3D = (tip3D_homogeneous/tip3D_homogeneous[3])[0:3]
# # print("homogeneous coords: " , tip3D_homogeneous)
# print("3D coords: ", tip3D)
# ax.scatter(np.array(tip3D)[0,:],np.array(tip3D)[1,:],np.array(tip3D)[2,:])
# # plt.show()
# leftInputPoints = np.array(leftImagePoints[0]).reshape(2,-1)
# rightInputPoints = np.array(rightImagePoints[0]).reshape(2,-1)
# np.savez_compressed('points.npz',left=leftInputPoints,right=rightInputPoints)
# print("Left inputs: " + str(leftInputPoints))
# points = cv2.triangulatePoints(P1, P2, leftInputPoints[:,50:100], rightInputPoints[:,50:100])
# print('\n')
# testPoint = points[:,0]
# testPoint3D = testPoint/testPoint[3]
# point3D = points/points[3,:]
# print("3D points: " + str(point3D))
def main():
size = (9, 7)
squareSize = 6 # millimeters
sourcePath = '/home/jgschornak/NeedleGuidance/images_converging_cams/'
top_img_mask = sourcePath + 'top*.jpg'
top_img_names = glob(top_img_mask)
side_img_mask = sourcePath + 'side*.jpg'
side_img_names = glob(side_img_mask)
# print(left_img_names)
# print('\n')
# print(right_img_names)
numPairs = len(top_img_names)
imgList = []
for i in range(0, numPairs):
imgList.append(sourcePath + 'top%i' % i + '.jpg')
imgList.append(sourcePath + 'side%i' % i + '.jpg')
print(imgList)
StereoCalib(imgList, size, squareSize)
# while True:
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
def normalizePoints(pts):
centroid = np.mean(pts, axis=0)
# print('Centroid', centroid)
new_pts = np.array(pts - centroid)
# print('new_pts', new_pts)
mean_dist = np.mean(np.linalg.norm(new_pts, axis=1))
# print('mean dist', mean_dist)
scale = math.sqrt(2)/mean_dist
T = np.eye(3)
T[0,0] = scale
T[1,1] = scale
T[0,2] = -scale*centroid[0]
T[1,2] = -scale*centroid[1]
print(T)
return np.dot(T, pts.T).T, T
if __name__ == '__main__':
main()
| StereoCalib | identifier_name |
stereo_calibration.py | import cv2
import numpy as np
from glob import glob
import math
import matplotlib.pyplot as plt
import struct
from mpl_toolkits.mplot3d import Axes3D
def StereoCalib(imageList, boardSize, squareSize, displayCorners = True, useCalibrated = True, showRectified = True):
nImages = int(len(imageList)/2)
goodImageList = []
imageSize = None
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
topImagePoints = []
sideImagePoints = []
imagePoints = []
imagePoints.append([])
imagePoints.append([])
for n in range(0, nImages):
imagePoints[0].append(None)
imagePoints[1].append(None)
pattern_points = np.zeros((np.prod(boardSize), 3), np.float32)
pattern_points[:, :2] = np.indices(boardSize).T.reshape(-1, 2)
pattern_points *= squareSize
objectPoints = []
j = 0
tempTop = None
for i in range(0, nImages):
for k in range(0,2):
filename = imageList[i*2+k]
print('processsing %s... ' % filename)
img = cv2.imread(filename, 0)
if img is None:
break
if imageSize is None:
imageSize = img.shape[:2]
h, w = img.shape[:2]
found, corners = cv2.findChessboardCorners(img, boardSize)
if not found:
print('chessboard not found')
break
else:
term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
if displayCorners is True:
# print(filename)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawChessboardCorners(vis, boardSize, corners, found)
cv2.imshow("corners", vis)
# cv2.waitKey(50)
if k is 1:
# Image from side camera
goodImageList.append(imageList[i*2])
goodImageList.append(imageList[i*2+1])
j = j + 1
sideImagePoints.append(corners.reshape(-1,2))
topImagePoints.append(tempTop)
objectPoints.append(pattern_points)
print('Added left and right points')
else:
# Image from top camera
# rightImagePoints.append(corners.reshape(-1,2))
tempTop= corners.reshape(-1,2)
# imagePoints[k].append(corners.reshape(-1,2))
# objectPoints.append(pattern_points)
print('OK')
# print(corners)
print(str(j) + " chessboard pairs have been detected\n")
nImages = j
if nImages < 2:
|
# print(imagePoints[1])
# print(objectPoints)
print("Img count: " + str(len(topImagePoints)))
print("Obj count: " + str(len(objectPoints)))
# print(np.array(imagePoints[0]))
top_calibration = np.load('top_calibration.npz')
side_calibration = np.load('side_calibration.npz')
top_rms = top_calibration['rms']
top_camera_matrix = top_calibration['camera_matrix']
top_dist_coefs = top_calibration['dist_coefs']
side_rms = side_calibration['rms']
side_camera_matrix = side_calibration['camera_matrix']
side_dist_coefs = side_calibration['dist_coefs']
# camera_transform = np.load('camera_transform.npz')
# R = camera_transform['R']
# T = camera_transform['T']
# print('Calibrating top...')
# top_rms, top_camera_matrix, top_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, topImagePoints, imageSize, None, None)
print("Top Camera\nRMS:" + str(top_rms))
print("camera matrix: " + str(top_camera_matrix))
print("distortion coefficients: " + str(top_dist_coefs.ravel()))
# print('Calibrating side...')
# side_rms, side_camera_matrix, side_dist_coefs, rvecs, tvecs = cv2.calibrateCamera(objectPoints, sideImagePoints, imageSize, None, None)
print("Side Camera\nRMS:", side_rms)
print("camera matrix:\n", side_camera_matrix)
print("distortion coefficients: ", side_dist_coefs.ravel())
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), top_camera_matrix, top_dist_coefs)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), side_camera_matrix, side_dist_coefs)
# cv2.imshow("Side Undistorted", side_undistorted)
# cameraMatrix[0] = cv2.initCameraMatrix2D(np.array(objectPoints), np.array(imagePoints[0]), imageSize, 0)
# cameraMatrix[1] = cv2.initCameraMatrix2D(objectPoints, imagePoints[1], imageSize, 0)
# print(objectPoints[0])
# ret, rvec_top, tvec_top = cv2.solvePnP(objectPoints[0], topImagePoints[0], top_camera_matrix, top_dist_coefs)
# ret, rvec_side, tvec_side = cv2.solvePnP(objectPoints[0], sideImagePoints[0], side_camera_matrix, side_dist_coefs)
# print('\n')
# print('rvec top', rvec_top)
# print('tvec top', tvec_top)
# print('rvec side', rvec_side)
# print('tvec side', tvec_side)
# print(topImagePoints[0])
print('\n')
print('Stereo calibration (cv2)...')
retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(objectPoints, topImagePoints, sideImagePoints, top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, (cv2.CALIB_FIX_INTRINSIC))
print("Rotation: ", R)
r_vec, jac = cv2.Rodrigues(R)
print("R_vec: ", np.multiply(r_vec, 180/math.pi))
print("Translation: ", T)
print("Essential: ", E)
print("Fundamental: ", F)
print('Stereo calibration (DIY)...')
topImagePointsConcat = topImagePoints[0]
sideImagePointsConcat = sideImagePoints[0]
for i in range(1, len(topImagePoints)):
topImagePointsConcat = np.concatenate((topImagePointsConcat, topImagePoints[i]))
sideImagePointsConcat = np.concatenate((sideImagePointsConcat, sideImagePoints[i]))
m1 = np.ones((len(topImagePointsConcat), 3))
m1[:,0:2] = topImagePointsConcat
m2 = np.ones((len(sideImagePointsConcat), 3))
m2[:,0:2] = sideImagePointsConcat
x1, T1 = normalizePoints(m1)
x2, T2 = normalizePoints(m2)
# print('Normalized', x1, T)
# Normalization matrix
# N = np.array([[2.0/w,0.0,-1.0],[0.0,2.0/h,-1.0],[0.0,0.0,1.0]], np.float64)
# print('N', N)
# x1 = np.dot(N,m1.T).T
# print('x1,',x1)
# x2 = np.dot(N,m2.T).T
# print('x2',x2)
A = np.ones((len(topImagePointsConcat),9))
A[:,0] = np.multiply(x1[:,0],x2[:,0])
A[:,1] = np.multiply(x1[:,1],x2[:,0])
A[:,2] = x2[:,0]
A[:,3] = np.multiply(x1[:,0],x2[:,1])
A[:,4] = np.multiply(x1[:,1],x2[:,1])
A[:,5] = x2[:,1]
A[:,6] = x1[:,0]
A[:,7] = x1[:,1]
# A[:,0] = np.multiply(x2[:,0],x1[:,0])
# A[:,1] = np.multiply(x2[:,0],x1[:,1])
# A[:,2] = x2[:,0]
# A[:,3] = np.multiply(x2[:,1],x1[:,0])
# A[:,4] = np.multiply(x2[:,1],x1[:,1])
# A[:,5] = x2[:,1]
# A[:,6] = x1[:,0]
# A[:,7] = x1[:,1]
print(A)
U, D, V = np.linalg.svd(A)
# print('U',U)
# print('D',D)
# print('V',V)
V = V.conj().T
F_new = V[:,8].reshape(3,3).copy()
# make rank 2
U, D, V = np.linalg.svd(F_new);
# print('U',U)
# print('D',D)
# print('V',V)
D_diag = np.diag([D[0], D[1], 0])
F_new = np.dot(np.dot(U, D_diag), V)
# F_new=np.dot(N.T,np.dot(F_new,N))
F_new = np.dot(np.dot(T2.T, F_new), T1)
print(F_new)
#
R, jac = cv2.Rodrigues(np.dot(np.array([[-90],[0],[0]], dtype = np.float64), math.pi/180))
T = np.array([[0], [-130], [130]], dtype=np.float64)
print("Rotation: ", R)
# r_vec, jac = cv2.Rodrigues(R)
# print("R_vec: ", r_vec)
print("Translation: ", T)
# print("Fundamental: ", F_new)
# F = F_new
# top_undistorted = cv2.undistort(cv2.imread(imageList[0]), cameraMatrix1, distCoeffs1)
# cv2.imshow("Top Undistorted", top_undistorted)
# side_undistorted = cv2.undistort(cv2.imread(imageList[1]), cameraMatrix2, distCoeffs2)
# cv2.imshow("Side Undistorted", side_undistorted)
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T)
# print('R', R)
# R_vec, jac = cv2.Rodrigues(R)
# print('R vec', R_vec)
# print('T', T)
print('\n')
# print('Stereo rectification (cv2)...')
# R1, R2, P1, P2, Q, ret1, ret2 = cv2.stereoRectify(top_camera_matrix, top_dist_coefs, side_camera_matrix, side_dist_coefs, imageSize, R, T, alpha=1)
# print("R1: ", R1)
# R1_vec, jac = cv2.Rodrigues(R1)
# print("R1 vec: ", R1_vec)
# print("R2: ", R2)
# R2_vec, jac = cv2.Rodrigues(R2)
# print("P1: ", P1)
# print("P2: ", P2)
# print('Q: ', Q)
print('Stereo rectification (DIY)...')
P1 = np.concatenate((np.dot(side_camera_matrix,np.eye(3)),np.dot(side_camera_matrix,np.zeros((3,1)))), axis = 1)
P2 = np.concatenate((np.dot(side_camera_matrix,R),np.dot(side_camera_matrix,T)), axis = 1)
# print("R2 vec: ", R2_vec)
print("P1: ", P1)
print("P2: ", P2)
# np.savez_compressed('calibration.npz', R1=R1, R2=R2, P1=P1, P2=P2, CameraMatrix1=cameraMatrix1, CameraMatrix2=cameraMatrix2, DistCoeffs1=distCoeffs1, DistCoeffs2=distCoeffs2,R=R,T=T,E=E,F=F)
# np.savez_compressed('calibration.npz', CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs)
np.savez_compressed('calibration.npz', P1=P1, P2=P2, CameraMatrix1=top_camera_matrix, CameraMatrix2=side_camera_matrix, DistCoeffs1=top_dist_coefs, DistCoeffs2=side_dist_coefs,R=R,T=T,E=E,F=F)
# path = np.load("path.npz")
# top_path = path["top_path"]
# side_path = path["side_path"]
# tip3D_homogeneous = cv2.triangulatePoints(P1, P2, top_path.reshape(2,-1)[:,50:75], side_path.reshape(2,-1)[:,50:75])
# tip3D = (tip3D_homogeneous/tip3D_homogeneous[3])[0:3]
# # print("homogeneous coords: " , tip3D_homogeneous)
# print("3D coords: ", tip3D)
# ax.scatter(np.array(tip3D)[0,:],np.array(tip3D)[1,:],np.array(tip3D)[2,:])
# # plt.show()
# leftInputPoints = np.array(leftImagePoints[0]).reshape(2,-1)
# rightInputPoints = np.array(rightImagePoints[0]).reshape(2,-1)
# np.savez_compressed('points.npz',left=leftInputPoints,right=rightInputPoints)
# print("Left inputs: " + str(leftInputPoints))
# points = cv2.triangulatePoints(P1, P2, leftInputPoints[:,50:100], rightInputPoints[:,50:100])
# print('\n')
# testPoint = points[:,0]
# testPoint3D = testPoint/testPoint[3]
# point3D = points/points[3,:]
# print("3D points: " + str(point3D))
def main():
size = (9, 7)
squareSize = 6 # millimeters
sourcePath = '/home/jgschornak/NeedleGuidance/images_converging_cams/'
top_img_mask = sourcePath + 'top*.jpg'
top_img_names = glob(top_img_mask)
side_img_mask = sourcePath + 'side*.jpg'
side_img_names = glob(side_img_mask)
# print(left_img_names)
# print('\n')
# print(right_img_names)
numPairs = len(top_img_names)
imgList = []
for i in range(0, numPairs):
imgList.append(sourcePath + 'top%i' % i + '.jpg')
imgList.append(sourcePath + 'side%i' % i + '.jpg')
print(imgList)
StereoCalib(imgList, size, squareSize)
# while True:
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
def normalizePoints(pts):
centroid = np.mean(pts, axis=0)
# print('Centroid', centroid)
new_pts = np.array(pts - centroid)
# print('new_pts', new_pts)
mean_dist = np.mean(np.linalg.norm(new_pts, axis=1))
# print('mean dist', mean_dist)
scale = math.sqrt(2)/mean_dist
T = np.eye(3)
T[0,0] = scale
T[1,1] = scale
T[0,2] = -scale*centroid[0]
T[1,2] = -scale*centroid[1]
print(T)
return np.dot(T, pts.T).T, T
if __name__ == '__main__':
main()
| print("Too few pairs to run calibration\n")
return | conditional_block |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
return SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block);
}
}
#[test]
fn validate_tables() |
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol),
(0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian),
(0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
];
| {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() { assert!(!PLANE0_TABLE.contains(&x)); }
SUPPLEMENTARY_TABLE.validate();
} | identifier_body |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
return SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block);
}
}
#[test]
fn validate_tables() {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() { assert!(!PLANE0_TABLE.contains(&x)); }
SUPPLEMENTARY_TABLE.validate();
}
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol), | (0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
]; | (0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian), | random_line_split |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
return SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block);
}
}
#[test]
fn validate_tables() {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() |
SUPPLEMENTARY_TABLE.validate();
}
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol),
(0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian),
(0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
];
| { assert!(!PLANE0_TABLE.contains(&x)); } | conditional_block |
block.rs | use crate::lookup_table::LookupTable;
use crate::properties::Block;
use Block::*;
impl From<char> for Block {
#[inline]
fn from(c: char) -> Self {
if c < ROW0_LIMIT {
return ROW0_TABLE.get_or(&(c as u8), No_Block);
}
if c < PLANE0_LIMIT {
return PLANE0_TABLE.get_or(&(c as u16), No_Block);
}
return SUPPLEMENTARY_TABLE.get_or(&(c as u32), No_Block);
}
}
#[test]
fn | () {
use std::convert::TryInto;
ROW0_TABLE.validate();
if let Ok(x) = (ROW0_LIMIT as u32).try_into() { assert!(!ROW0_TABLE.contains(&x)); }
PLANE0_TABLE.validate();
if let Ok(x) = (PLANE0_LIMIT as u32).try_into() { assert!(!PLANE0_TABLE.contains(&x)); }
SUPPLEMENTARY_TABLE.validate();
}
const ROW0_TABLE: LookupTable<u8, Block> = lookup_table![
(0x00, 0x7F, Basic),
];
const ROW0_LIMIT: char = '\u{80}';
const PLANE0_TABLE: LookupTable<u16, Block> = lookup_table![
(0x0080, 0x024F, Latin),
(0x0250, 0x02AF, IPA),
(0x02B0, 0x02FF, Spacing),
(0x0300, 0x036F, Combining),
(0x0370, 0x03FF, Greek),
(0x0400, 0x052F, Cyrillic),
(0x0530, 0x058F, Armenian),
(0x0590, 0x05FF, Hebrew),
(0x0600, 0x06FF, Arabic),
(0x0700, 0x074F, Syriac),
(0x0750, 0x077F, Arabic),
(0x0780, 0x07BF, Thaana),
(0x07C0, 0x07FF, NKo),
(0x0800, 0x083F, Samaritan),
(0x0840, 0x085F, Mandaic),
(0x0860, 0x086F, Syriac),
(0x08A0, 0x08FF, Arabic),
(0x0900, 0x097F, Devanagari),
(0x0980, 0x09FF, Bengali),
(0x0A00, 0x0A7F, Gurmukhi),
(0x0A80, 0x0AFF, Gujarati),
(0x0B00, 0x0B7F, Oriya),
(0x0B80, 0x0BFF, Tamil),
(0x0C00, 0x0C7F, Telugu),
(0x0C80, 0x0CFF, Kannada),
(0x0D00, 0x0D7F, Malayalam),
(0x0D80, 0x0DFF, Sinhala),
(0x0E00, 0x0E7F, Thai),
(0x0E80, 0x0EFF, Lao),
(0x0F00, 0x0FFF, Tibetan),
(0x1000, 0x109F, Myanmar),
(0x10A0, 0x10FF, Georgian),
(0x1100, 0x11FF, Hangul),
(0x1200, 0x139F, Ethiopic),
(0x13A0, 0x13FF, Cherokee),
(0x1400, 0x167F, Unified),
(0x1680, 0x169F, Ogham),
(0x16A0, 0x16FF, Runic),
(0x1700, 0x171F, Tagalog),
(0x1720, 0x173F, Hanunoo),
(0x1740, 0x175F, Buhid),
(0x1760, 0x177F, Tagbanwa),
(0x1780, 0x17FF, Khmer),
(0x1800, 0x18AF, Mongolian),
(0x18B0, 0x18FF, Unified),
(0x1900, 0x194F, Limbu),
(0x1950, 0x197F, Tai),
(0x1980, 0x19DF, New),
(0x19E0, 0x19FF, Khmer),
(0x1A00, 0x1A1F, Buginese),
(0x1A20, 0x1AAF, Tai),
(0x1AB0, 0x1AFF, Combining),
(0x1B00, 0x1B7F, Balinese),
(0x1B80, 0x1BBF, Sundanese),
(0x1BC0, 0x1BFF, Batak),
(0x1C00, 0x1C4F, Lepcha),
(0x1C50, 0x1C7F, Ol),
(0x1C80, 0x1C8F, Cyrillic),
(0x1C90, 0x1CBF, Georgian),
(0x1CC0, 0x1CCF, Sundanese),
(0x1CD0, 0x1CFF, Vedic),
(0x1D00, 0x1DBF, Phonetic),
(0x1DC0, 0x1DFF, Combining),
(0x1E00, 0x1EFF, Latin),
(0x1F00, 0x1FFF, Greek),
(0x2000, 0x206F, General),
(0x2070, 0x209F, Superscripts),
(0x20A0, 0x20CF, Currency),
(0x20D0, 0x20FF, Combining),
(0x2100, 0x214F, Letterlike),
(0x2150, 0x218F, Number),
(0x2190, 0x21FF, Arrows),
(0x2200, 0x22FF, Mathematical),
(0x2300, 0x23FF, Miscellaneous),
(0x2400, 0x243F, Control),
(0x2440, 0x245F, Optical),
(0x2460, 0x24FF, Enclosed),
(0x2500, 0x257F, Box),
(0x2580, 0x259F, Block),
(0x25A0, 0x25FF, Geometric),
(0x2600, 0x26FF, Miscellaneous),
(0x2700, 0x27BF, Dingbats),
(0x27C0, 0x27EF, Miscellaneous),
(0x27F0, 0x27FF, Supplemental),
(0x2800, 0x28FF, Braille),
(0x2900, 0x297F, Supplemental),
(0x2980, 0x29FF, Miscellaneous),
(0x2A00, 0x2AFF, Supplemental),
(0x2B00, 0x2BFF, Miscellaneous),
(0x2C00, 0x2C5F, Glagolitic),
(0x2C60, 0x2C7F, Latin),
(0x2C80, 0x2CFF, Coptic),
(0x2D00, 0x2D2F, Georgian),
(0x2D30, 0x2D7F, Tifinagh),
(0x2D80, 0x2DDF, Ethiopic),
(0x2DE0, 0x2DFF, Cyrillic),
(0x2E00, 0x2E7F, Supplemental),
(0x2E80, 0x2EFF, CJK),
(0x2F00, 0x2FDF, Kangxi),
(0x2FF0, 0x2FFF, Ideographic),
(0x3000, 0x303F, CJK),
(0x3040, 0x309F, Hiragana),
(0x30A0, 0x30FF, Katakana),
(0x3100, 0x312F, Bopomofo),
(0x3130, 0x318F, Hangul),
(0x3190, 0x319F, Kanbun),
(0x31A0, 0x31BF, Bopomofo),
(0x31C0, 0x31EF, CJK),
(0x31F0, 0x31FF, Katakana),
(0x3200, 0x32FF, Enclosed),
(0x3300, 0x4DBF, CJK),
(0x4DC0, 0x4DFF, Yijing),
(0x4E00, 0x9FFF, CJK),
(0xA000, 0xA4CF, Yi),
(0xA4D0, 0xA4FF, Lisu),
(0xA500, 0xA63F, Vai),
(0xA640, 0xA69F, Cyrillic),
(0xA6A0, 0xA6FF, Bamum),
(0xA700, 0xA71F, Modifier),
(0xA720, 0xA7FF, Latin),
(0xA800, 0xA82F, Syloti),
(0xA830, 0xA83F, Common),
(0xA840, 0xA87F, Phags),
(0xA880, 0xA8DF, Saurashtra),
(0xA8E0, 0xA8FF, Devanagari),
(0xA900, 0xA92F, Kayah),
(0xA930, 0xA95F, Rejang),
(0xA960, 0xA97F, Hangul),
(0xA980, 0xA9DF, Javanese),
(0xA9E0, 0xA9FF, Myanmar),
(0xAA00, 0xAA5F, Cham),
(0xAA60, 0xAA7F, Myanmar),
(0xAA80, 0xAADF, Tai),
(0xAAE0, 0xAAFF, Meetei),
(0xAB00, 0xAB2F, Ethiopic),
(0xAB30, 0xAB6F, Latin),
(0xAB70, 0xABBF, Cherokee),
(0xABC0, 0xABFF, Meetei),
(0xAC00, 0xD7FF, Hangul),
(0xD800, 0xDBFF, High),
(0xDC00, 0xDFFF, Low),
(0xE000, 0xF8FF, Private),
(0xF900, 0xFAFF, CJK),
(0xFB00, 0xFB4F, Alphabetic),
(0xFB50, 0xFDFF, Arabic),
(0xFE00, 0xFE0F, Variation),
(0xFE10, 0xFE1F, Vertical),
(0xFE20, 0xFE2F, Combining),
(0xFE30, 0xFE4F, CJK),
(0xFE50, 0xFE6F, Small),
(0xFE70, 0xFEFF, Arabic),
(0xFF00, 0xFFEF, Halfwidth),
(0xFFF0, 0xFFFF, Specials),
];
const PLANE0_LIMIT: char = '\u{10000}';
const SUPPLEMENTARY_TABLE: LookupTable<u32, Block> = lookup_table![
(0x010000, 0x0100FF, Linear),
(0x010100, 0x01013F, Aegean),
(0x010140, 0x0101CF, Ancient),
(0x0101D0, 0x0101FF, Phaistos),
(0x010280, 0x01029F, Lycian),
(0x0102A0, 0x0102DF, Carian),
(0x0102E0, 0x0102FF, Coptic),
(0x010300, 0x01032F, Old),
(0x010330, 0x01034F, Gothic),
(0x010350, 0x01037F, Old),
(0x010380, 0x01039F, Ugaritic),
(0x0103A0, 0x0103DF, Old),
(0x010400, 0x01044F, Deseret),
(0x010450, 0x01047F, Shavian),
(0x010480, 0x0104AF, Osmanya),
(0x0104B0, 0x0104FF, Osage),
(0x010500, 0x01052F, Elbasan),
(0x010530, 0x01056F, Caucasian),
(0x010600, 0x01077F, Linear),
(0x010800, 0x01083F, Cypriot),
(0x010840, 0x01085F, Imperial),
(0x010860, 0x01087F, Palmyrene),
(0x010880, 0x0108AF, Nabataean),
(0x0108E0, 0x0108FF, Hatran),
(0x010900, 0x01091F, Phoenician),
(0x010920, 0x01093F, Lydian),
(0x010980, 0x0109FF, Meroitic),
(0x010A00, 0x010A5F, Kharoshthi),
(0x010A60, 0x010A9F, Old),
(0x010AC0, 0x010AFF, Manichaean),
(0x010B00, 0x010B3F, Avestan),
(0x010B40, 0x010B7F, Inscriptional),
(0x010B80, 0x010BAF, Psalter),
(0x010C00, 0x010C4F, Old),
(0x010C80, 0x010CFF, Old),
(0x010D00, 0x010D3F, Hanifi),
(0x010E60, 0x010E7F, Rumi),
(0x010F00, 0x010F2F, Old),
(0x010F30, 0x010F6F, Sogdian),
(0x010FE0, 0x010FFF, Elymaic),
(0x011000, 0x01107F, Brahmi),
(0x011080, 0x0110CF, Kaithi),
(0x0110D0, 0x0110FF, Sora),
(0x011100, 0x01114F, Chakma),
(0x011150, 0x01117F, Mahajani),
(0x011180, 0x0111DF, Sharada),
(0x0111E0, 0x0111FF, Sinhala),
(0x011200, 0x01124F, Khojki),
(0x011280, 0x0112AF, Multani),
(0x0112B0, 0x0112FF, Khudawadi),
(0x011300, 0x01137F, Grantha),
(0x011400, 0x01147F, Newa),
(0x011480, 0x0114DF, Tirhuta),
(0x011580, 0x0115FF, Siddham),
(0x011600, 0x01165F, Modi),
(0x011660, 0x01167F, Mongolian),
(0x011680, 0x0116CF, Takri),
(0x011700, 0x01173F, Ahom),
(0x011800, 0x01184F, Dogra),
(0x0118A0, 0x0118FF, Warang),
(0x0119A0, 0x0119FF, Nandinagari),
(0x011A00, 0x011A4F, Zanabazar),
(0x011A50, 0x011AAF, Soyombo),
(0x011AC0, 0x011AFF, Pau),
(0x011C00, 0x011C6F, Bhaiksuki),
(0x011C70, 0x011CBF, Marchen),
(0x011D00, 0x011D5F, Masaram),
(0x011D60, 0x011DAF, Gunjala),
(0x011EE0, 0x011EFF, Makasar),
(0x011FC0, 0x011FFF, Tamil),
(0x012000, 0x01247F, Cuneiform),
(0x012480, 0x01254F, Early),
(0x013000, 0x01343F, Egyptian),
(0x014400, 0x01467F, Anatolian),
(0x016800, 0x016A3F, Bamum),
(0x016A40, 0x016A6F, Mro),
(0x016AD0, 0x016AFF, Bassa),
(0x016B00, 0x016B8F, Pahawh),
(0x016E40, 0x016E9F, Medefaidrin),
(0x016F00, 0x016F9F, Miao),
(0x016FE0, 0x016FFF, Ideographic),
(0x017000, 0x018AFF, Tangut),
(0x01B000, 0x01B12F, Kana),
(0x01B130, 0x01B16F, Small),
(0x01B170, 0x01B2FF, Nushu),
(0x01BC00, 0x01BC9F, Duployan),
(0x01BCA0, 0x01BCAF, Shorthand),
(0x01D000, 0x01D0FF, Byzantine),
(0x01D100, 0x01D1FF, Musical),
(0x01D200, 0x01D24F, Ancient),
(0x01D2E0, 0x01D2FF, Mayan),
(0x01D300, 0x01D35F, Tai),
(0x01D360, 0x01D37F, Counting),
(0x01D400, 0x01D7FF, Mathematical),
(0x01D800, 0x01DAAF, Sutton),
(0x01E000, 0x01E02F, Glagolitic),
(0x01E100, 0x01E14F, Nyiakeng),
(0x01E2C0, 0x01E2FF, Wancho),
(0x01E800, 0x01E8DF, Mende),
(0x01E900, 0x01E95F, Adlam),
(0x01EC70, 0x01ECBF, Indic),
(0x01ED00, 0x01ED4F, Ottoman),
(0x01EE00, 0x01EEFF, Arabic),
(0x01F000, 0x01F02F, Mahjong),
(0x01F030, 0x01F09F, Domino),
(0x01F0A0, 0x01F0FF, Playing),
(0x01F100, 0x01F2FF, Enclosed),
(0x01F300, 0x01F5FF, Miscellaneous),
(0x01F600, 0x01F64F, Emoticons),
(0x01F650, 0x01F67F, Ornamental),
(0x01F680, 0x01F6FF, Transport),
(0x01F700, 0x01F77F, Alchemical),
(0x01F780, 0x01F7FF, Geometric),
(0x01F800, 0x01F9FF, Supplemental),
(0x01FA00, 0x01FA6F, Chess),
(0x01FA70, 0x01FAFF, Symbols),
(0x020000, 0x02A6DF, CJK),
(0x02A700, 0x02EBEF, CJK),
(0x02F800, 0x02FA1F, CJK),
(0x0E0000, 0x0E007F, Tags),
(0x0E0100, 0x0E01EF, Variation),
(0x0F0000, 0x10FFFF, Supplementary),
];
| validate_tables | identifier_name |
drawing.go | package gui
import (
"korok.io/korok/math/f32"
"korok.io/korok/math"
"korok.io/korok/gfx/bk"
"korok.io/korok/gfx/font"
)
type DrawListFlags uint32
const (
FlagAntiAliasedLine DrawListFlags = iota
FlagAntiAliasedFill
)
// Rounding corner:
// A: 0x0000 0001 top-left
// B: 0x0000 0002 top-right
// C: 0x0000 0004 down-right
// D: 0x0000 0008 down-left
type FlagCorner uint32
const (
FlagCornerNone FlagCorner = 0x0000
FlagCornerTopLeft = 0x0001
FlagCornerTopRight = 0x0002
FlagCornerBottomRight = 0x0004
FlagCornerBottomLeft = 0x0008
FlagCornerAll = 0x000F
)
type Align uint32
const (
AlignCenter Align = iota
AlignLeft = 1 << iota
AlignRight = 1 << iota
AlignTop = 1 << iota
AlignBottom = 1 << iota
)
const (
DefaultZOrder = int16(0xFFFF>>1-100)
)
// DrawList provide method to write primitives to buffer
type DrawCmd struct {
FirstIndex uint16
ElemCount uint16
ClipRect f32.Vec4
TextureId uint16
zOrder int16
}
type DrawIdx uint16
type DrawVert struct {
xy f32.Vec2
uv f32.Vec2
color uint32
}
type DrawList struct {
CmdBuffer []DrawCmd
IdxBuffer []DrawIdx
VtxBuffer []DrawVert
cmdIndex, idxIndex, vtxIndex int
cmdCap, idxCap, vtxCap int
// Data *DrawListSharedData
OwnerName string // 窗口名
VtxCurrentIdx int // VtxBuffer.Size
// 指向当前正在使用的 cmdbuffer 的位置
VtxWriter []DrawVert
IdxWriter []DrawIdx
ClipRectStack[]f32.Vec4
TextureIdStack []uint16
// path
path [64]f32.Vec2
pathUsed int
FullScreen f32.Vec4
TexUVWhitePixel f32.Vec2
CircleVtx12 [12]f32.Vec2
Font font.Font
FontSize float32
Flags DrawListFlags
ZOrder int16
}
func NewDrawList() *DrawList {
dl := &DrawList{}
dl.Initialize()
return dl
}
func (dl *DrawList) Initialize() {
dl.CmdBuffer = make([]DrawCmd, 1024)
dl.IdxBuffer = make([]DrawIdx, 2024)
dl.VtxBuffer = make([]DrawVert, 2024)
// TODO
dl.TexUVWhitePixel = f32.Vec2{0, 0}
// TODO bake circle vertex!!
for i := 0; i < 12; i++ {
sin := math.Sin((6.28/12)*float32(i))
cos := math.Cos((6.28/12)*float32(i))
dl.CircleVtx12[i] = f32.Vec2{cos, sin}
}
dl.ZOrder = DefaultZOrder
dl.cmdIndex = 1 // skip first one
}
func (dl *DrawList) Empty() bool {
return dl.vtxIndex == 0 || dl.idxIndex == 0
}
func (dl *DrawList) Size() (idx, vdx int) {
idx = dl.idxIndex
vdx = dl.vtxIndex
return
}
// TODO
func (dl *DrawList) Clear() {
dl.cmdIndex = 1
dl.idxIndex = 0
dl.vtxIndex = 0
}
func (dl *DrawList) PathClear() {
dl.pathUsed = 0
}
func (dl *DrawList) PathLineTo(pos f32.Vec2) {
if n := len(dl.path); dl.pathUsed < n-1 {
dl.path[dl.pathUsed] = pos
dl.pathUsed += 1
}
}
func (dl *DrawList) PathLineToMergeDuplicate(pos f32.Vec2) {
//if (_Path.Size == 0 || memcmp(&_Path[_Path.Size-1], &pos, 8) != 0)
// _Path.push_back(pos);
}
func (dl *DrawList) PathFillConvex(col uint32) {
dl.AddConvexPolyFilled(dl.path[:dl.pathUsed], col);
dl.pathUsed = 0
}
// default: thickness=1.0
func (dl *DrawList) PathStroke(color uint32, thickness float32, closed bool) {
dl.AddPolyLine(dl.path[:dl.pathUsed], color, thickness, closed)
dl.PathClear()
}
func (dl *DrawList) CurrentClipRect() (clip f32.Vec4) {
if n := len(dl.ClipRectStack); n > 0 {
clip = dl.ClipRectStack[n-1]
} else {
clip = dl.FullScreen
}
return
}
func (dl *DrawList) CurrentTextureId() (id uint16) {
if n := len(dl.TextureIdStack); n > 0 {
id = dl.TextureIdStack[n-1]
}
return
}
// will result in new draw-call
func (dl *DrawList) UpdateClipRect() {
//clip := dl.CurrentClipRect()
}
func (dl *DrawList) UpdateTextureId() {
}
// Clip 相关的操作
func (dl *DrawList) PushClipRect(min, max f32.Vec2, intersectCurrentClip bool) {
cr := f32.Vec4{min[0], min[1], max[0], max[1]}
if intersectCurrentClip && len(dl.ClipRectStack) > 0{
current := dl.ClipRectStack[len(dl.ClipRectStack)-1]
if cr[0] < current[0] {
cr[0] = current[0]
}
if cr[1] < current[1] {
cr[1] = current[1]
}
if cr[2] > current[2] {
cr[2] = current[2]
}
if cr[3] > current[3] {
cr[3] = current[3]
}
cr[2] = math.Max(cr[0], cr[2])
cr[3] = math.Max(cr[1], cr[3])
dl.ClipRectStack = append(dl.ClipRectStack, cr)
dl.UpdateClipRect()
}
}
func (dl *DrawList) PushClipRectFullScreen() {
min := f32.Vec2{dl.FullScreen[0], dl.FullScreen[1]}
max := f32.Vec2{dl.FullScreen[2], dl.FullScreen[3]}
dl.PushClipRect(min, max, false)
}
func (dl *DrawList) PopClipRect() {
if n := len(dl.ClipRectStack); n > 0 {
dl.ClipRectStack = dl.ClipRectStack[:n-1]
}
}
func (dl *DrawList) GetClipRectMin() f32.Vec2 {
return f32.Vec2{0, 0 }
}
func (dl *DrawList) GetClipRectMax() f32.Vec2 {
return f32.Vec2{0, 0 }
}
func (dl *DrawList) PushTextureId(texId uint16) {
dl.TextureIdStack = append(dl.TextureIdStack, texId)
}
func (dl *DrawList) PopTextureId() {
if n := len(dl.TextureIdStack); n > 0 {
dl.TextureIdStack = dl.TextureIdStack[:n-1]
}
}
// primitive operation, auto scale by 1024
func (dl *DrawList) PrimReserve(idxCount, vtxCount int) {
if sz, require := len(dl.VtxBuffer), dl.vtxIndex+vtxCount; require >= sz {
vtxBuffer := make([]DrawVert, sz+1024)
copy(vtxBuffer, dl.VtxBuffer)
dl.VtxBuffer = vtxBuffer
}
if sz, require := len(dl.IdxBuffer), dl.idxIndex+idxCount; require >= sz {
idxBuffer := make([]DrawIdx, sz+1024)
copy(idxBuffer, dl.IdxBuffer)
dl.IdxBuffer = idxBuffer
}
dl.VtxWriter = dl.VtxBuffer[dl.vtxIndex:dl.vtxIndex+vtxCount]
dl.IdxWriter = dl.IdxBuffer[dl.idxIndex:dl.idxIndex+idxCount]
}
func (dl *DrawList) PrimRect(min, max f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
a, b, c, d := min, f32.Vec2{max[0], min[1]}, max, f32.Vec2{min[0], max[1]}
dl.VtxWriter[0] = DrawVert{a, uv, color}
dl.VtxWriter[1] = DrawVert{b, uv, color}
dl.VtxWriter[2] = DrawVert{c, uv, color}
dl.VtxWriter[3] = DrawVert{d, uv, color}
dl.IdxWriter[0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[1] = DrawIdx(dl.vtxIndex+1)
dl.IdxWriter[2] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[3] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[4] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[5] = DrawIdx(dl.vtxIndex+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
func (dl *DrawList) PrimRectUV(a, c f32.Vec2, uva, uvc f32.Vec2, color uint32) {
b, d := f32.Vec2{c[0], a[1]}, f32.Vec2{a[0], c[1]}
uvb, uvd := f32.Vec2{uvc[0], uva[1]}, f32.Vec2{uva[0], uvc[1]}
dl.VtxWriter[0] = DrawVert{a, uva, color}
dl.VtxWriter[1] = DrawVert{b, uvb, color}
dl.VtxWriter[2] = DrawVert{c, uvc, color}
dl.VtxWriter[3] = DrawVert{d, uvd, color}
ii := dl.vtxIndex
dl.IdxWriter[0] = DrawIdx(ii+0)
dl.IdxWriter[1] = DrawIdx(ii+1)
dl.IdxWriter[2] = DrawIdx(ii+2)
dl.IdxWriter[3] = DrawIdx(ii+0)
dl.IdxWriter[4] = DrawIdx(ii+2)
dl.IdxWriter[5] = DrawIdx(ii+3)
dl.idxIndex += 6
dl.vtxIndex += 4
}
func (dl *DrawList) PrimQuadUV(a, b, c, d f32.Vec2, uva, uvb,uvc, uvd f32.Vec2, color uint32) {
// vertex
dl.VtxWriter[0] = DrawVert{a, uva, color}
dl.VtxWriter[1] = DrawVert{b, uvb, color}
dl.VtxWriter[2] = DrawVert{c, uvc, color}
dl.VtxWriter[3] = DrawVert{d, uvd, color}
ii := dl.vtxIndex
dl.IdxWriter[0] = DrawIdx(ii+0)
dl.IdxWriter[1] = DrawIdx(ii+1)
dl.IdxWriter[2] = DrawIdx(ii+2)
dl.IdxWriter[3] = DrawIdx(ii+0)
dl.IdxWriter[4] = DrawIdx(ii+2)
dl.IdxWriter[5] = DrawIdx(ii+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
// 此处生成最终的顶点数据和索引数据
// 当前并不支持抗锯齿!!简单的用顶点生成线段
func (dl *DrawList) AddPolyLine(points []f32.Vec2, color uint32, thickness float32, closed bool) {
pointsCount := len(points)
if pointsCount < 2 {
return
}
uv := dl.TexUVWhitePixel
count := pointsCount
if !closed {
count = pointsCount - 1
}
// Non Anti-aliased Stroke
idxCount := count * 6
vtxCount := count * 4
dl.PrimReserve(idxCount, vtxCount)
for i1 := 0; i1 < count; i1 ++{
i2 := i1 + 1
if i2 == pointsCount {
i2 = 0
}
p1, p2 := points[i1], points[i2]
diff := p2.Sub(p1)
invLength := math.InvLength(diff[0], diff[1], 1.0)
diff = diff.Mul(invLength)
dx := diff[0] * (thickness * 0.5)
dy := diff[1] * (thickness * 0.5)
vi := i1*4
dl.VtxWriter[vi+0] = DrawVert{f32.Vec2{p1[0]+dy, p1[1]-dx}, uv, color}
dl.VtxWriter[vi+1] = DrawVert{f32.Vec2{p2[0]+dy, p2[1]-dx}, uv, color}
dl.VtxWriter[vi+2] = DrawVert{f32.Vec2{p2[0]-dy, p2[1]+dx}, uv, color}
dl.VtxWriter[vi+3] = DrawVert{f32.Vec2{p1[0]-dy, p1[1]+dx}, uv, color}
ii := i1*6
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[ii+3] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+4] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[ii+5] = DrawIdx(dl.vtxIndex+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
dl.AddCommand(idxCount)
}
// Non Anti-aliased Fill
func (dl *DrawList) AddConvexPolyFilled(points []f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
pointCount := len(points)
idxCount := (pointCount-2)*3
vtxCount := pointCount
dl.PrimReserve(idxCount, vtxCount)
for i := 0; i < vtxCount; i++ {
dl.VtxWriter[i] = DrawVert{points[i], uv, color}
}
for i, ii := 2, 0; i < pointCount; i, ii = i+1, ii+3 {
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+i-1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+i)
}
dl.vtxIndex += vtxCount
dl.idxIndex += idxCount
dl.AddCommand(idxCount)
}
// 此处圆角的算法:
// 使用一个12边形近似圆形,采用中心放射算法,计算出
// 各个角度的sin/cos, 然后通过公式,得到圆圆形顶点
// f(x) = centre.x + cos()*radius
// f(y) = centre.y + sin()*radius
// 以上, 可以提前算好 sin/cos 加速整个过程
func (dl *DrawList) PathArcToFast(centre f32.Vec2, radius float32, min12, max12 int) {
if radius == 0 || min12 > max12 {
dl.path[dl.pathUsed] = centre; dl.pathUsed ++
return
}
for a := min12; a <= max12; a++ {
x := centre[0] + dl.CircleVtx12[a%12][0] * radius
y := centre[1] + dl.CircleVtx12[a%12][1] * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathArcTo(centre f32.Vec2, radius float32, min, max float32, segments int) {
if radius == 0 {
dl.path[dl.pathUsed] = centre; dl.pathUsed++
return
}
for i := 0; i <= segments; i++ {
a := min + (float32(i)/float32(segments)) * (max-min)
x := centre[0] + math.Cos(a) * radius
y := centre[1] + math.Sin(a) * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathBezierCurveTo(p2, p3, p4 f32.Vec2, segments int) {
}
func (dl *DrawList) PathRect(a, b f32.Vec2, rounding float32, corners FlagCorner) {
if rounding <= 0 || corners == FlagCornerNone {
dl.PathLineTo(a)
dl.PathLineTo(f32.Vec2{b[0], a[1]})
dl.PathLineTo(b)
dl.PathLineTo(f32.Vec2{a[0], b[1]})
} else {
var bl, br, tr, tl float32
if (corners & FlagCornerBottomLeft) != 0 {
bl = rounding
}
if (corners & FlagCornerBottomRight) != 0 {
br = rounding
}
if (corners & FlagCornerTopRight) != 0 {
tr = rounding
}
if (corners & FlagCornerTopLeft) != 0 {
tl = rounding
}
dl.PathArcToFast(f32.Vec2{a[0]+bl, a[1]+bl}, bl, 6, 9) // bottom-left
dl.PathArcToFast(f | br}, br, 9, 12)// bottom-right
dl.PathArcToFast(f32.Vec2{b[0]-tr, b[1]-tr}, tr, 0, 3) // top-right
dl.PathArcToFast(f32.Vec2{a[0]+tl, b[1]-tl}, tl, 3, 6) // top-left
}
}
func (dl *DrawList) AddLine(a, b f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a.Add(f32.Vec2{.5, .5}))
dl.PathLineTo(b.Add(f32.Vec2{.5, .5}))
dl.PathStroke(color, thickness, false)
}
// 所有非填充图形看来都是使用路径实现的
func (dl *DrawList) AddRect(a, b f32.Vec2, color uint32, rounding float32, roundFlags FlagCorner, thickness float32) {
//dl.PathRect(a.Add(mgl32.Vec2{5, .5}), b.Sub(mgl32.Vec2{.5, .5}), rounding, roundFlags)
// TODO
dl.PathRect(a, b, rounding, roundFlags)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddRectFilled(min, max f32.Vec2, color uint32, rounding float32, corner FlagCorner) {
if rounding > 0 && corner != FlagCornerNone {
dl.PathRect(min, max, rounding, corner)
dl.PathFillConvex(color)
} else {
dl.PrimReserve(6, 4)
dl.PrimRect(min, max, color)
dl.AddCommand(6)
}
}
func (dl *DrawList) AddRectFilledMultiColor() {
}
func (dl *DrawList) AddQuad(a, b, c, d f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathLineTo(d)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddQuadFilled(a, b, c, d f32.Vec2, color uint32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathLineTo(d)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddTriangle(a, b, c f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddTriangleFilled(a, b, c f32.Vec2, color uint32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddCircle(centre f32.Vec2, radius float32, color uint32, segments int, thickness float32) {
max := math.Pi * 2 * float32(segments-1)/float32(segments)
dl.PathArcTo(centre, radius, 0.0, max, segments)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddCircleFilled(centre f32.Vec2, radius float32, color uint32, segments int) {
max := math.Pi * 2 * float32(segments-1)/float32(segments)
dl.PathArcTo(centre, radius,0.0, max, segments)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddBezierCurve(pos0 f32.Vec2, cp0, cp1 f32.Vec2, pos1 f32.Vec2,
color uint32, thickness float32, segments int) {
dl.PathLineTo(pos0)
dl.PathBezierCurveTo(cp0, cp1, pos1, segments)
dl.PathStroke(color, thickness, false)
}
func (dl *DrawList) AddImage(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PrimReserve(6, 4)
dl.PrimRectUV(a, b, uva, uvb, color)
dl.AddCommand(6)
}
func (dl *DrawList) AddImageQuad(texId uint16, a, b, c, d f32.Vec2, uva, uvb, uvc, uvd f32.Vec2, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PrimReserve(6, 4)
dl.PrimQuadUV(a, b, c, d, uva, uvb, uvc, uvd, color)
dl.AddCommand(6)
}
func (dl *DrawList) AddImageRound(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32, rounding float32, corners FlagCorner) {
if rounding <= 0 || (corners & FlagCornerAll) == 0 {
dl.AddImage(texId, a, b, uva, uvb, color)
return
}
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PathRect(a, b, rounding, corners)
dl.PathFillConvex(color)
// map uv to vertex - linear scale
xySize, uvSize := b.Sub(a), uvb.Sub(uva)
var scale f32.Vec2
if xySize[0] != 0 {
scale[0] = uvSize[0]/xySize[0]
}
if xySize[1] != 0 {
scale[1] = uvSize[1]/xySize[1]
}
// clamp??
for i := range dl.VtxWriter {
vertex := &dl.VtxWriter[i]
dx := (vertex.xy[0] - a[0]) * scale[0]
dy := (vertex.xy[1] - a[1]) * scale[1]
vertex.uv = f32.Vec2{uva[0]+dx, uva[1]+dy}
}
}
// NinePatch Algorithm
// 12 13 14 15
// x1 x2 max
// +----+----+----+
// | | | |
// | | |p1 |
// +----+----+----+ y2
// | | | |
// | |p0 | |
// +----+----+----+ y1
// | | | |
// | | | |
// +----+----+----+
//min
// 0 1 2 3
//patch = {x1, x2, y1, y2} % TextureSize
func (dl *DrawList) AddImageNinePatch(texId uint16, min, max f32.Vec2, uva, uvb f32.Vec2, patch f32.Vec4, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
_, tex := bk.R.Texture(texId)
texSize := f32.Vec2{tex.Width, tex.Height}
idxCount, vtxCount := 9 * 6, 16
dl.PrimReserve(idxCount, vtxCount)
x1, x2, y1, y2 := min[0]+patch[0]*texSize[0], max[0]-patch[1]*texSize[0], min[1]+patch[2]*texSize[1], max[1]-patch[3]*texSize[1]
uvw, uvh := uvb[0]-uva[0], uvb[1]-uva[1]
u1, u2, v1, v2 := uva[0]+patch[0]*uvw, uvb[0]-patch[1]*uvw, uva[1]+patch[2]*uvh, uvb[1]-patch[3]*uvh
if x2 < x1 {
x1 = (min[0] + max[0])/2; x2 = x1
}
if y2 < y1 {
y1 = (min[1] + max[1])/2; y2 = y1
}
vtxWriter := dl.VtxWriter
idxWriter := dl.IdxWriter
// fill vertex
vtxWriter[0] = DrawVert{min, uva, color}
vtxWriter[1] = DrawVert{f32.Vec2{x1, min[1]}, f32.Vec2{u1, uva[1]}, color}
vtxWriter[2] = DrawVert{f32.Vec2{x2, min[1]}, f32.Vec2{u2, uva[1]}, color}
vtxWriter[3] = DrawVert{f32.Vec2{max[0], min[1]}, f32.Vec2{uvb[0], uva[1]}, color}
vtxWriter[4] = DrawVert{f32.Vec2{min[0], y1}, f32.Vec2{uva[0], v1}, color}
vtxWriter[5] = DrawVert{f32.Vec2{x1, y1}, f32.Vec2{u1, v1}, color}
vtxWriter[6] = DrawVert{f32.Vec2{x2, y1}, f32.Vec2{u2, v1}, color}
vtxWriter[7] = DrawVert{f32.Vec2{max[0], y1}, f32.Vec2{uvb[0], v1}, color}
vtxWriter[8] = DrawVert{f32.Vec2{min[0], y2}, f32.Vec2{uva[0], v2}, color}
vtxWriter[9] = DrawVert{f32.Vec2{x1, y2}, f32.Vec2{u1, v2}, color}
vtxWriter[10] = DrawVert{f32.Vec2{x2, y2}, f32.Vec2{u2, v2}, color}
vtxWriter[11] = DrawVert{f32.Vec2{max[0], y2}, f32.Vec2{uvb[0], v2}, color}
vtxWriter[12] = DrawVert{f32.Vec2{min[0], max[1]}, f32.Vec2{uva[0], uvb[1]}, color}
vtxWriter[13] = DrawVert{f32.Vec2{x1, max[1]}, f32.Vec2{u1, uvb[1]}, color}
vtxWriter[14] = DrawVert{f32.Vec2{x2, max[1]}, f32.Vec2{u2, uvb[1]}, color}
vtxWriter[15] = DrawVert{max, uvb, color}
// fill index
ii := uint16(dl.vtxIndex)
for i, v := range ninePatchIndex {
idxWriter[i] = DrawIdx(ii+v)
}
dl.idxIndex += idxCount
dl.vtxIndex += vtxCount
dl.AddCommand(idxCount)
}
var ninePatchIndex = [54]uint16 {
0, 1, 5, 0, 5, 4, 1, 2, 6, 1, 6, 5, 2, 3, 7, 2, 7, 6,
4, 5, 9, 4, 9, 8, 5, 6, 10, 5, 10, 9, 6, 7, 11, 6, 11, 10,
8, 9, 13, 8, 13, 12, 9, 10, 14, 9, 14, 13, 10, 11,15, 10, 15, 14,
}
func (dl *DrawList) AddText(pos f32.Vec2, text string, font font.Font, fontSize float32, color uint32, wrapWidth float32) (size f32.Vec2){
if text == "" {
return
}
if font == nil {
font = dl.Font
}
if fontSize == 0 {
fontSize = dl.FontSize
}
fr := &FontRender{
DrawList:dl,
fontSize:fontSize,
font:font,
color:color,
}
if wrapWidth > 0 {
size = fr.RenderWrapped(pos, text, wrapWidth)
} else {
size = fr.RenderText(pos, text)
}
return
}
// 每次绘制都会产生一个 Command (可能会造成内存浪费! 1k cmd = 1000 * 6 * 4 = 24k)
// 为了减少内存可以一边添加一边尝试向前合并
func (dl *DrawList) AddCommand(elemCount int) {
var (
clip = dl.CurrentClipRect()
tex = dl.CurrentTextureId()
order = dl.ZOrder
index = dl.cmdIndex
)
if prev := &dl.CmdBuffer[index-1]; prev.ClipRect == clip && prev.TextureId == tex && prev.zOrder == order{
prev.ElemCount += uint16(elemCount)
} else {
fi := prev.FirstIndex+prev.ElemCount
dl.CmdBuffer[index] = DrawCmd{fi,uint16(elemCount),clip,tex, order}
dl.cmdIndex += 1
}
}
func (dl *DrawList) Commands() []DrawCmd {
return dl.CmdBuffer[1:dl.cmdIndex]
}
| 32.Vec2{b[0]-br, a[1]+ | conditional_block |
drawing.go | package gui
import (
"korok.io/korok/math/f32"
"korok.io/korok/math"
"korok.io/korok/gfx/bk"
"korok.io/korok/gfx/font"
)
type DrawListFlags uint32
const (
FlagAntiAliasedLine DrawListFlags = iota
FlagAntiAliasedFill
)
// Rounding corner:
// A: 0x0000 0001 top-left
// B: 0x0000 0002 top-right
// C: 0x0000 0004 down-right
// D: 0x0000 0008 down-left
type FlagCorner uint32
const (
FlagCornerNone FlagCorner = 0x0000
FlagCornerTopLeft = 0x0001
FlagCornerTopRight = 0x0002
FlagCornerBottomRight = 0x0004
FlagCornerBottomLeft = 0x0008
FlagCornerAll = 0x000F
)
type Align uint32
const (
AlignCenter Align = iota
AlignLeft = 1 << iota
AlignRight = 1 << iota
AlignTop = 1 << iota
AlignBottom = 1 << iota
)
const (
DefaultZOrder = int16(0xFFFF>>1-100)
)
// DrawList provide method to write primitives to buffer
type DrawCmd struct {
FirstIndex uint16
ElemCount uint16
ClipRect f32.Vec4
TextureId uint16
zOrder int16
}
type DrawIdx uint16
type DrawVert struct {
xy f32.Vec2
uv f32.Vec2
color uint32
}
type DrawList struct {
CmdBuffer []DrawCmd
IdxBuffer []DrawIdx
VtxBuffer []DrawVert
cmdIndex, idxIndex, vtxIndex int
cmdCap, idxCap, vtxCap int
// Data *DrawListSharedData
OwnerName string // 窗口名
VtxCurrentIdx int // VtxBuffer.Size
// 指向当前正在使用的 cmdbuffer 的位置
VtxWriter []DrawVert
IdxWriter []DrawIdx
ClipRectStack[]f32.Vec4
TextureIdStack []uint16
// path
path [64]f32.Vec2
pathUsed int
FullScreen f32.Vec4
TexUVWhitePixel f32.Vec2
CircleVtx12 [12]f32.Vec2
Font font.Font
FontSize float32
Flags DrawListFlags
ZOrder int16
}
func NewDrawList() *DrawList {
dl := &DrawList{}
dl.Initialize()
return dl
}
func (dl *DrawList) Initialize() {
dl.CmdBuffer = make([]DrawCmd, 1024)
dl.IdxBuffer = make([]DrawIdx, 2024)
dl.VtxBuffer = make([]DrawVert, 2024)
// TODO
dl.TexUVWhitePixel = f32.Vec2{0, 0}
// TODO bake circle vertex!!
for i := 0; i < 12; i++ {
sin := math.Sin((6.28/12)*float32(i))
cos := math.Cos((6.28/12)*float32(i))
dl.CircleVtx12[i] = f32.Vec2{cos, sin}
}
dl.ZOrder = DefaultZOrder
dl.cmdIndex = 1 // skip first one
}
func (dl *DrawList) Empty() bool {
return dl.vtxIndex == 0 || dl.idxIndex == 0
}
func (dl *DrawList) Size() (idx, vdx int) {
idx = dl.idxIndex
vdx = dl.vtxIndex
return
}
// TODO
func (dl *DrawList) Clear() {
dl.cmdIndex = 1
dl | ndex = 0
dl.vtxIndex = 0
}
func (dl *DrawList) PathClear() {
dl.pathUsed = 0
}
func (dl *DrawList) PathLineTo(pos f32.Vec2) {
if n := len(dl.path); dl.pathUsed < n-1 {
dl.path[dl.pathUsed] = pos
dl.pathUsed += 1
}
}
func (dl *DrawList) PathLineToMergeDuplicate(pos f32.Vec2) {
//if (_Path.Size == 0 || memcmp(&_Path[_Path.Size-1], &pos, 8) != 0)
// _Path.push_back(pos);
}
func (dl *DrawList) PathFillConvex(col uint32) {
dl.AddConvexPolyFilled(dl.path[:dl.pathUsed], col);
dl.pathUsed = 0
}
// default: thickness=1.0
func (dl *DrawList) PathStroke(color uint32, thickness float32, closed bool) {
dl.AddPolyLine(dl.path[:dl.pathUsed], color, thickness, closed)
dl.PathClear()
}
func (dl *DrawList) CurrentClipRect() (clip f32.Vec4) {
if n := len(dl.ClipRectStack); n > 0 {
clip = dl.ClipRectStack[n-1]
} else {
clip = dl.FullScreen
}
return
}
func (dl *DrawList) CurrentTextureId() (id uint16) {
if n := len(dl.TextureIdStack); n > 0 {
id = dl.TextureIdStack[n-1]
}
return
}
// will result in new draw-call
func (dl *DrawList) UpdateClipRect() {
//clip := dl.CurrentClipRect()
}
func (dl *DrawList) UpdateTextureId() {
}
// Clip 相关的操作
func (dl *DrawList) PushClipRect(min, max f32.Vec2, intersectCurrentClip bool) {
cr := f32.Vec4{min[0], min[1], max[0], max[1]}
if intersectCurrentClip && len(dl.ClipRectStack) > 0{
current := dl.ClipRectStack[len(dl.ClipRectStack)-1]
if cr[0] < current[0] {
cr[0] = current[0]
}
if cr[1] < current[1] {
cr[1] = current[1]
}
if cr[2] > current[2] {
cr[2] = current[2]
}
if cr[3] > current[3] {
cr[3] = current[3]
}
cr[2] = math.Max(cr[0], cr[2])
cr[3] = math.Max(cr[1], cr[3])
dl.ClipRectStack = append(dl.ClipRectStack, cr)
dl.UpdateClipRect()
}
}
func (dl *DrawList) PushClipRectFullScreen() {
min := f32.Vec2{dl.FullScreen[0], dl.FullScreen[1]}
max := f32.Vec2{dl.FullScreen[2], dl.FullScreen[3]}
dl.PushClipRect(min, max, false)
}
func (dl *DrawList) PopClipRect() {
if n := len(dl.ClipRectStack); n > 0 {
dl.ClipRectStack = dl.ClipRectStack[:n-1]
}
}
func (dl *DrawList) GetClipRectMin() f32.Vec2 {
return f32.Vec2{0, 0 }
}
func (dl *DrawList) GetClipRectMax() f32.Vec2 {
return f32.Vec2{0, 0 }
}
func (dl *DrawList) PushTextureId(texId uint16) {
dl.TextureIdStack = append(dl.TextureIdStack, texId)
}
func (dl *DrawList) PopTextureId() {
if n := len(dl.TextureIdStack); n > 0 {
dl.TextureIdStack = dl.TextureIdStack[:n-1]
}
}
// primitive operation, auto scale by 1024
func (dl *DrawList) PrimReserve(idxCount, vtxCount int) {
if sz, require := len(dl.VtxBuffer), dl.vtxIndex+vtxCount; require >= sz {
vtxBuffer := make([]DrawVert, sz+1024)
copy(vtxBuffer, dl.VtxBuffer)
dl.VtxBuffer = vtxBuffer
}
if sz, require := len(dl.IdxBuffer), dl.idxIndex+idxCount; require >= sz {
idxBuffer := make([]DrawIdx, sz+1024)
copy(idxBuffer, dl.IdxBuffer)
dl.IdxBuffer = idxBuffer
}
dl.VtxWriter = dl.VtxBuffer[dl.vtxIndex:dl.vtxIndex+vtxCount]
dl.IdxWriter = dl.IdxBuffer[dl.idxIndex:dl.idxIndex+idxCount]
}
func (dl *DrawList) PrimRect(min, max f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
a, b, c, d := min, f32.Vec2{max[0], min[1]}, max, f32.Vec2{min[0], max[1]}
dl.VtxWriter[0] = DrawVert{a, uv, color}
dl.VtxWriter[1] = DrawVert{b, uv, color}
dl.VtxWriter[2] = DrawVert{c, uv, color}
dl.VtxWriter[3] = DrawVert{d, uv, color}
dl.IdxWriter[0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[1] = DrawIdx(dl.vtxIndex+1)
dl.IdxWriter[2] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[3] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[4] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[5] = DrawIdx(dl.vtxIndex+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
func (dl *DrawList) PrimRectUV(a, c f32.Vec2, uva, uvc f32.Vec2, color uint32) {
b, d := f32.Vec2{c[0], a[1]}, f32.Vec2{a[0], c[1]}
uvb, uvd := f32.Vec2{uvc[0], uva[1]}, f32.Vec2{uva[0], uvc[1]}
dl.VtxWriter[0] = DrawVert{a, uva, color}
dl.VtxWriter[1] = DrawVert{b, uvb, color}
dl.VtxWriter[2] = DrawVert{c, uvc, color}
dl.VtxWriter[3] = DrawVert{d, uvd, color}
ii := dl.vtxIndex
dl.IdxWriter[0] = DrawIdx(ii+0)
dl.IdxWriter[1] = DrawIdx(ii+1)
dl.IdxWriter[2] = DrawIdx(ii+2)
dl.IdxWriter[3] = DrawIdx(ii+0)
dl.IdxWriter[4] = DrawIdx(ii+2)
dl.IdxWriter[5] = DrawIdx(ii+3)
dl.idxIndex += 6
dl.vtxIndex += 4
}
func (dl *DrawList) PrimQuadUV(a, b, c, d f32.Vec2, uva, uvb,uvc, uvd f32.Vec2, color uint32) {
// vertex
dl.VtxWriter[0] = DrawVert{a, uva, color}
dl.VtxWriter[1] = DrawVert{b, uvb, color}
dl.VtxWriter[2] = DrawVert{c, uvc, color}
dl.VtxWriter[3] = DrawVert{d, uvd, color}
ii := dl.vtxIndex
dl.IdxWriter[0] = DrawIdx(ii+0)
dl.IdxWriter[1] = DrawIdx(ii+1)
dl.IdxWriter[2] = DrawIdx(ii+2)
dl.IdxWriter[3] = DrawIdx(ii+0)
dl.IdxWriter[4] = DrawIdx(ii+2)
dl.IdxWriter[5] = DrawIdx(ii+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
// 此处生成最终的顶点数据和索引数据
// 当前并不支持抗锯齿!!简单的用顶点生成线段
func (dl *DrawList) AddPolyLine(points []f32.Vec2, color uint32, thickness float32, closed bool) {
pointsCount := len(points)
if pointsCount < 2 {
return
}
uv := dl.TexUVWhitePixel
count := pointsCount
if !closed {
count = pointsCount - 1
}
// Non Anti-aliased Stroke
idxCount := count * 6
vtxCount := count * 4
dl.PrimReserve(idxCount, vtxCount)
for i1 := 0; i1 < count; i1 ++{
i2 := i1 + 1
if i2 == pointsCount {
i2 = 0
}
p1, p2 := points[i1], points[i2]
diff := p2.Sub(p1)
invLength := math.InvLength(diff[0], diff[1], 1.0)
diff = diff.Mul(invLength)
dx := diff[0] * (thickness * 0.5)
dy := diff[1] * (thickness * 0.5)
vi := i1*4
dl.VtxWriter[vi+0] = DrawVert{f32.Vec2{p1[0]+dy, p1[1]-dx}, uv, color}
dl.VtxWriter[vi+1] = DrawVert{f32.Vec2{p2[0]+dy, p2[1]-dx}, uv, color}
dl.VtxWriter[vi+2] = DrawVert{f32.Vec2{p2[0]-dy, p2[1]+dx}, uv, color}
dl.VtxWriter[vi+3] = DrawVert{f32.Vec2{p1[0]-dy, p1[1]+dx}, uv, color}
ii := i1*6
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[ii+3] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+4] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[ii+5] = DrawIdx(dl.vtxIndex+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
dl.AddCommand(idxCount)
}
// Non Anti-aliased Fill
func (dl *DrawList) AddConvexPolyFilled(points []f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
pointCount := len(points)
idxCount := (pointCount-2)*3
vtxCount := pointCount
dl.PrimReserve(idxCount, vtxCount)
for i := 0; i < vtxCount; i++ {
dl.VtxWriter[i] = DrawVert{points[i], uv, color}
}
for i, ii := 2, 0; i < pointCount; i, ii = i+1, ii+3 {
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+i-1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+i)
}
dl.vtxIndex += vtxCount
dl.idxIndex += idxCount
dl.AddCommand(idxCount)
}
// 此处圆角的算法:
// 使用一个12边形近似圆形,采用中心放射算法,计算出
// 各个角度的sin/cos, 然后通过公式,得到圆圆形顶点
// f(x) = centre.x + cos()*radius
// f(y) = centre.y + sin()*radius
// 以上, 可以提前算好 sin/cos 加速整个过程
func (dl *DrawList) PathArcToFast(centre f32.Vec2, radius float32, min12, max12 int) {
if radius == 0 || min12 > max12 {
dl.path[dl.pathUsed] = centre; dl.pathUsed ++
return
}
for a := min12; a <= max12; a++ {
x := centre[0] + dl.CircleVtx12[a%12][0] * radius
y := centre[1] + dl.CircleVtx12[a%12][1] * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathArcTo(centre f32.Vec2, radius float32, min, max float32, segments int) {
if radius == 0 {
dl.path[dl.pathUsed] = centre; dl.pathUsed++
return
}
for i := 0; i <= segments; i++ {
a := min + (float32(i)/float32(segments)) * (max-min)
x := centre[0] + math.Cos(a) * radius
y := centre[1] + math.Sin(a) * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathBezierCurveTo(p2, p3, p4 f32.Vec2, segments int) {
}
// PathRect appends the outline of the rectangle spanned by a..b to the
// current path, rounding the corners selected in `corners` with 12-gon arcs.
func (dl *DrawList) PathRect(a, b f32.Vec2, rounding float32, corners FlagCorner) {
	if rounding <= 0 || corners == FlagCornerNone {
		// sharp rectangle: four corners, clockwise
		dl.PathLineTo(a)
		dl.PathLineTo(f32.Vec2{b[0], a[1]})
		dl.PathLineTo(b)
		dl.PathLineTo(f32.Vec2{a[0], b[1]})
		return
	}
	// per-corner radius: `rounding` when the corner flag is set, else 0
	radius := func(flag FlagCorner) (r float32) {
		if corners&flag != 0 {
			r = rounding
		}
		return
	}
	bl, br := radius(FlagCornerBottomLeft), radius(FlagCornerBottomRight)
	tr, tl := radius(FlagCornerTopRight), radius(FlagCornerTopLeft)
	dl.PathArcToFast(f32.Vec2{a[0] + bl, a[1] + bl}, bl, 6, 9)  // bottom-left
	dl.PathArcToFast(f32.Vec2{b[0] - br, a[1] + br}, br, 9, 12) // bottom-right
	dl.PathArcToFast(f32.Vec2{b[0] - tr, b[1] - tr}, tr, 0, 3)  // top-right
	dl.PathArcToFast(f32.Vec2{a[0] + tl, b[1] - tl}, tl, 3, 6)  // top-left
}
// AddLine strokes a single open segment from a to b; the half-pixel offset
// centers the stroke on pixel centers.
func (dl *DrawList) AddLine(a, b f32.Vec2, color uint32, thickness float32) {
	half := f32.Vec2{.5, .5}
	dl.PathLineTo(a.Add(half))
	dl.PathLineTo(b.Add(half))
	dl.PathStroke(color, thickness, false)
}
// All non-filled shapes are implemented via the path API.
//
// AddRect strokes a rectangle outline with optional rounded corners.
// TODO: imgui applies a half-pixel inset here (see the commented call);
// kept un-inset pending a decision.
func (dl *DrawList) AddRect(a, b f32.Vec2, color uint32, rounding float32, roundFlags FlagCorner, thickness float32) {
	//dl.PathRect(a.Add(mgl32.Vec2{5, .5}), b.Sub(mgl32.Vec2{.5, .5}), rounding, roundFlags)
	dl.PathRect(a, b, rounding, roundFlags)
	dl.PathStroke(color, thickness, true)
}
// AddRectFilled draws a solid rectangle; when rounding is requested it goes
// through the path/convex-fill machinery, otherwise it emits a plain quad.
func (dl *DrawList) AddRectFilled(min, max f32.Vec2, color uint32, rounding float32, corner FlagCorner) {
	rounded := rounding > 0 && corner != FlagCornerNone
	if !rounded {
		// fast path: one quad, no path buffer involved
		dl.PrimReserve(6, 4)
		dl.PrimRect(min, max, color)
		dl.AddCommand(6)
		return
	}
	dl.PathRect(min, max, rounding, corner)
	dl.PathFillConvex(color)
}
// AddRectFilledMultiColor should draw a rectangle with per-corner colors
// (gradient fill). Unimplemented stub.
func (dl *DrawList) AddRectFilledMultiColor() {
}
// AddQuad strokes the closed outline a->b->c->d.
func (dl *DrawList) AddQuad(a, b, c, d f32.Vec2, color uint32, thickness float32) {
	for _, p := range [4]f32.Vec2{a, b, c, d} {
		dl.PathLineTo(p)
	}
	dl.PathStroke(color, thickness, true)
}
// AddQuadFilled fills the (assumed convex) quad a->b->c->d.
func (dl *DrawList) AddQuadFilled(a, b, c, d f32.Vec2, color uint32) {
	for _, p := range [4]f32.Vec2{a, b, c, d} {
		dl.PathLineTo(p)
	}
	dl.PathFillConvex(color)
}
// AddTriangle strokes the closed triangle a->b->c.
func (dl *DrawList) AddTriangle(a, b, c f32.Vec2, color uint32, thickness float32) {
	for _, p := range [3]f32.Vec2{a, b, c} {
		dl.PathLineTo(p)
	}
	dl.PathStroke(color, thickness, true)
}
// AddTriangleFilled fills the triangle a->b->c.
func (dl *DrawList) AddTriangleFilled(a, b, c f32.Vec2, color uint32) {
	for _, p := range [3]f32.Vec2{a, b, c} {
		dl.PathLineTo(p)
	}
	dl.PathFillConvex(color)
}
// AddCircle strokes a circle approximated by `segments` chords. The arc stops
// one segment short of 2*Pi; the closed stroke supplies the final chord.
func (dl *DrawList) AddCircle(centre f32.Vec2, radius float32, color uint32, segments int, thickness float32) {
	arcMax := math.Pi * 2 * float32(segments-1) / float32(segments)
	dl.PathArcTo(centre, radius, 0.0, arcMax, segments)
	dl.PathStroke(color, thickness, true)
}
// AddCircleFilled fills a circle approximated by `segments` chords
// (see AddCircle for the arc-range convention).
func (dl *DrawList) AddCircleFilled(centre f32.Vec2, radius float32, color uint32, segments int) {
	arcMax := math.Pi * 2 * float32(segments-1) / float32(segments)
	dl.PathArcTo(centre, radius, 0.0, arcMax, segments)
	dl.PathFillConvex(color)
}
// AddBezierCurve strokes a cubic bezier from pos0 to pos1 with control
// points cp0/cp1.
// NOTE(review): PathBezierCurveTo is an empty stub, so only the single
// point from PathLineTo(pos0) reaches the path — nothing visible is drawn.
// Confirm whether this entry point is meant to be live yet.
func (dl *DrawList) AddBezierCurve(pos0 f32.Vec2, cp0, cp1 f32.Vec2, pos1 f32.Vec2,
	color uint32, thickness float32, segments int) {
	dl.PathLineTo(pos0)
	dl.PathBezierCurveTo(cp0, cp1, pos1, segments)
	dl.PathStroke(color, thickness, false)
}
// AddImage draws texture texId as an axis-aligned quad a..b with UVs
// uva..uvb, tinted by color. If texId is not already the active texture it
// is pushed for the duration of this call.
func (dl *DrawList) AddImage(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32) {
	dl.PrimReserve(6, 4)
	if n := len(dl.TextureIdStack); n == 0 || dl.TextureIdStack[n-1] != texId {
		dl.PushTextureId(texId)
		defer dl.PopTextureId()
	}
	dl.PrimRectUV(a, b, uva, uvb, color)
	dl.AddCommand(6)
}
// AddImageQuad draws texture texId on the arbitrary quad a..d with
// per-corner UVs. Pushes texId for the call if it is not already active.
func (dl *DrawList) AddImageQuad(texId uint16, a, b, c, d f32.Vec2, uva, uvb, uvc, uvd f32.Vec2, color uint32) {
	dl.PrimReserve(6, 4)
	if n := len(dl.TextureIdStack); n == 0 || dl.TextureIdStack[n-1] != texId {
		dl.PushTextureId(texId)
		defer dl.PopTextureId()
	}
	dl.PrimQuadUV(a, b, c, d, uva, uvb, uvc, uvd, color)
	dl.AddCommand(6)
}
// AddImageRound draws texture texId into rect a..b with rounded corners:
// it fills a rounded-rect path, then rewrites each generated vertex's UV by
// linearly mapping its position within a..b into uva..uvb.
func (dl *DrawList) AddImageRound(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32, rounding float32, corners FlagCorner) {
	if rounding <= 0 || (corners & FlagCornerAll) == 0 {
		// no rounding requested: plain quad fast path
		dl.AddImage(texId, a, b, uva, uvb, color)
		return
	}
	if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
		dl.PushTextureId(texId)
		defer dl.PopTextureId()
	}
	dl.PathRect(a, b, rounding, corners)
	dl.PathFillConvex(color)
	// map uv to vertex - linear scale
	// NOTE(review): the loop below assumes VtxWriter still aliases exactly
	// the vertices PathFillConvex just wrote — confirm nothing re-slices
	// the writer between the fill and this fix-up.
	xySize, uvSize := b.Sub(a), uvb.Sub(uva)
	var scale f32.Vec2
	if xySize[0] != 0 {
		scale[0] = uvSize[0]/xySize[0]
	}
	if xySize[1] != 0 {
		scale[1] = uvSize[1]/xySize[1]
	}
	// clamp??
	for i := range dl.VtxWriter {
		vertex := &dl.VtxWriter[i]
		// offset of this vertex inside the rect, rescaled into UV space
		dx := (vertex.xy[0] - a[0]) * scale[0]
		dy := (vertex.xy[1] - a[1]) * scale[1]
		vertex.uv = f32.Vec2{uva[0]+dx, uva[1]+dy}
	}
}
// NinePatch Algorithm
// 12 13 14 15
// x1 x2 max
// +----+----+----+
// | | | |
// | | |p1 |
// +----+----+----+ y2
// | | | |
// | |p0 | |
// +----+----+----+ y1
// | | | |
// | | | |
// +----+----+----+
//min
// 0 1 2 3
//patch = {x1, x2, y1, y2} % TextureSize
// AddImageNinePatch draws texture texId stretched into min..max as a
// nine-patch (see the diagram above): a 4x4 vertex grid whose inner lines
// (x1,x2 / y1,y2) are fixed-size borders taken from `patch` (fractions of
// the texture size), while the centre cell stretches.
func (dl *DrawList) AddImageNinePatch(texId uint16, min, max f32.Vec2, uva, uvb f32.Vec2, patch f32.Vec4, color uint32) {
	if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
		dl.PushTextureId(texId)
		defer dl.PopTextureId()
	}
	// NOTE(review): the lookup's ok-flag is discarded — an invalid texId
	// presumably yields a zero-size texture here; confirm bk.R.Texture's
	// failure behavior.
	_, tex := bk.R.Texture(texId)
	texSize := f32.Vec2{tex.Width, tex.Height}
	// 9 cells * 2 triangles * 3 indices; 4x4 vertex grid
	idxCount, vtxCount := 9 * 6, 16
	dl.PrimReserve(idxCount, vtxCount)
	// inner grid lines in screen space and UV space
	x1, x2, y1, y2 := min[0]+patch[0]*texSize[0], max[0]-patch[1]*texSize[0], min[1]+patch[2]*texSize[1], max[1]-patch[3]*texSize[1]
	uvw, uvh := uvb[0]-uva[0], uvb[1]-uva[1]
	u1, u2, v1, v2 := uva[0]+patch[0]*uvw, uvb[0]-patch[1]*uvw, uva[1]+patch[2]*uvh, uvb[1]-patch[3]*uvh
	// if the borders overlap (target smaller than the patch), collapse the
	// centre cell to the midpoint
	if x2 < x1 {
		x1 = (min[0] + max[0])/2; x2 = x1
	}
	if y2 < y1 {
		y1 = (min[1] + max[1])/2; y2 = y1
	}
	vtxWriter := dl.VtxWriter
	idxWriter := dl.IdxWriter
	// fill vertex
	vtxWriter[0] = DrawVert{min, uva, color}
	vtxWriter[1] = DrawVert{f32.Vec2{x1, min[1]}, f32.Vec2{u1, uva[1]}, color}
	vtxWriter[2] = DrawVert{f32.Vec2{x2, min[1]}, f32.Vec2{u2, uva[1]}, color}
	vtxWriter[3] = DrawVert{f32.Vec2{max[0], min[1]}, f32.Vec2{uvb[0], uva[1]}, color}
	vtxWriter[4] = DrawVert{f32.Vec2{min[0], y1}, f32.Vec2{uva[0], v1}, color}
	vtxWriter[5] = DrawVert{f32.Vec2{x1, y1}, f32.Vec2{u1, v1}, color}
	vtxWriter[6] = DrawVert{f32.Vec2{x2, y1}, f32.Vec2{u2, v1}, color}
	vtxWriter[7] = DrawVert{f32.Vec2{max[0], y1}, f32.Vec2{uvb[0], v1}, color}
	vtxWriter[8] = DrawVert{f32.Vec2{min[0], y2}, f32.Vec2{uva[0], v2}, color}
	vtxWriter[9] = DrawVert{f32.Vec2{x1, y2}, f32.Vec2{u1, v2}, color}
	vtxWriter[10] = DrawVert{f32.Vec2{x2, y2}, f32.Vec2{u2, v2}, color}
	vtxWriter[11] = DrawVert{f32.Vec2{max[0], y2}, f32.Vec2{uvb[0], v2}, color}
	vtxWriter[12] = DrawVert{f32.Vec2{min[0], max[1]}, f32.Vec2{uva[0], uvb[1]}, color}
	vtxWriter[13] = DrawVert{f32.Vec2{x1, max[1]}, f32.Vec2{u1, uvb[1]}, color}
	vtxWriter[14] = DrawVert{f32.Vec2{x2, max[1]}, f32.Vec2{u2, uvb[1]}, color}
	vtxWriter[15] = DrawVert{max, uvb, color}
	// fill index: fixed triangulation of the 4x4 grid, offset by the
	// first vertex of this primitive
	ii := uint16(dl.vtxIndex)
	for i, v := range ninePatchIndex {
		idxWriter[i] = DrawIdx(ii+v)
	}
	dl.idxIndex += idxCount
	dl.vtxIndex += vtxCount
	dl.AddCommand(idxCount)
}
// ninePatchIndex triangulates the 4x4 nine-patch vertex grid (see the
// diagram above AddImageNinePatch): 9 cells * 2 triangles * 3 indices = 54.
var ninePatchIndex = [54]uint16 {
	0, 1, 5, 0, 5, 4, 1, 2, 6, 1, 6, 5, 2, 3, 7, 2, 7, 6,
	4, 5, 9, 4, 9, 8, 5, 6, 10, 5, 10, 9, 6, 7, 11, 6, 11, 10,
	8, 9, 13, 8, 13, 12, 9, 10, 14, 9, 14, 13, 10, 11,15, 10, 15, 14,
}
// AddText renders `text` at pos and returns the rendered size. A nil font or
// zero fontSize falls back to the draw list's defaults; a positive wrapWidth
// enables word wrapping.
func (dl *DrawList) AddText(pos f32.Vec2, text string, font font.Font, fontSize float32, color uint32, wrapWidth float32) (size f32.Vec2){
	if len(text) == 0 {
		return
	}
	// fall back to draw-list defaults
	if font == nil {
		font = dl.Font
	}
	if fontSize == 0 {
		fontSize = dl.FontSize
	}
	fr := &FontRender{
		DrawList: dl,
		fontSize: fontSize,
		font:     font,
		color:    color,
	}
	if wrapWidth > 0 {
		return fr.RenderWrapped(pos, text, wrapWidth)
	}
	return fr.RenderText(pos, text)
}
// Every draw produces a Command, which can waste memory
// (1k cmds = 1000 * 6 * 4 = 24k); to reduce this, each new command is
// merged into the previous one when their render state matches.
//
// AddCommand records a draw command covering elemCount freshly written
// indices, merging with the previous command if clip rect, texture and
// z-order are all identical.
func (dl *DrawList) AddCommand(elemCount int) {
	var (
		clip = dl.CurrentClipRect()
		tex = dl.CurrentTextureId()
		order = dl.ZOrder
		index = dl.cmdIndex
	)
	// cmdIndex starts at 1 (Initialize), so index-1 is always in range;
	// CmdBuffer[0] is a zero sentinel the first real command diffs against.
	if prev := &dl.CmdBuffer[index-1]; prev.ClipRect == clip && prev.TextureId == tex && prev.zOrder == order{
		prev.ElemCount += uint16(elemCount)
	} else {
		// NOTE(review): CmdBuffer is fixed at 1024 entries and never grows
		// here — exceeding it would panic. Confirm callers cannot exceed it,
		// or grow the buffer like PrimReserve does.
		fi := prev.FirstIndex+prev.ElemCount
		dl.CmdBuffer[index] = DrawCmd{fi,uint16(elemCount),clip,tex, order}
		dl.cmdIndex += 1
	}
}
// Commands returns the recorded draw commands, skipping the zero sentinel
// kept at index 0 (see AddCommand).
func (dl *DrawList) Commands() []DrawCmd {
	return dl.CmdBuffer[1:dl.cmdIndex]
}
| .idxI | identifier_name |
drawing.go | package gui
import (
"korok.io/korok/math/f32"
"korok.io/korok/math"
"korok.io/korok/gfx/bk"
"korok.io/korok/gfx/font"
)
// DrawListFlags selects optional rendering features of a DrawList.
type DrawListFlags uint32
const (
	// NOTE(review): declared with plain iota, so FlagAntiAliasedLine is 0
	// and cannot be tested with a bitmask — confirm whether 1<<iota was
	// intended. (Neither flag is consulted in this file.)
	FlagAntiAliasedLine DrawListFlags = iota
	FlagAntiAliasedFill
)
// Rounding corner:
// A: 0x0000 0001 top-left
// B: 0x0000 0002 top-right
// C: 0x0000 0004 down-right
// D: 0x0000 0008 down-left
type FlagCorner uint32
const (
	FlagCornerNone FlagCorner = 0x0000
	FlagCornerTopLeft = 0x0001
	FlagCornerTopRight = 0x0002
	FlagCornerBottomRight = 0x0004
	FlagCornerBottomLeft = 0x0008
	FlagCornerAll = 0x000F
)
// Align describes text/widget alignment; the directional values are
// bit flags (1<<iota) that can be combined, AlignCenter is 0.
type Align uint32
const (
	AlignCenter Align = iota
	AlignLeft = 1 << iota
	AlignRight = 1 << iota
	AlignTop = 1 << iota
	AlignBottom = 1 << iota
)
const (
	// DefaultZOrder is the initial z-order for new draw lists: 0x7FFF-100.
	DefaultZOrder = int16(0xFFFF>>1-100)
)
// DrawList provide method to write primitives to buffer
type DrawCmd struct {
FirstIndex uint16
ElemCount uint16
ClipRect f32.Vec4
TextureId uint16
zOrder int16
}
type DrawIdx uint16
type DrawVert struct {
xy f32.Vec2
uv f32.Vec2
color uint32
}
type DrawList struct {
CmdBuffer []DrawCmd
IdxBuffer []DrawIdx
VtxBuffer []DrawVert
cmdIndex, idxIndex, vtxIndex int
cmdCap, idxCap, vtxCap int
// Data *DrawListSharedData
OwnerName string // 窗口名
VtxCurrentIdx int // VtxBuffer.Size
// 指向当前正在使用的 cmdbuffer 的位置
VtxWriter []DrawVert
IdxWriter []DrawIdx
ClipRectStack[]f32.Vec4
TextureIdStack []uint16
// path
path [64]f32.Vec2
pathUsed int
FullScreen f32.Vec4
TexUVWhitePixel f32.Vec2
CircleVtx12 [12]f32.Vec2
Font font.Font
FontSize float32
Flags DrawListFlags
ZOrder int16
}
// NewDrawList returns an initialized, empty draw list.
func NewDrawList() *DrawList {
	list := new(DrawList)
	list.Initialize()
	return list
}
// Initialize allocates the command/index/vertex buffers and bakes the
// 12-gon unit-circle table consumed by PathArcToFast.
//
// FIX: the circle table used the literal 6.28 instead of 2*Pi, so the baked
// polygon did not close exactly; it now uses math.Pi (already used elsewhere
// in this file).
func (dl *DrawList) Initialize() {
	dl.CmdBuffer = make([]DrawCmd, 1024)
	dl.IdxBuffer = make([]DrawIdx, 2024)
	dl.VtxBuffer = make([]DrawVert, 2024)
	// TODO
	dl.TexUVWhitePixel = f32.Vec2{0, 0}
	for i := 0; i < 12; i++ {
		a := (2 * math.Pi / 12) * float32(i)
		dl.CircleVtx12[i] = f32.Vec2{math.Cos(a), math.Sin(a)}
	}
	dl.ZOrder = DefaultZOrder
	// CmdBuffer[0] stays zero so AddCommand can safely inspect index-1
	dl.cmdIndex = 1 // skip first one
}
// Empty reports whether the list holds no renderable geometry.
func (dl *DrawList) Empty() bool {
	return dl.idxIndex == 0 || dl.vtxIndex == 0
}
// Size returns the number of indices and vertices currently written.
func (dl *DrawList) Size() (idx, vdx int) {
	return dl.idxIndex, dl.vtxIndex
}
// TODO
// Clear resets the write cursors for a new frame; buffer capacity is kept.
func (dl *DrawList) Clear() {
	dl.cmdIndex = 1 // keep the sentinel command at index 0
	dl.idxIndex = 0
	dl.vtxIndex = 0
}
// PathClear discards the accumulated path points.
func (dl *DrawList) PathClear() {
	dl.pathUsed = 0
}
// PathLineTo appends one point to the current path; once the fixed 64-entry
// buffer is full, further points are silently dropped.
//
// BUG FIX: the capacity check was `pathUsed < n-1`, wasting the final slot;
// writing dl.path[pathUsed] is safe for any pathUsed < n.
func (dl *DrawList) PathLineTo(pos f32.Vec2) {
	if dl.pathUsed < len(dl.path) {
		dl.path[dl.pathUsed] = pos
		dl.pathUsed += 1
	}
}
// PathLineToMergeDuplicate should append pos only when it differs from the
// last path point (the commented C++ shows the intended imgui behavior).
// Unimplemented stub.
func (dl *DrawList) PathLineToMergeDuplicate(pos f32.Vec2) {
	//if (_Path.Size == 0 || memcmp(&_Path[_Path.Size-1], &pos, 8) != 0)
	//	_Path.push_back(pos);
}
// PathFillConvex fills the current path as a convex polygon, then resets it.
func (dl *DrawList) PathFillConvex(col uint32) {
	dl.AddConvexPolyFilled(dl.path[:dl.pathUsed], col)
	dl.PathClear()
}
// default: thickness=1.0
// PathStroke strokes the current path (closed joins the last point back to
// the first), then resets it. Conventional default thickness is 1.0.
func (dl *DrawList) PathStroke(color uint32, thickness float32, closed bool) {
	points := dl.path[:dl.pathUsed]
	dl.AddPolyLine(points, color, thickness, closed)
	dl.pathUsed = 0
}
// CurrentClipRect returns the top of the clip stack, or the full-screen
// rect when no clip has been pushed.
func (dl *DrawList) CurrentClipRect() f32.Vec4 {
	if n := len(dl.ClipRectStack); n > 0 {
		return dl.ClipRectStack[n-1]
	}
	return dl.FullScreen
}
// CurrentTextureId returns the top of the texture stack, or 0 when empty.
func (dl *DrawList) CurrentTextureId() uint16 {
	if n := len(dl.TextureIdStack); n > 0 {
		return dl.TextureIdStack[n-1]
	}
	return 0
}
// will result in new draw-call
func (dl *DrawList) UpdateClipRect() {
//clip := dl.CurrentClipRect()
}
// UpdateTextureId is the texture counterpart of UpdateClipRect; stub.
func (dl *DrawList) UpdateTextureId() {
}
// Clip 相关的操作
func (dl *DrawList) PushClipRect(min, max f32.Vec2, intersectCurrentClip bool) {
cr := f32.Vec4{min[0], min[1], max[0], max[1]}
if intersectCurrentClip && len(dl.ClipRectStack) > 0{
current := dl.ClipRectStack[len(dl.ClipRectStack)-1]
if cr[0] < current[0] {
cr[0] = current[0]
}
if cr[1] < current[1] {
cr[1] = current[1]
}
if cr[2] > current[2] {
cr[2] = current[2]
}
if cr[3] > current[3] {
cr[3] = current[3]
}
cr[2] = math.Max(cr[0], cr[2])
cr[3] = math.Max(cr[1], cr[3])
dl.ClipRectStack = append(dl.ClipRectStack, cr)
dl.UpdateClipRect()
}
}
// PushClipRectFullScreen pushes the full-screen rect without intersecting.
func (dl *DrawList) PushClipRectFullScreen() {
	fs := dl.FullScreen
	dl.PushClipRect(f32.Vec2{fs[0], fs[1]}, f32.Vec2{fs[2], fs[3]}, false)
}
// PopClipRect removes the top clip rect; popping an empty stack is a no-op.
func (dl *DrawList) PopClipRect() {
	stack := dl.ClipRectStack
	if len(stack) == 0 {
		return
	}
	dl.ClipRectStack = stack[:len(stack)-1]
}
// GetClipRectMin should return the top-left of the current clip rect;
// currently a stub that always returns the origin.
func (dl *DrawList) GetClipRectMin() f32.Vec2 {
	return f32.Vec2{0, 0 }
}
// GetClipRectMax should return the bottom-right of the current clip rect;
// currently a stub that always returns the origin.
func (dl *DrawList) GetClipRectMax() f32.Vec2 {
	return f32.Vec2{0, 0 }
}
func (dl *DrawList) PushTextureId(texId uint16) {
dl.TextureIdStack = append(dl.TextureIdStack, texId)
} | }
}
// primitive operation, auto scale by 1024
// primitive operation, auto scale by 1024
//
// PrimReserve guarantees room for vtxCount vertices and idxCount indices,
// growing the buffers as needed, and points VtxWriter/IdxWriter at the
// reserved sub-slices.
//
// BUG FIX: growth was a fixed +1024, which is insufficient when one
// reservation exceeds 1024 elements; buffers now grow to cover the actual
// requirement (plus slack).
func (dl *DrawList) PrimReserve(idxCount, vtxCount int) {
	if sz, require := len(dl.VtxBuffer), dl.vtxIndex+vtxCount; require >= sz {
		grow := sz + 1024
		if grow <= require {
			grow = require + 1024
		}
		vtxBuffer := make([]DrawVert, grow)
		copy(vtxBuffer, dl.VtxBuffer)
		dl.VtxBuffer = vtxBuffer
	}
	if sz, require := len(dl.IdxBuffer), dl.idxIndex+idxCount; require >= sz {
		grow := sz + 1024
		if grow <= require {
			grow = require + 1024
		}
		idxBuffer := make([]DrawIdx, grow)
		copy(idxBuffer, dl.IdxBuffer)
		dl.IdxBuffer = idxBuffer
	}
	dl.VtxWriter = dl.VtxBuffer[dl.vtxIndex : dl.vtxIndex+vtxCount]
	dl.IdxWriter = dl.IdxBuffer[dl.idxIndex : dl.idxIndex+idxCount]
}
// PrimRect writes an axis-aligned solid-color quad using the white-pixel UV.
// Caller must have reserved 6 indices / 4 vertices via PrimReserve.
func (dl *DrawList) PrimRect(min, max f32.Vec2, color uint32) {
	uv := dl.TexUVWhitePixel
	b := f32.Vec2{max[0], min[1]}
	d := f32.Vec2{min[0], max[1]}
	dl.PrimQuadUV(min, b, max, d, uv, uv, uv, uv, color)
}
// PrimRectUV writes a textured axis-aligned quad given two opposite corners
// (a, c) and their UVs; the remaining corners/UVs are derived.
// Caller must have reserved 6 indices / 4 vertices via PrimReserve.
func (dl *DrawList) PrimRectUV(a, c f32.Vec2, uva, uvc f32.Vec2, color uint32) {
	b := f32.Vec2{c[0], a[1]}
	d := f32.Vec2{a[0], c[1]}
	uvb := f32.Vec2{uvc[0], uva[1]}
	uvd := f32.Vec2{uva[0], uvc[1]}
	dl.PrimQuadUV(a, b, c, d, uva, uvb, uvc, uvd, color)
}
// PrimQuadUV writes an arbitrary textured quad as two triangles
// (a,b,c) and (a,c,d). Caller must have reserved 6 indices / 4 vertices.
func (dl *DrawList) PrimQuadUV(a, b, c, d f32.Vec2, uva, uvb, uvc, uvd f32.Vec2, color uint32) {
	dl.VtxWriter[0] = DrawVert{a, uva, color}
	dl.VtxWriter[1] = DrawVert{b, uvb, color}
	dl.VtxWriter[2] = DrawVert{c, uvc, color}
	dl.VtxWriter[3] = DrawVert{d, uvd, color}
	base := dl.vtxIndex
	for i, off := range [6]int{0, 1, 2, 0, 2, 3} {
		dl.IdxWriter[i] = DrawIdx(base + off)
	}
	dl.vtxIndex += 4
	dl.idxIndex += 6
}
// Generates the final vertex and index data for a stroked polyline.
// Anti-aliasing is NOT supported: each segment is emitted as a plain
// thickness-wide quad, with no joins between consecutive segments.
func (dl *DrawList) AddPolyLine(points []f32.Vec2, color uint32, thickness float32, closed bool) {
	pointsCount := len(points)
	if pointsCount < 2 {
		return
	}
	uv := dl.TexUVWhitePixel
	// closed strokes add one extra segment from the last point back to the first
	count := pointsCount
	if !closed {
		count = pointsCount - 1
	}
	// Non Anti-aliased Stroke
	idxCount := count * 6
	vtxCount := count * 4
	dl.PrimReserve(idxCount, vtxCount)
	for i1 := 0; i1 < count; i1 ++{
		i2 := i1 + 1
		if i2 == pointsCount {
			i2 = 0
		}
		p1, p2 := points[i1], points[i2]
		// unit direction of the segment (InvLength guards zero-length with 1.0)
		diff := p2.Sub(p1)
		invLength := math.InvLength(diff[0], diff[1], 1.0)
		diff = diff.Mul(invLength)
		// half-thickness offset perpendicular to the segment
		dx := diff[0] * (thickness * 0.5)
		dy := diff[1] * (thickness * 0.5)
		vi := i1*4
		dl.VtxWriter[vi+0] = DrawVert{f32.Vec2{p1[0]+dy, p1[1]-dx}, uv, color}
		dl.VtxWriter[vi+1] = DrawVert{f32.Vec2{p2[0]+dy, p2[1]-dx}, uv, color}
		dl.VtxWriter[vi+2] = DrawVert{f32.Vec2{p2[0]-dy, p2[1]+dx}, uv, color}
		dl.VtxWriter[vi+3] = DrawVert{f32.Vec2{p1[0]-dy, p1[1]+dx}, uv, color}
		ii := i1*6
		// two triangles per segment; indices are absolute (vtxIndex advances
		// by 4 each iteration, matching the writer offset vi).
		// NOTE(review): DrawIdx is uint16 — a frame with >65535 vertices
		// would silently wrap here; confirm buffers are flushed before that.
		dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
		dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+1)
		dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+2)
		dl.IdxWriter[ii+3] = DrawIdx(dl.vtxIndex+0)
		dl.IdxWriter[ii+4] = DrawIdx(dl.vtxIndex+2)
		dl.IdxWriter[ii+5] = DrawIdx(dl.vtxIndex+3)
		dl.vtxIndex += 4
		dl.idxIndex += 6
	}
	dl.AddCommand(idxCount)
}
// Non Anti-aliased Fill
func (dl *DrawList) AddConvexPolyFilled(points []f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
pointCount := len(points)
idxCount := (pointCount-2)*3
vtxCount := pointCount
dl.PrimReserve(idxCount, vtxCount)
for i := 0; i < vtxCount; i++ {
dl.VtxWriter[i] = DrawVert{points[i], uv, color}
}
for i, ii := 2, 0; i < pointCount; i, ii = i+1, ii+3 {
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+i-1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+i)
}
dl.vtxIndex += vtxCount
dl.idxIndex += idxCount
dl.AddCommand(idxCount)
}
// 此处圆角的算法:
// 使用一个12边形近似圆形,采用中心放射算法,计算出
// 各个角度的sin/cos, 然后通过公式,得到圆圆形顶点
// f(x) = centre.x + cos()*radius
// f(y) = centre.y + sin()*radius
// 以上, 可以提前算好 sin/cos 加速整个过程
func (dl *DrawList) PathArcToFast(centre f32.Vec2, radius float32, min12, max12 int) {
if radius == 0 || min12 > max12 {
dl.path[dl.pathUsed] = centre; dl.pathUsed ++
return
}
for a := min12; a <= max12; a++ {
x := centre[0] + dl.CircleVtx12[a%12][0] * radius
y := centre[1] + dl.CircleVtx12[a%12][1] * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathArcTo(centre f32.Vec2, radius float32, min, max float32, segments int) {
if radius == 0 {
dl.path[dl.pathUsed] = centre; dl.pathUsed++
return
}
for i := 0; i <= segments; i++ {
a := min + (float32(i)/float32(segments)) * (max-min)
x := centre[0] + math.Cos(a) * radius
y := centre[1] + math.Sin(a) * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathBezierCurveTo(p2, p3, p4 f32.Vec2, segments int) {
}
func (dl *DrawList) PathRect(a, b f32.Vec2, rounding float32, corners FlagCorner) {
if rounding <= 0 || corners == FlagCornerNone {
dl.PathLineTo(a)
dl.PathLineTo(f32.Vec2{b[0], a[1]})
dl.PathLineTo(b)
dl.PathLineTo(f32.Vec2{a[0], b[1]})
} else {
var bl, br, tr, tl float32
if (corners & FlagCornerBottomLeft) != 0 {
bl = rounding
}
if (corners & FlagCornerBottomRight) != 0 {
br = rounding
}
if (corners & FlagCornerTopRight) != 0 {
tr = rounding
}
if (corners & FlagCornerTopLeft) != 0 {
tl = rounding
}
dl.PathArcToFast(f32.Vec2{a[0]+bl, a[1]+bl}, bl, 6, 9) // bottom-left
dl.PathArcToFast(f32.Vec2{b[0]-br, a[1]+br}, br, 9, 12)// bottom-right
dl.PathArcToFast(f32.Vec2{b[0]-tr, b[1]-tr}, tr, 0, 3) // top-right
dl.PathArcToFast(f32.Vec2{a[0]+tl, b[1]-tl}, tl, 3, 6) // top-left
}
}
func (dl *DrawList) AddLine(a, b f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a.Add(f32.Vec2{.5, .5}))
dl.PathLineTo(b.Add(f32.Vec2{.5, .5}))
dl.PathStroke(color, thickness, false)
}
// 所有非填充图形看来都是使用路径实现的
func (dl *DrawList) AddRect(a, b f32.Vec2, color uint32, rounding float32, roundFlags FlagCorner, thickness float32) {
//dl.PathRect(a.Add(mgl32.Vec2{5, .5}), b.Sub(mgl32.Vec2{.5, .5}), rounding, roundFlags)
// TODO
dl.PathRect(a, b, rounding, roundFlags)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddRectFilled(min, max f32.Vec2, color uint32, rounding float32, corner FlagCorner) {
if rounding > 0 && corner != FlagCornerNone {
dl.PathRect(min, max, rounding, corner)
dl.PathFillConvex(color)
} else {
dl.PrimReserve(6, 4)
dl.PrimRect(min, max, color)
dl.AddCommand(6)
}
}
func (dl *DrawList) AddRectFilledMultiColor() {
}
func (dl *DrawList) AddQuad(a, b, c, d f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathLineTo(d)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddQuadFilled(a, b, c, d f32.Vec2, color uint32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathLineTo(d)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddTriangle(a, b, c f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddTriangleFilled(a, b, c f32.Vec2, color uint32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddCircle(centre f32.Vec2, radius float32, color uint32, segments int, thickness float32) {
max := math.Pi * 2 * float32(segments-1)/float32(segments)
dl.PathArcTo(centre, radius, 0.0, max, segments)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddCircleFilled(centre f32.Vec2, radius float32, color uint32, segments int) {
max := math.Pi * 2 * float32(segments-1)/float32(segments)
dl.PathArcTo(centre, radius,0.0, max, segments)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddBezierCurve(pos0 f32.Vec2, cp0, cp1 f32.Vec2, pos1 f32.Vec2,
color uint32, thickness float32, segments int) {
dl.PathLineTo(pos0)
dl.PathBezierCurveTo(cp0, cp1, pos1, segments)
dl.PathStroke(color, thickness, false)
}
func (dl *DrawList) AddImage(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PrimReserve(6, 4)
dl.PrimRectUV(a, b, uva, uvb, color)
dl.AddCommand(6)
}
func (dl *DrawList) AddImageQuad(texId uint16, a, b, c, d f32.Vec2, uva, uvb, uvc, uvd f32.Vec2, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PrimReserve(6, 4)
dl.PrimQuadUV(a, b, c, d, uva, uvb, uvc, uvd, color)
dl.AddCommand(6)
}
func (dl *DrawList) AddImageRound(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32, rounding float32, corners FlagCorner) {
if rounding <= 0 || (corners & FlagCornerAll) == 0 {
dl.AddImage(texId, a, b, uva, uvb, color)
return
}
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PathRect(a, b, rounding, corners)
dl.PathFillConvex(color)
// map uv to vertex - linear scale
xySize, uvSize := b.Sub(a), uvb.Sub(uva)
var scale f32.Vec2
if xySize[0] != 0 {
scale[0] = uvSize[0]/xySize[0]
}
if xySize[1] != 0 {
scale[1] = uvSize[1]/xySize[1]
}
// clamp??
for i := range dl.VtxWriter {
vertex := &dl.VtxWriter[i]
dx := (vertex.xy[0] - a[0]) * scale[0]
dy := (vertex.xy[1] - a[1]) * scale[1]
vertex.uv = f32.Vec2{uva[0]+dx, uva[1]+dy}
}
}
// NinePatch Algorithm
// 12 13 14 15
// x1 x2 max
// +----+----+----+
// | | | |
// | | |p1 |
// +----+----+----+ y2
// | | | |
// | |p0 | |
// +----+----+----+ y1
// | | | |
// | | | |
// +----+----+----+
//min
// 0 1 2 3
//patch = {x1, x2, y1, y2} % TextureSize
func (dl *DrawList) AddImageNinePatch(texId uint16, min, max f32.Vec2, uva, uvb f32.Vec2, patch f32.Vec4, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
_, tex := bk.R.Texture(texId)
texSize := f32.Vec2{tex.Width, tex.Height}
idxCount, vtxCount := 9 * 6, 16
dl.PrimReserve(idxCount, vtxCount)
x1, x2, y1, y2 := min[0]+patch[0]*texSize[0], max[0]-patch[1]*texSize[0], min[1]+patch[2]*texSize[1], max[1]-patch[3]*texSize[1]
uvw, uvh := uvb[0]-uva[0], uvb[1]-uva[1]
u1, u2, v1, v2 := uva[0]+patch[0]*uvw, uvb[0]-patch[1]*uvw, uva[1]+patch[2]*uvh, uvb[1]-patch[3]*uvh
if x2 < x1 {
x1 = (min[0] + max[0])/2; x2 = x1
}
if y2 < y1 {
y1 = (min[1] + max[1])/2; y2 = y1
}
vtxWriter := dl.VtxWriter
idxWriter := dl.IdxWriter
// fill vertex
vtxWriter[0] = DrawVert{min, uva, color}
vtxWriter[1] = DrawVert{f32.Vec2{x1, min[1]}, f32.Vec2{u1, uva[1]}, color}
vtxWriter[2] = DrawVert{f32.Vec2{x2, min[1]}, f32.Vec2{u2, uva[1]}, color}
vtxWriter[3] = DrawVert{f32.Vec2{max[0], min[1]}, f32.Vec2{uvb[0], uva[1]}, color}
vtxWriter[4] = DrawVert{f32.Vec2{min[0], y1}, f32.Vec2{uva[0], v1}, color}
vtxWriter[5] = DrawVert{f32.Vec2{x1, y1}, f32.Vec2{u1, v1}, color}
vtxWriter[6] = DrawVert{f32.Vec2{x2, y1}, f32.Vec2{u2, v1}, color}
vtxWriter[7] = DrawVert{f32.Vec2{max[0], y1}, f32.Vec2{uvb[0], v1}, color}
vtxWriter[8] = DrawVert{f32.Vec2{min[0], y2}, f32.Vec2{uva[0], v2}, color}
vtxWriter[9] = DrawVert{f32.Vec2{x1, y2}, f32.Vec2{u1, v2}, color}
vtxWriter[10] = DrawVert{f32.Vec2{x2, y2}, f32.Vec2{u2, v2}, color}
vtxWriter[11] = DrawVert{f32.Vec2{max[0], y2}, f32.Vec2{uvb[0], v2}, color}
vtxWriter[12] = DrawVert{f32.Vec2{min[0], max[1]}, f32.Vec2{uva[0], uvb[1]}, color}
vtxWriter[13] = DrawVert{f32.Vec2{x1, max[1]}, f32.Vec2{u1, uvb[1]}, color}
vtxWriter[14] = DrawVert{f32.Vec2{x2, max[1]}, f32.Vec2{u2, uvb[1]}, color}
vtxWriter[15] = DrawVert{max, uvb, color}
// fill index
ii := uint16(dl.vtxIndex)
for i, v := range ninePatchIndex {
idxWriter[i] = DrawIdx(ii+v)
}
dl.idxIndex += idxCount
dl.vtxIndex += vtxCount
dl.AddCommand(idxCount)
}
var ninePatchIndex = [54]uint16 {
0, 1, 5, 0, 5, 4, 1, 2, 6, 1, 6, 5, 2, 3, 7, 2, 7, 6,
4, 5, 9, 4, 9, 8, 5, 6, 10, 5, 10, 9, 6, 7, 11, 6, 11, 10,
8, 9, 13, 8, 13, 12, 9, 10, 14, 9, 14, 13, 10, 11,15, 10, 15, 14,
}
func (dl *DrawList) AddText(pos f32.Vec2, text string, font font.Font, fontSize float32, color uint32, wrapWidth float32) (size f32.Vec2){
if text == "" {
return
}
if font == nil {
font = dl.Font
}
if fontSize == 0 {
fontSize = dl.FontSize
}
fr := &FontRender{
DrawList:dl,
fontSize:fontSize,
font:font,
color:color,
}
if wrapWidth > 0 {
size = fr.RenderWrapped(pos, text, wrapWidth)
} else {
size = fr.RenderText(pos, text)
}
return
}
// 每次绘制都会产生一个 Command (可能会造成内存浪费! 1k cmd = 1000 * 6 * 4 = 24k)
// 为了减少内存可以一边添加一边尝试向前合并
func (dl *DrawList) AddCommand(elemCount int) {
var (
clip = dl.CurrentClipRect()
tex = dl.CurrentTextureId()
order = dl.ZOrder
index = dl.cmdIndex
)
if prev := &dl.CmdBuffer[index-1]; prev.ClipRect == clip && prev.TextureId == tex && prev.zOrder == order{
prev.ElemCount += uint16(elemCount)
} else {
fi := prev.FirstIndex+prev.ElemCount
dl.CmdBuffer[index] = DrawCmd{fi,uint16(elemCount),clip,tex, order}
dl.cmdIndex += 1
}
}
func (dl *DrawList) Commands() []DrawCmd {
return dl.CmdBuffer[1:dl.cmdIndex]
} |
func (dl *DrawList) PopTextureId() {
if n := len(dl.TextureIdStack); n > 0 {
dl.TextureIdStack = dl.TextureIdStack[:n-1] | random_line_split |
drawing.go | package gui
import (
"korok.io/korok/math/f32"
"korok.io/korok/math"
"korok.io/korok/gfx/bk"
"korok.io/korok/gfx/font"
)
type DrawListFlags uint32
const (
FlagAntiAliasedLine DrawListFlags = iota
FlagAntiAliasedFill
)
// Rounding corner:
// A: 0x0000 0001 top-left
// B: 0x0000 0002 top-right
// C: 0x0000 0004 down-right
// D: 0x0000 0008 down-left
type FlagCorner uint32
const (
FlagCornerNone FlagCorner = 0x0000
FlagCornerTopLeft = 0x0001
FlagCornerTopRight = 0x0002
FlagCornerBottomRight = 0x0004
FlagCornerBottomLeft = 0x0008
FlagCornerAll = 0x000F
)
type Align uint32
const (
AlignCenter Align = iota
AlignLeft = 1 << iota
AlignRight = 1 << iota
AlignTop = 1 << iota
AlignBottom = 1 << iota
)
const (
DefaultZOrder = int16(0xFFFF>>1-100)
)
// DrawList provide method to write primitives to buffer
type DrawCmd struct {
FirstIndex uint16
ElemCount uint16
ClipRect f32.Vec4
TextureId uint16
zOrder int16
}
type DrawIdx uint16
type DrawVert struct {
xy f32.Vec2
uv f32.Vec2
color uint32
}
type DrawList struct {
CmdBuffer []DrawCmd
IdxBuffer []DrawIdx
VtxBuffer []DrawVert
cmdIndex, idxIndex, vtxIndex int
cmdCap, idxCap, vtxCap int
// Data *DrawListSharedData
OwnerName string // 窗口名
VtxCurrentIdx int // VtxBuffer.Size
// 指向当前正在使用的 cmdbuffer 的位置
VtxWriter []DrawVert
IdxWriter []DrawIdx
ClipRectStack[]f32.Vec4
TextureIdStack []uint16
// path
path [64]f32.Vec2
pathUsed int
FullScreen f32.Vec4
TexUVWhitePixel f32.Vec2
CircleVtx12 [12]f32.Vec2
Font font.Font
FontSize float32
Flags DrawListFlags
ZOrder int16
}
func NewDrawList() *DrawList {
dl := &DrawList{}
dl.Initialize()
return dl
}
func (dl *DrawList) Initialize() {
dl.CmdBuffer = make([]DrawCmd, 1024)
dl.IdxBuffer = make([]DrawIdx, 2024)
dl.VtxBuffer = make([]DrawVert, 2024)
// TODO
dl.TexUVWhitePixel = f32.Vec2{0, 0}
// TODO bake circle vertex!!
for i := 0; i < 12; i++ {
sin := math.Sin((6.28/12)*float32(i))
cos := math.Cos((6.28/12)*float32(i))
dl.CircleVtx12[i] = f32.Vec2{cos, sin}
}
dl.ZOrder = DefaultZOrder
dl.cmdIndex = 1 // skip first one
}
func (dl *DrawList) Empty() bool {
return dl.vtxIndex == 0 || dl.idxIndex == 0
}
func (dl *DrawList) Size() (idx, vdx int) {
idx = dl.idxIndex
vdx = dl.vtxIndex
return
}
// TODO
func (dl *DrawList) Clear() {
dl.cmdIndex = 1
dl.idxIndex = 0
dl.vtxIndex = 0
}
func (dl *DrawList) PathClear() {
dl.pathUsed = 0
}
func (dl *DrawList) PathLineTo(pos f32.Vec2) {
if n := len(dl.path); dl.pathUsed < n-1 {
dl.path[dl.pathUsed] = pos
dl.pathUsed += 1
}
}
func (dl *DrawList) PathLineToMergeDuplicate(pos f32.Vec2) {
//if (_Path.Size == 0 || memcmp(&_Path[_Path.Size-1], &pos, 8) != 0)
// _Path.push_back(pos);
}
func (dl *DrawList) PathFillConvex(col uint32) {
dl.AddConvexPolyFilled(dl.path[:dl.pathUsed], col);
dl.pathUsed = 0
}
// default: thickness=1.0
func (dl *DrawList) PathStroke(color uint32, thickness float32, closed bool) {
dl.AddPolyLine(dl.path[:dl. | ClipRect() (clip f32.Vec4) {
if n := len(dl.ClipRectStack); n > 0 {
clip = dl.ClipRectStack[n-1]
} else {
clip = dl.FullScreen
}
return
}
func (dl *DrawList) CurrentTextureId() (id uint16) {
if n := len(dl.TextureIdStack); n > 0 {
id = dl.TextureIdStack[n-1]
}
return
}
// will result in new draw-call
func (dl *DrawList) UpdateClipRect() {
//clip := dl.CurrentClipRect()
}
func (dl *DrawList) UpdateTextureId() {
}
// Clip 相关的操作
func (dl *DrawList) PushClipRect(min, max f32.Vec2, intersectCurrentClip bool) {
cr := f32.Vec4{min[0], min[1], max[0], max[1]}
if intersectCurrentClip && len(dl.ClipRectStack) > 0{
current := dl.ClipRectStack[len(dl.ClipRectStack)-1]
if cr[0] < current[0] {
cr[0] = current[0]
}
if cr[1] < current[1] {
cr[1] = current[1]
}
if cr[2] > current[2] {
cr[2] = current[2]
}
if cr[3] > current[3] {
cr[3] = current[3]
}
cr[2] = math.Max(cr[0], cr[2])
cr[3] = math.Max(cr[1], cr[3])
dl.ClipRectStack = append(dl.ClipRectStack, cr)
dl.UpdateClipRect()
}
}
func (dl *DrawList) PushClipRectFullScreen() {
min := f32.Vec2{dl.FullScreen[0], dl.FullScreen[1]}
max := f32.Vec2{dl.FullScreen[2], dl.FullScreen[3]}
dl.PushClipRect(min, max, false)
}
func (dl *DrawList) PopClipRect() {
if n := len(dl.ClipRectStack); n > 0 {
dl.ClipRectStack = dl.ClipRectStack[:n-1]
}
}
func (dl *DrawList) GetClipRectMin() f32.Vec2 {
return f32.Vec2{0, 0 }
}
func (dl *DrawList) GetClipRectMax() f32.Vec2 {
return f32.Vec2{0, 0 }
}
func (dl *DrawList) PushTextureId(texId uint16) {
dl.TextureIdStack = append(dl.TextureIdStack, texId)
}
// PopTextureId removes the top texture id; popping an empty stack is a no-op.
func (dl *DrawList) PopTextureId() {
	stack := dl.TextureIdStack
	if len(stack) == 0 {
		return
	}
	dl.TextureIdStack = stack[:len(stack)-1]
}
// primitive operation, auto scale by 1024
func (dl *DrawList) PrimReserve(idxCount, vtxCount int) {
if sz, require := len(dl.VtxBuffer), dl.vtxIndex+vtxCount; require >= sz {
vtxBuffer := make([]DrawVert, sz+1024)
copy(vtxBuffer, dl.VtxBuffer)
dl.VtxBuffer = vtxBuffer
}
if sz, require := len(dl.IdxBuffer), dl.idxIndex+idxCount; require >= sz {
idxBuffer := make([]DrawIdx, sz+1024)
copy(idxBuffer, dl.IdxBuffer)
dl.IdxBuffer = idxBuffer
}
dl.VtxWriter = dl.VtxBuffer[dl.vtxIndex:dl.vtxIndex+vtxCount]
dl.IdxWriter = dl.IdxBuffer[dl.idxIndex:dl.idxIndex+idxCount]
}
func (dl *DrawList) PrimRect(min, max f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
a, b, c, d := min, f32.Vec2{max[0], min[1]}, max, f32.Vec2{min[0], max[1]}
dl.VtxWriter[0] = DrawVert{a, uv, color}
dl.VtxWriter[1] = DrawVert{b, uv, color}
dl.VtxWriter[2] = DrawVert{c, uv, color}
dl.VtxWriter[3] = DrawVert{d, uv, color}
dl.IdxWriter[0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[1] = DrawIdx(dl.vtxIndex+1)
dl.IdxWriter[2] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[3] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[4] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[5] = DrawIdx(dl.vtxIndex+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
func (dl *DrawList) PrimRectUV(a, c f32.Vec2, uva, uvc f32.Vec2, color uint32) {
b, d := f32.Vec2{c[0], a[1]}, f32.Vec2{a[0], c[1]}
uvb, uvd := f32.Vec2{uvc[0], uva[1]}, f32.Vec2{uva[0], uvc[1]}
dl.VtxWriter[0] = DrawVert{a, uva, color}
dl.VtxWriter[1] = DrawVert{b, uvb, color}
dl.VtxWriter[2] = DrawVert{c, uvc, color}
dl.VtxWriter[3] = DrawVert{d, uvd, color}
ii := dl.vtxIndex
dl.IdxWriter[0] = DrawIdx(ii+0)
dl.IdxWriter[1] = DrawIdx(ii+1)
dl.IdxWriter[2] = DrawIdx(ii+2)
dl.IdxWriter[3] = DrawIdx(ii+0)
dl.IdxWriter[4] = DrawIdx(ii+2)
dl.IdxWriter[5] = DrawIdx(ii+3)
dl.idxIndex += 6
dl.vtxIndex += 4
}
func (dl *DrawList) PrimQuadUV(a, b, c, d f32.Vec2, uva, uvb,uvc, uvd f32.Vec2, color uint32) {
// vertex
dl.VtxWriter[0] = DrawVert{a, uva, color}
dl.VtxWriter[1] = DrawVert{b, uvb, color}
dl.VtxWriter[2] = DrawVert{c, uvc, color}
dl.VtxWriter[3] = DrawVert{d, uvd, color}
ii := dl.vtxIndex
dl.IdxWriter[0] = DrawIdx(ii+0)
dl.IdxWriter[1] = DrawIdx(ii+1)
dl.IdxWriter[2] = DrawIdx(ii+2)
dl.IdxWriter[3] = DrawIdx(ii+0)
dl.IdxWriter[4] = DrawIdx(ii+2)
dl.IdxWriter[5] = DrawIdx(ii+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
// 此处生成最终的顶点数据和索引数据
// 当前并不支持抗锯齿!!简单的用顶点生成线段
func (dl *DrawList) AddPolyLine(points []f32.Vec2, color uint32, thickness float32, closed bool) {
pointsCount := len(points)
if pointsCount < 2 {
return
}
uv := dl.TexUVWhitePixel
count := pointsCount
if !closed {
count = pointsCount - 1
}
// Non Anti-aliased Stroke
idxCount := count * 6
vtxCount := count * 4
dl.PrimReserve(idxCount, vtxCount)
for i1 := 0; i1 < count; i1 ++{
i2 := i1 + 1
if i2 == pointsCount {
i2 = 0
}
p1, p2 := points[i1], points[i2]
diff := p2.Sub(p1)
invLength := math.InvLength(diff[0], diff[1], 1.0)
diff = diff.Mul(invLength)
dx := diff[0] * (thickness * 0.5)
dy := diff[1] * (thickness * 0.5)
vi := i1*4
dl.VtxWriter[vi+0] = DrawVert{f32.Vec2{p1[0]+dy, p1[1]-dx}, uv, color}
dl.VtxWriter[vi+1] = DrawVert{f32.Vec2{p2[0]+dy, p2[1]-dx}, uv, color}
dl.VtxWriter[vi+2] = DrawVert{f32.Vec2{p2[0]-dy, p2[1]+dx}, uv, color}
dl.VtxWriter[vi+3] = DrawVert{f32.Vec2{p1[0]-dy, p1[1]+dx}, uv, color}
ii := i1*6
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[ii+3] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+4] = DrawIdx(dl.vtxIndex+2)
dl.IdxWriter[ii+5] = DrawIdx(dl.vtxIndex+3)
dl.vtxIndex += 4
dl.idxIndex += 6
}
dl.AddCommand(idxCount)
}
// Non Anti-aliased Fill
func (dl *DrawList) AddConvexPolyFilled(points []f32.Vec2, color uint32) {
uv := dl.TexUVWhitePixel
pointCount := len(points)
idxCount := (pointCount-2)*3
vtxCount := pointCount
dl.PrimReserve(idxCount, vtxCount)
for i := 0; i < vtxCount; i++ {
dl.VtxWriter[i] = DrawVert{points[i], uv, color}
}
for i, ii := 2, 0; i < pointCount; i, ii = i+1, ii+3 {
dl.IdxWriter[ii+0] = DrawIdx(dl.vtxIndex+0)
dl.IdxWriter[ii+1] = DrawIdx(dl.vtxIndex+i-1)
dl.IdxWriter[ii+2] = DrawIdx(dl.vtxIndex+i)
}
dl.vtxIndex += vtxCount
dl.idxIndex += idxCount
dl.AddCommand(idxCount)
}
// 此处圆角的算法:
// 使用一个12边形近似圆形,采用中心放射算法,计算出
// 各个角度的sin/cos, 然后通过公式,得到圆圆形顶点
// f(x) = centre.x + cos()*radius
// f(y) = centre.y + sin()*radius
// 以上, 可以提前算好 sin/cos 加速整个过程
func (dl *DrawList) PathArcToFast(centre f32.Vec2, radius float32, min12, max12 int) {
if radius == 0 || min12 > max12 {
dl.path[dl.pathUsed] = centre; dl.pathUsed ++
return
}
for a := min12; a <= max12; a++ {
x := centre[0] + dl.CircleVtx12[a%12][0] * radius
y := centre[1] + dl.CircleVtx12[a%12][1] * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathArcTo(centre f32.Vec2, radius float32, min, max float32, segments int) {
if radius == 0 {
dl.path[dl.pathUsed] = centre; dl.pathUsed++
return
}
for i := 0; i <= segments; i++ {
a := min + (float32(i)/float32(segments)) * (max-min)
x := centre[0] + math.Cos(a) * radius
y := centre[1] + math.Sin(a) * radius
dl.path[dl.pathUsed] = f32.Vec2{x, y}
dl.pathUsed ++
}
}
func (dl *DrawList) PathBezierCurveTo(p2, p3, p4 f32.Vec2, segments int) {
}
func (dl *DrawList) PathRect(a, b f32.Vec2, rounding float32, corners FlagCorner) {
if rounding <= 0 || corners == FlagCornerNone {
dl.PathLineTo(a)
dl.PathLineTo(f32.Vec2{b[0], a[1]})
dl.PathLineTo(b)
dl.PathLineTo(f32.Vec2{a[0], b[1]})
} else {
var bl, br, tr, tl float32
if (corners & FlagCornerBottomLeft) != 0 {
bl = rounding
}
if (corners & FlagCornerBottomRight) != 0 {
br = rounding
}
if (corners & FlagCornerTopRight) != 0 {
tr = rounding
}
if (corners & FlagCornerTopLeft) != 0 {
tl = rounding
}
dl.PathArcToFast(f32.Vec2{a[0]+bl, a[1]+bl}, bl, 6, 9) // bottom-left
dl.PathArcToFast(f32.Vec2{b[0]-br, a[1]+br}, br, 9, 12)// bottom-right
dl.PathArcToFast(f32.Vec2{b[0]-tr, b[1]-tr}, tr, 0, 3) // top-right
dl.PathArcToFast(f32.Vec2{a[0]+tl, b[1]-tl}, tl, 3, 6) // top-left
}
}
func (dl *DrawList) AddLine(a, b f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a.Add(f32.Vec2{.5, .5}))
dl.PathLineTo(b.Add(f32.Vec2{.5, .5}))
dl.PathStroke(color, thickness, false)
}
// 所有非填充图形看来都是使用路径实现的
func (dl *DrawList) AddRect(a, b f32.Vec2, color uint32, rounding float32, roundFlags FlagCorner, thickness float32) {
//dl.PathRect(a.Add(mgl32.Vec2{5, .5}), b.Sub(mgl32.Vec2{.5, .5}), rounding, roundFlags)
// TODO
dl.PathRect(a, b, rounding, roundFlags)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddRectFilled(min, max f32.Vec2, color uint32, rounding float32, corner FlagCorner) {
if rounding > 0 && corner != FlagCornerNone {
dl.PathRect(min, max, rounding, corner)
dl.PathFillConvex(color)
} else {
dl.PrimReserve(6, 4)
dl.PrimRect(min, max, color)
dl.AddCommand(6)
}
}
func (dl *DrawList) AddRectFilledMultiColor() {
}
func (dl *DrawList) AddQuad(a, b, c, d f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathLineTo(d)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddQuadFilled(a, b, c, d f32.Vec2, color uint32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathLineTo(d)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddTriangle(a, b, c f32.Vec2, color uint32, thickness float32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddTriangleFilled(a, b, c f32.Vec2, color uint32) {
dl.PathLineTo(a)
dl.PathLineTo(b)
dl.PathLineTo(c)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddCircle(centre f32.Vec2, radius float32, color uint32, segments int, thickness float32) {
max := math.Pi * 2 * float32(segments-1)/float32(segments)
dl.PathArcTo(centre, radius, 0.0, max, segments)
dl.PathStroke(color, thickness, true)
}
func (dl *DrawList) AddCircleFilled(centre f32.Vec2, radius float32, color uint32, segments int) {
max := math.Pi * 2 * float32(segments-1)/float32(segments)
dl.PathArcTo(centre, radius,0.0, max, segments)
dl.PathFillConvex(color)
}
func (dl *DrawList) AddBezierCurve(pos0 f32.Vec2, cp0, cp1 f32.Vec2, pos1 f32.Vec2,
color uint32, thickness float32, segments int) {
dl.PathLineTo(pos0)
dl.PathBezierCurveTo(cp0, cp1, pos1, segments)
dl.PathStroke(color, thickness, false)
}
func (dl *DrawList) AddImage(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PrimReserve(6, 4)
dl.PrimRectUV(a, b, uva, uvb, color)
dl.AddCommand(6)
}
func (dl *DrawList) AddImageQuad(texId uint16, a, b, c, d f32.Vec2, uva, uvb, uvc, uvd f32.Vec2, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PrimReserve(6, 4)
dl.PrimQuadUV(a, b, c, d, uva, uvb, uvc, uvd, color)
dl.AddCommand(6)
}
func (dl *DrawList) AddImageRound(texId uint16, a, b f32.Vec2, uva, uvb f32.Vec2, color uint32, rounding float32, corners FlagCorner) {
if rounding <= 0 || (corners & FlagCornerAll) == 0 {
dl.AddImage(texId, a, b, uva, uvb, color)
return
}
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
dl.PathRect(a, b, rounding, corners)
dl.PathFillConvex(color)
// map uv to vertex - linear scale
xySize, uvSize := b.Sub(a), uvb.Sub(uva)
var scale f32.Vec2
if xySize[0] != 0 {
scale[0] = uvSize[0]/xySize[0]
}
if xySize[1] != 0 {
scale[1] = uvSize[1]/xySize[1]
}
// clamp??
for i := range dl.VtxWriter {
vertex := &dl.VtxWriter[i]
dx := (vertex.xy[0] - a[0]) * scale[0]
dy := (vertex.xy[1] - a[1]) * scale[1]
vertex.uv = f32.Vec2{uva[0]+dx, uva[1]+dy}
}
}
// NinePatch Algorithm
// 12 13 14 15
// x1 x2 max
// +----+----+----+
// | | | |
// | | |p1 |
// +----+----+----+ y2
// | | | |
// | |p0 | |
// +----+----+----+ y1
// | | | |
// | | | |
// +----+----+----+
//min
// 0 1 2 3
//patch = {x1, x2, y1, y2} % TextureSize
func (dl *DrawList) AddImageNinePatch(texId uint16, min, max f32.Vec2, uva, uvb f32.Vec2, patch f32.Vec4, color uint32) {
if n := len(dl.TextureIdStack); n == 0 || texId != dl.TextureIdStack[n-1] {
dl.PushTextureId(texId)
defer dl.PopTextureId()
}
_, tex := bk.R.Texture(texId)
texSize := f32.Vec2{tex.Width, tex.Height}
idxCount, vtxCount := 9 * 6, 16
dl.PrimReserve(idxCount, vtxCount)
x1, x2, y1, y2 := min[0]+patch[0]*texSize[0], max[0]-patch[1]*texSize[0], min[1]+patch[2]*texSize[1], max[1]-patch[3]*texSize[1]
uvw, uvh := uvb[0]-uva[0], uvb[1]-uva[1]
u1, u2, v1, v2 := uva[0]+patch[0]*uvw, uvb[0]-patch[1]*uvw, uva[1]+patch[2]*uvh, uvb[1]-patch[3]*uvh
if x2 < x1 {
x1 = (min[0] + max[0])/2; x2 = x1
}
if y2 < y1 {
y1 = (min[1] + max[1])/2; y2 = y1
}
vtxWriter := dl.VtxWriter
idxWriter := dl.IdxWriter
// fill vertex
vtxWriter[0] = DrawVert{min, uva, color}
vtxWriter[1] = DrawVert{f32.Vec2{x1, min[1]}, f32.Vec2{u1, uva[1]}, color}
vtxWriter[2] = DrawVert{f32.Vec2{x2, min[1]}, f32.Vec2{u2, uva[1]}, color}
vtxWriter[3] = DrawVert{f32.Vec2{max[0], min[1]}, f32.Vec2{uvb[0], uva[1]}, color}
vtxWriter[4] = DrawVert{f32.Vec2{min[0], y1}, f32.Vec2{uva[0], v1}, color}
vtxWriter[5] = DrawVert{f32.Vec2{x1, y1}, f32.Vec2{u1, v1}, color}
vtxWriter[6] = DrawVert{f32.Vec2{x2, y1}, f32.Vec2{u2, v1}, color}
vtxWriter[7] = DrawVert{f32.Vec2{max[0], y1}, f32.Vec2{uvb[0], v1}, color}
vtxWriter[8] = DrawVert{f32.Vec2{min[0], y2}, f32.Vec2{uva[0], v2}, color}
vtxWriter[9] = DrawVert{f32.Vec2{x1, y2}, f32.Vec2{u1, v2}, color}
vtxWriter[10] = DrawVert{f32.Vec2{x2, y2}, f32.Vec2{u2, v2}, color}
vtxWriter[11] = DrawVert{f32.Vec2{max[0], y2}, f32.Vec2{uvb[0], v2}, color}
vtxWriter[12] = DrawVert{f32.Vec2{min[0], max[1]}, f32.Vec2{uva[0], uvb[1]}, color}
vtxWriter[13] = DrawVert{f32.Vec2{x1, max[1]}, f32.Vec2{u1, uvb[1]}, color}
vtxWriter[14] = DrawVert{f32.Vec2{x2, max[1]}, f32.Vec2{u2, uvb[1]}, color}
vtxWriter[15] = DrawVert{max, uvb, color}
// fill index
ii := uint16(dl.vtxIndex)
for i, v := range ninePatchIndex {
idxWriter[i] = DrawIdx(ii+v)
}
dl.idxIndex += idxCount
dl.vtxIndex += vtxCount
dl.AddCommand(idxCount)
}
var ninePatchIndex = [54]uint16 {
0, 1, 5, 0, 5, 4, 1, 2, 6, 1, 6, 5, 2, 3, 7, 2, 7, 6,
4, 5, 9, 4, 9, 8, 5, 6, 10, 5, 10, 9, 6, 7, 11, 6, 11, 10,
8, 9, 13, 8, 13, 12, 9, 10, 14, 9, 14, 13, 10, 11,15, 10, 15, 14,
}
func (dl *DrawList) AddText(pos f32.Vec2, text string, font font.Font, fontSize float32, color uint32, wrapWidth float32) (size f32.Vec2){
if text == "" {
return
}
if font == nil {
font = dl.Font
}
if fontSize == 0 {
fontSize = dl.FontSize
}
fr := &FontRender{
DrawList:dl,
fontSize:fontSize,
font:font,
color:color,
}
if wrapWidth > 0 {
size = fr.RenderWrapped(pos, text, wrapWidth)
} else {
size = fr.RenderText(pos, text)
}
return
}
// 每次绘制都会产生一个 Command (可能会造成内存浪费! 1k cmd = 1000 * 6 * 4 = 24k)
// 为了减少内存可以一边添加一边尝试向前合并
func (dl *DrawList) AddCommand(elemCount int) {
var (
clip = dl.CurrentClipRect()
tex = dl.CurrentTextureId()
order = dl.ZOrder
index = dl.cmdIndex
)
if prev := &dl.CmdBuffer[index-1]; prev.ClipRect == clip && prev.TextureId == tex && prev.zOrder == order{
prev.ElemCount += uint16(elemCount)
} else {
fi := prev.FirstIndex+prev.ElemCount
dl.CmdBuffer[index] = DrawCmd{fi,uint16(elemCount),clip,tex, order}
dl.cmdIndex += 1
}
}
func (dl *DrawList) Commands() []DrawCmd {
return dl.CmdBuffer[1:dl.cmdIndex]
}
| pathUsed], color, thickness, closed)
dl.PathClear()
}
func (dl *DrawList) Current | identifier_body |
server.py | """Movie Ratings."""
from jinja2 import StrictUndefined
from flask import Flask, render_template, redirect, request, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from sqlalchemy import func, update
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails silently.
# This is horrible. Fix this so that, instead, it raises an error.
app.jinja_env.undefined = StrictUndefined
@app.template_filter()
def datetimefilter(value, format='%b %d'):
"""Convert a datetime to a different format so it can be accessible in Jinja."""
return value.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@app.route('/')
def index():
|
@app.route("/users")
def user_list():
"""Show list of users."""
users = User.query.all()
return render_template("user_list.html", users=users)
# This takes to each user's profile from user list
@app.route("/users/<int:user_id>")
def user_profile(user_id):
"""Show user information"""
# Query by user id to return that record in database about user info
user = User.query.filter(User.user_id == user_id).one()
# import pdb; pdb.set_trace()
# Query to get all movies and scores rated by this user
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
user_movies = db.session.query(Rating.user_id,
Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.user_id == user_id).order_by(Movie.title).all()
# Passed user info into jinja and called on its attributes
# Passed user_movies into jinja and called on its attributes to get the info
return render_template("user_profile.html", user=user, user_movies = user_movies)
# # THIS WORKS, but we want to use /user/<int:user_id>, which we figured out above!!
# @app.route("/user-profile")
# def user_profile():
# """Show user information"""
# # import pdb; pdb.set_trace()
# # Get user email to query in User database and get all info about the user
# email = session["logged_in_user_email"]
# user = User.query.filter(User.email == email).one()
# # # Test code to see attributes of user object
# # user_id = user.user_id
# # age = user.age
# # zipcode = user.zipcode
# return render_template("user_profile.html", user=user)
@app.route("/signup-login", methods=["GET"])
def show_forms():
"""Show signup and login forms."""
return render_template("signup_login.html")
@app.route("/signup", methods=["POST"])
def signup():
"""Check if user exists in database, otherwise add user to database."""
# Get values from signup form
signup_email = request.form.get("signup_email")
signup_password = request.form.get("signup_password")
# If user exists, ask them to log in
# Otherwise, add user into database and log them in, redirecting to homepage
if db.session.query(User).filter(User.email == signup_email).first():
flash("You already have an account please use login!", "danger")
return redirect("/signup-login")
else:
new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)
db.session.add(new_user)
db.session.commit()
session["logged_in_user_email"] = signup_email
session["logged_in_user"] = new_user.user_id
flash("Your account has been created! You now are logged in!", "success")
return redirect("/")
@app.route("/login", methods=["POST"])
def login():
"""Check if user's email matches password, otherwise ask user to try again."""
# Get values from login form
login_email = request.form.get("login_email")
login_password = request.form.get("login_password")
# If user's email and password matches, log them in, redirecting them to homepage
# Otherwise, ask them to log in with the correct password
if db.session.query(User).filter(User.email == login_email,
User.password == login_password).first():
flash("Login SUCCESS.", "success")
# Query to get user's user id, in order to redirect user to their user profile
user = User.query.filter(User.email == login_email).one()
session["logged_in_user_email"] = login_email
session["logged_in_user"] = user.user_id
# Pass a variable through a string via string formatting
# so we can pass user_id into the redirected route, which is a string!!
return redirect("/users/%s" % user.user_id)
# return redirect("/")
else:
flash("Incorrect password. Please try again!", "danger")
return redirect("/signup-login")
@app.route("/logout")
def process_logout():
"""Log user out."""
del session["logged_in_user_email"]
del session["logged_in_user"]
flash("Logged out.", "success")
return redirect("/")
@app.route("/movies")
def movie_list():
"""Show list of movies."""
# sort movie titles alphbetically
movies = Movie.query.order_by(Movie.title).all()
return render_template("movie_list.html", movies=movies)
@app.route("/movies/<int:movie_id>", methods=['GET'])
def movie_profile(movie_id):
"""Show movie information.
If a user is logged in, let them add/edit a rating.
"""
if not session.get('logged_in_user_email'):
flash("Please login or signup to see the movie details and rate the movie!", "danger")
return redirect("/signup-login")
else:
# import pdb; pdb.set_trace();
# Query by movie id to return that record in database about movie info
# movie = Movie.query.filter(Movie.movie_id == movie_id).one()
movie = Movie.query.get(movie_id)
user = User.query.filter(User.email == session.get("logged_in_user_email")).one()
user_id = user.user_id
if user_id:
user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()
else:
user_rating = None
# Prediction code: only predict if the user hasn't rated it
prediction = None
if (not user_rating) and user_id:
user = User.query.get(user_id)
if user:
prediction = user.predict_rating(movie)
# Either use the prediction or their real rating
if prediction:
# User hasn't scored; use our prediction if we made one
effective_rating = prediction
elif user_rating:
# User has already scored for real; use that
effective_rating = user_rating.score
else:
# User hasn't scored and we couldn't get a prediction
effective_rating = None
# Get the wizard's rating, either by predicting or using real rating
wizard = User.query.filter_by(email="wizard@gmail.com").one()
wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()
if wizard_rating is None:
wizard_rating = wizard.predict_rating(movie)
else:
wizard_rating = wizard_rating.score
if wizard_rating and effective_rating:
difference = abs(wizard_rating - effective_rating)
else:
# We couldn't get a wizard rating, so we'll skip difference
difference = None
# Depending on how different we are from the Wizard, choose a message
BERATEMENT_MESSAGES = [
"I suppose you don't have such bad taste after all.",
"I regret every decision that I've ever made that has brought me to listen to your opinion.",
"Words fail me, as your taste in movies has clearly failed you.",
"That movie is great. For a clown to watch. Idiot.",
"Words cannot express the awfulness of your taste."
]
if difference is not None:
beratement = BERATEMENT_MESSAGES[int(difference)]
else:
beratement = None
# Tallies score of each rating (how many people rated this score per rating)
# Returns list of tuples for count_score
unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)
ordered_movies = unordered_ratings.order_by(Rating.score)
count_score = ordered_movies.all()
# Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja
avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()
# Query to get all ratings for a specific movie
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
ratings = db.session.query(Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()
# # Pass user info into jinja and called on its attributes
# # Pass count_score, avg_rating, and ratings into jinja
# return render_template("movie_profile.html", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)
return render_template(
"movie_profile.html",
movie=movie,
user_rating=user_rating,
avg_rating=avg_rating[0],
count_score=count_score,
prediction=prediction,
ratings=ratings,
beratement=beratement)
@app.route("/movies/<int:movie_id>/rate-movie")
def rate_movie(movie_id):
"""Get user rating score for movie"""
user_rating = request.args.get("user_rating")
# get user id from log in email address
user_email = session["logged_in_user_email"]
user = User.query.filter(User.email == user_email).one()
user_id = user.user_id
# Check if user rating exists in database
# If user has rated this movie before, update value
# Else, add user rating to database by movie id and user id
if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():
# When updating a value, we need to use the key-value pair in update()
db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({"score": user_rating})
# db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)
db.session.commit()
flash("You have rated this movie before! It has now been updated to %s." % (user_rating), "warning")
return redirect("/users/%s" % user_id)
else:
db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))
db.session.commit()
flash("You have rated this movie a %s." % (user_rating), "info")
return redirect("/users/%s" % user_id)
# Get user rating routed correctly, as this was just test code
# Fix label format for movie profile page
return render_template("rate_movie.html", user_rating=user_rating)
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the point
# that we invoke the DebugToolbarExtension
app.debug = True
connect_to_db(app)
# Use the DebugToolbar
# DebugToolbarExtension(app)
app.run()
| """Homepage."""
# We want user profile link to show if user is logged in and clicks on homepage
# Check if logged in and get the value or else return None
# If there is a value, query to get user information so that user.user_id can be accessed in jinja
# Else, pass None value through so that if statement in jinja not executed
user_email = session.get("logged_in_user_email", None)
if user_email is not None:
user = User.query.filter(User.email == user_email).one()
return render_template("homepage.html", user=user)
else:
return render_template("homepage.html", user=None) | identifier_body |
server.py | """Movie Ratings."""
from jinja2 import StrictUndefined
from flask import Flask, render_template, redirect, request, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from sqlalchemy import func, update
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails silently.
# This is horrible. Fix this so that, instead, it raises an error.
app.jinja_env.undefined = StrictUndefined
@app.template_filter()
def datetimefilter(value, format='%b %d'):
"""Convert a datetime to a different format so it can be accessible in Jinja."""
return value.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@app.route('/')
def index():
"""Homepage."""
# We want user profile link to show if user is logged in and clicks on homepage
# Check if logged in and get the value or else return None
# If there is a value, query to get user information so that user.user_id can be accessed in jinja
# Else, pass None value through so that if statement in jinja not executed
user_email = session.get("logged_in_user_email", None)
if user_email is not None:
user = User.query.filter(User.email == user_email).one()
return render_template("homepage.html", user=user)
else:
return render_template("homepage.html", user=None)
@app.route("/users")
def | ():
"""Show list of users."""
users = User.query.all()
return render_template("user_list.html", users=users)
# This takes to each user's profile from user list
@app.route("/users/<int:user_id>")
def user_profile(user_id):
"""Show user information"""
# Query by user id to return that record in database about user info
user = User.query.filter(User.user_id == user_id).one()
# import pdb; pdb.set_trace()
# Query to get all movies and scores rated by this user
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
user_movies = db.session.query(Rating.user_id,
Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.user_id == user_id).order_by(Movie.title).all()
# Passed user info into jinja and called on its attributes
# Passed user_movies into jinja and called on its attributes to get the info
return render_template("user_profile.html", user=user, user_movies = user_movies)
# # THIS WORKS, but we want to use /user/<int:user_id>, which we figured out above!!
# @app.route("/user-profile")
# def user_profile():
# """Show user information"""
# # import pdb; pdb.set_trace()
# # Get user email to query in User database and get all info about the user
# email = session["logged_in_user_email"]
# user = User.query.filter(User.email == email).one()
# # # Test code to see attributes of user object
# # user_id = user.user_id
# # age = user.age
# # zipcode = user.zipcode
# return render_template("user_profile.html", user=user)
@app.route("/signup-login", methods=["GET"])
def show_forms():
"""Show signup and login forms."""
return render_template("signup_login.html")
@app.route("/signup", methods=["POST"])
def signup():
"""Check if user exists in database, otherwise add user to database."""
# Get values from signup form
signup_email = request.form.get("signup_email")
signup_password = request.form.get("signup_password")
# If user exists, ask them to log in
# Otherwise, add user into database and log them in, redirecting to homepage
if db.session.query(User).filter(User.email == signup_email).first():
flash("You already have an account please use login!", "danger")
return redirect("/signup-login")
else:
new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)
db.session.add(new_user)
db.session.commit()
session["logged_in_user_email"] = signup_email
session["logged_in_user"] = new_user.user_id
flash("Your account has been created! You now are logged in!", "success")
return redirect("/")
@app.route("/login", methods=["POST"])
def login():
"""Check if user's email matches password, otherwise ask user to try again."""
# Get values from login form
login_email = request.form.get("login_email")
login_password = request.form.get("login_password")
# If user's email and password matches, log them in, redirecting them to homepage
# Otherwise, ask them to log in with the correct password
if db.session.query(User).filter(User.email == login_email,
User.password == login_password).first():
flash("Login SUCCESS.", "success")
# Query to get user's user id, in order to redirect user to their user profile
user = User.query.filter(User.email == login_email).one()
session["logged_in_user_email"] = login_email
session["logged_in_user"] = user.user_id
# Pass a variable through a string via string formatting
# so we can pass user_id into the redirected route, which is a string!!
return redirect("/users/%s" % user.user_id)
# return redirect("/")
else:
flash("Incorrect password. Please try again!", "danger")
return redirect("/signup-login")
@app.route("/logout")
def process_logout():
"""Log user out."""
del session["logged_in_user_email"]
del session["logged_in_user"]
flash("Logged out.", "success")
return redirect("/")
@app.route("/movies")
def movie_list():
"""Show list of movies."""
# sort movie titles alphbetically
movies = Movie.query.order_by(Movie.title).all()
return render_template("movie_list.html", movies=movies)
@app.route("/movies/<int:movie_id>", methods=['GET'])
def movie_profile(movie_id):
"""Show movie information.
If a user is logged in, let them add/edit a rating.
"""
if not session.get('logged_in_user_email'):
flash("Please login or signup to see the movie details and rate the movie!", "danger")
return redirect("/signup-login")
else:
# import pdb; pdb.set_trace();
# Query by movie id to return that record in database about movie info
# movie = Movie.query.filter(Movie.movie_id == movie_id).one()
movie = Movie.query.get(movie_id)
user = User.query.filter(User.email == session.get("logged_in_user_email")).one()
user_id = user.user_id
if user_id:
user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()
else:
user_rating = None
# Prediction code: only predict if the user hasn't rated it
prediction = None
if (not user_rating) and user_id:
user = User.query.get(user_id)
if user:
prediction = user.predict_rating(movie)
# Either use the prediction or their real rating
if prediction:
# User hasn't scored; use our prediction if we made one
effective_rating = prediction
elif user_rating:
# User has already scored for real; use that
effective_rating = user_rating.score
else:
# User hasn't scored and we couldn't get a prediction
effective_rating = None
# Get the wizard's rating, either by predicting or using real rating
wizard = User.query.filter_by(email="wizard@gmail.com").one()
wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()
if wizard_rating is None:
wizard_rating = wizard.predict_rating(movie)
else:
wizard_rating = wizard_rating.score
if wizard_rating and effective_rating:
difference = abs(wizard_rating - effective_rating)
else:
# We couldn't get a wizard rating, so we'll skip difference
difference = None
# Depending on how different we are from the Wizard, choose a message
BERATEMENT_MESSAGES = [
"I suppose you don't have such bad taste after all.",
"I regret every decision that I've ever made that has brought me to listen to your opinion.",
"Words fail me, as your taste in movies has clearly failed you.",
"That movie is great. For a clown to watch. Idiot.",
"Words cannot express the awfulness of your taste."
]
if difference is not None:
beratement = BERATEMENT_MESSAGES[int(difference)]
else:
beratement = None
# Tallies score of each rating (how many people rated this score per rating)
# Returns list of tuples for count_score
unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)
ordered_movies = unordered_ratings.order_by(Rating.score)
count_score = ordered_movies.all()
# Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja
avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()
# Query to get all ratings for a specific movie
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
ratings = db.session.query(Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()
# # Pass user info into jinja and called on its attributes
# # Pass count_score, avg_rating, and ratings into jinja
# return render_template("movie_profile.html", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)
return render_template(
"movie_profile.html",
movie=movie,
user_rating=user_rating,
avg_rating=avg_rating[0],
count_score=count_score,
prediction=prediction,
ratings=ratings,
beratement=beratement)
@app.route("/movies/<int:movie_id>/rate-movie")
def rate_movie(movie_id):
"""Get user rating score for movie"""
user_rating = request.args.get("user_rating")
# get user id from log in email address
user_email = session["logged_in_user_email"]
user = User.query.filter(User.email == user_email).one()
user_id = user.user_id
# Check if user rating exists in database
# If user has rated this movie before, update value
# Else, add user rating to database by movie id and user id
if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():
# When updating a value, we need to use the key-value pair in update()
db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({"score": user_rating})
# db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)
db.session.commit()
flash("You have rated this movie before! It has now been updated to %s." % (user_rating), "warning")
return redirect("/users/%s" % user_id)
else:
db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))
db.session.commit()
flash("You have rated this movie a %s." % (user_rating), "info")
return redirect("/users/%s" % user_id)
# Get user rating routed correctly, as this was just test code
# Fix label format for movie profile page
return render_template("rate_movie.html", user_rating=user_rating)
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the point
# that we invoke the DebugToolbarExtension
app.debug = True
connect_to_db(app)
# Use the DebugToolbar
# DebugToolbarExtension(app)
app.run()
| user_list | identifier_name |
server.py | """Movie Ratings."""
from jinja2 import StrictUndefined
from flask import Flask, render_template, redirect, request, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from sqlalchemy import func, update
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails silently.
# This is horrible. Fix this so that, instead, it raises an error.
app.jinja_env.undefined = StrictUndefined
@app.template_filter()
def datetimefilter(value, format='%b %d'):
"""Convert a datetime to a different format so it can be accessible in Jinja."""
return value.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@app.route('/')
def index():
"""Homepage."""
# We want user profile link to show if user is logged in and clicks on homepage
# Check if logged in and get the value or else return None
# If there is a value, query to get user information so that user.user_id can be accessed in jinja
# Else, pass None value through so that if statement in jinja not executed
user_email = session.get("logged_in_user_email", None)
if user_email is not None:
user = User.query.filter(User.email == user_email).one()
return render_template("homepage.html", user=user)
else:
return render_template("homepage.html", user=None)
@app.route("/users")
def user_list():
"""Show list of users."""
users = User.query.all()
return render_template("user_list.html", users=users)
# This takes to each user's profile from user list
@app.route("/users/<int:user_id>")
def user_profile(user_id):
"""Show user information"""
# Query by user id to return that record in database about user info
user = User.query.filter(User.user_id == user_id).one()
# import pdb; pdb.set_trace()
# Query to get all movies and scores rated by this user
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
user_movies = db.session.query(Rating.user_id,
Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.user_id == user_id).order_by(Movie.title).all()
# Passed user info into jinja and called on its attributes
# Passed user_movies into jinja and called on its attributes to get the info
return render_template("user_profile.html", user=user, user_movies = user_movies)
# # THIS WORKS, but we want to use /user/<int:user_id>, which we figured out above!!
# @app.route("/user-profile")
# def user_profile():
# """Show user information"""
# # import pdb; pdb.set_trace()
# # Get user email to query in User database and get all info about the user
# email = session["logged_in_user_email"]
# user = User.query.filter(User.email == email).one()
# # # Test code to see attributes of user object
# # user_id = user.user_id
# # age = user.age
# # zipcode = user.zipcode
# return render_template("user_profile.html", user=user)
@app.route("/signup-login", methods=["GET"])
def show_forms():
"""Show signup and login forms."""
return render_template("signup_login.html")
@app.route("/signup", methods=["POST"])
def signup():
"""Check if user exists in database, otherwise add user to database."""
# Get values from signup form
signup_email = request.form.get("signup_email")
signup_password = request.form.get("signup_password")
# If user exists, ask them to log in
# Otherwise, add user into database and log them in, redirecting to homepage
if db.session.query(User).filter(User.email == signup_email).first():
flash("You already have an account please use login!", "danger")
return redirect("/signup-login")
else:
new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)
db.session.add(new_user)
db.session.commit()
session["logged_in_user_email"] = signup_email
session["logged_in_user"] = new_user.user_id
flash("Your account has been created! You now are logged in!", "success")
return redirect("/")
@app.route("/login", methods=["POST"])
def login():
"""Check if user's email matches password, otherwise ask user to try again."""
# Get values from login form
login_email = request.form.get("login_email")
login_password = request.form.get("login_password")
# If user's email and password matches, log them in, redirecting them to homepage
# Otherwise, ask them to log in with the correct password
if db.session.query(User).filter(User.email == login_email,
User.password == login_password).first():
flash("Login SUCCESS.", "success")
# Query to get user's user id, in order to redirect user to their user profile
user = User.query.filter(User.email == login_email).one()
session["logged_in_user_email"] = login_email
session["logged_in_user"] = user.user_id
# Pass a variable through a string via string formatting
# so we can pass user_id into the redirected route, which is a string!!
return redirect("/users/%s" % user.user_id)
# return redirect("/")
else:
flash("Incorrect password. Please try again!", "danger")
return redirect("/signup-login")
@app.route("/logout")
def process_logout():
"""Log user out."""
del session["logged_in_user_email"]
del session["logged_in_user"]
flash("Logged out.", "success")
return redirect("/")
@app.route("/movies")
def movie_list():
"""Show list of movies."""
# sort movie titles alphbetically
movies = Movie.query.order_by(Movie.title).all()
return render_template("movie_list.html", movies=movies)
@app.route("/movies/<int:movie_id>", methods=['GET'])
def movie_profile(movie_id):
"""Show movie information.
If a user is logged in, let them add/edit a rating.
"""
if not session.get('logged_in_user_email'):
flash("Please login or signup to see the movie details and rate the movie!", "danger")
return redirect("/signup-login")
else:
# import pdb; pdb.set_trace();
# Query by movie id to return that record in database about movie info
# movie = Movie.query.filter(Movie.movie_id == movie_id).one()
movie = Movie.query.get(movie_id)
user = User.query.filter(User.email == session.get("logged_in_user_email")).one()
user_id = user.user_id
if user_id:
user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()
else:
user_rating = None
# Prediction code: only predict if the user hasn't rated it
prediction = None
if (not user_rating) and user_id:
user = User.query.get(user_id)
if user:
prediction = user.predict_rating(movie)
# Either use the prediction or their real rating
if prediction:
# User hasn't scored; use our prediction if we made one
effective_rating = prediction
elif user_rating:
# User has already scored for real; use that
effective_rating = user_rating.score
else:
# User hasn't scored and we couldn't get a prediction
effective_rating = None
# Get the wizard's rating, either by predicting or using real rating
wizard = User.query.filter_by(email="wizard@gmail.com").one()
wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()
if wizard_rating is None:
|
else:
wizard_rating = wizard_rating.score
if wizard_rating and effective_rating:
difference = abs(wizard_rating - effective_rating)
else:
# We couldn't get a wizard rating, so we'll skip difference
difference = None
# Depending on how different we are from the Wizard, choose a message
BERATEMENT_MESSAGES = [
"I suppose you don't have such bad taste after all.",
"I regret every decision that I've ever made that has brought me to listen to your opinion.",
"Words fail me, as your taste in movies has clearly failed you.",
"That movie is great. For a clown to watch. Idiot.",
"Words cannot express the awfulness of your taste."
]
if difference is not None:
beratement = BERATEMENT_MESSAGES[int(difference)]
else:
beratement = None
# Tallies score of each rating (how many people rated this score per rating)
# Returns list of tuples for count_score
unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)
ordered_movies = unordered_ratings.order_by(Rating.score)
count_score = ordered_movies.all()
# Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja
avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()
# Query to get all ratings for a specific movie
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
ratings = db.session.query(Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()
# # Pass user info into jinja and called on its attributes
# # Pass count_score, avg_rating, and ratings into jinja
# return render_template("movie_profile.html", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)
return render_template(
"movie_profile.html",
movie=movie,
user_rating=user_rating,
avg_rating=avg_rating[0],
count_score=count_score,
prediction=prediction,
ratings=ratings,
beratement=beratement)
@app.route("/movies/<int:movie_id>/rate-movie")
def rate_movie(movie_id):
"""Get user rating score for movie"""
user_rating = request.args.get("user_rating")
# get user id from log in email address
user_email = session["logged_in_user_email"]
user = User.query.filter(User.email == user_email).one()
user_id = user.user_id
# Check if user rating exists in database
# If user has rated this movie before, update value
# Else, add user rating to database by movie id and user id
if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():
# When updating a value, we need to use the key-value pair in update()
db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({"score": user_rating})
# db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)
db.session.commit()
flash("You have rated this movie before! It has now been updated to %s." % (user_rating), "warning")
return redirect("/users/%s" % user_id)
else:
db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))
db.session.commit()
flash("You have rated this movie a %s." % (user_rating), "info")
return redirect("/users/%s" % user_id)
# Get user rating routed correctly, as this was just test code
# Fix label format for movie profile page
return render_template("rate_movie.html", user_rating=user_rating)
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the point
# that we invoke the DebugToolbarExtension
app.debug = True
connect_to_db(app)
# Use the DebugToolbar
# DebugToolbarExtension(app)
app.run()
| wizard_rating = wizard.predict_rating(movie) | conditional_block |
server.py | """Movie Ratings."""
from jinja2 import StrictUndefined
from flask import Flask, render_template, redirect, request, flash, session
from flask_debugtoolbar import DebugToolbarExtension
from sqlalchemy import func, update
from model import User, Rating, Movie, connect_to_db, db
app = Flask(__name__)
# Required to use Flask sessions and the debug toolbar
app.secret_key = "ABC"
# Normally, if you use an undefined variable in Jinja2, it fails silently.
# This is horrible. Fix this so that, instead, it raises an error.
app.jinja_env.undefined = StrictUndefined
@app.template_filter()
def datetimefilter(value, format='%b %d'):
"""Convert a datetime to a different format so it can be accessible in Jinja."""
return value.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@app.route('/')
def index():
"""Homepage."""
# We want user profile link to show if user is logged in and clicks on homepage
# Check if logged in and get the value or else return None
# If there is a value, query to get user information so that user.user_id can be accessed in jinja
# Else, pass None value through so that if statement in jinja not executed
user_email = session.get("logged_in_user_email", None)
if user_email is not None:
user = User.query.filter(User.email == user_email).one()
return render_template("homepage.html", user=user)
else:
return render_template("homepage.html", user=None)
@app.route("/users")
def user_list():
"""Show list of users."""
users = User.query.all()
return render_template("user_list.html", users=users)
# This takes to each user's profile from user list
@app.route("/users/<int:user_id>")
def user_profile(user_id):
"""Show user information"""
# Query by user id to return that record in database about user info
user = User.query.filter(User.user_id == user_id).one()
# import pdb; pdb.set_trace()
# Query to get all movies and scores rated by this user
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
user_movies = db.session.query(Rating.user_id,
Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.user_id == user_id).order_by(Movie.title).all()
# Passed user info into jinja and called on its attributes
# Passed user_movies into jinja and called on its attributes to get the info
return render_template("user_profile.html", user=user, user_movies = user_movies)
# # THIS WORKS, but we want to use /user/<int:user_id>, which we figured out above!!
# @app.route("/user-profile")
# def user_profile():
# """Show user information"""
# # import pdb; pdb.set_trace()
# # Get user email to query in User database and get all info about the user
# email = session["logged_in_user_email"]
# user = User.query.filter(User.email == email).one()
# # # Test code to see attributes of user object
# # user_id = user.user_id
# # age = user.age
# # zipcode = user.zipcode
# return render_template("user_profile.html", user=user)
@app.route("/signup-login", methods=["GET"])
def show_forms():
"""Show signup and login forms."""
return render_template("signup_login.html")
@app.route("/signup", methods=["POST"])
def signup():
"""Check if user exists in database, otherwise add user to database."""
# Get values from signup form
signup_email = request.form.get("signup_email")
signup_password = request.form.get("signup_password")
# If user exists, ask them to log in
# Otherwise, add user into database and log them in, redirecting to homepage
if db.session.query(User).filter(User.email == signup_email).first():
flash("You already have an account please use login!", "danger")
return redirect("/signup-login")
else:
new_user = User(email=signup_email, password=signup_password, age=None, zipcode=None)
db.session.add(new_user)
db.session.commit()
session["logged_in_user_email"] = signup_email
session["logged_in_user"] = new_user.user_id
flash("Your account has been created! You now are logged in!", "success")
return redirect("/")
@app.route("/login", methods=["POST"])
def login():
"""Check if user's email matches password, otherwise ask user to try again."""
# Get values from login form
login_email = request.form.get("login_email")
login_password = request.form.get("login_password")
# If user's email and password matches, log them in, redirecting them to homepage
# Otherwise, ask them to log in with the correct password
if db.session.query(User).filter(User.email == login_email,
User.password == login_password).first():
flash("Login SUCCESS.", "success")
# Query to get user's user id, in order to redirect user to their user profile
user = User.query.filter(User.email == login_email).one()
session["logged_in_user_email"] = login_email
session["logged_in_user"] = user.user_id
# Pass a variable through a string via string formatting
# so we can pass user_id into the redirected route, which is a string!!
return redirect("/users/%s" % user.user_id)
# return redirect("/")
else:
flash("Incorrect password. Please try again!", "danger")
return redirect("/signup-login")
@app.route("/logout")
def process_logout():
"""Log user out."""
del session["logged_in_user_email"]
del session["logged_in_user"]
flash("Logged out.", "success")
return redirect("/")
@app.route("/movies")
def movie_list():
"""Show list of movies."""
# sort movie titles alphbetically
movies = Movie.query.order_by(Movie.title).all()
return render_template("movie_list.html", movies=movies)
@app.route("/movies/<int:movie_id>", methods=['GET'])
def movie_profile(movie_id):
"""Show movie information.
If a user is logged in, let them add/edit a rating.
"""
if not session.get('logged_in_user_email'):
flash("Please login or signup to see the movie details and rate the movie!", "danger")
return redirect("/signup-login")
else:
# import pdb; pdb.set_trace();
# Query by movie id to return that record in database about movie info
# movie = Movie.query.filter(Movie.movie_id == movie_id).one()
movie = Movie.query.get(movie_id)
user = User.query.filter(User.email == session.get("logged_in_user_email")).one()
user_id = user.user_id
if user_id:
user_rating = Rating.query.filter_by(movie_id=movie_id, user_id=user_id).first()
else:
user_rating = None
# Prediction code: only predict if the user hasn't rated it
prediction = None
if (not user_rating) and user_id:
user = User.query.get(user_id)
if user:
prediction = user.predict_rating(movie)
# Either use the prediction or their real rating
if prediction:
# User hasn't scored; use our prediction if we made one
effective_rating = prediction
elif user_rating:
# User has already scored for real; use that
effective_rating = user_rating.score
else:
# User hasn't scored and we couldn't get a prediction
effective_rating = None
# Get the wizard's rating, either by predicting or using real rating
wizard = User.query.filter_by(email="wizard@gmail.com").one()
wizard_rating = Rating.query.filter_by(user_id=wizard.user_id, movie_id=movie.movie_id).first()
if wizard_rating is None:
wizard_rating = wizard.predict_rating(movie)
else:
wizard_rating = wizard_rating.score
if wizard_rating and effective_rating:
difference = abs(wizard_rating - effective_rating)
else:
# We couldn't get a wizard rating, so we'll skip difference
difference = None
# Depending on how different we are from the Wizard, choose a message
BERATEMENT_MESSAGES = [
"I suppose you don't have such bad taste after all.",
"I regret every decision that I've ever made that has brought me to listen to your opinion.",
"Words fail me, as your taste in movies has clearly failed you.",
"That movie is great. For a clown to watch. Idiot.",
"Words cannot express the awfulness of your taste."
]
if difference is not None:
beratement = BERATEMENT_MESSAGES[int(difference)]
else:
beratement = None
# Tallies score of each rating (how many people rated this score per rating)
# Returns list of tuples for count_score
unordered_ratings = db.session.query(Rating.score, func.count(Rating.score)).filter(Rating.movie_id == movie_id).group_by(Rating.score)
ordered_movies = unordered_ratings.order_by(Rating.score)
count_score = ordered_movies.all()
# Get average score, which returns a tuple-like object, so need to access index 0 to return the number and pass through jinja
avg_rating = db.session.query(func.avg(Rating.score)).filter(Rating.movie_id == movie_id).one()
# Query to get all ratings for a specific movie
# Needed to join Rating and Movie tables and filter by user id
# Sort movie titles alphabetically
ratings = db.session.query(Rating.movie_id,
Rating.score,
Movie.title).join(Movie).filter(Rating.movie_id == movie_id).all()
# # Pass user info into jinja and called on its attributes
# # Pass count_score, avg_rating, and ratings into jinja
# return render_template("movie_profile.html", movie=movie, count_score=count_score, avg_rating=avg_rating[0], ratings=ratings)
return render_template(
"movie_profile.html",
movie=movie,
user_rating=user_rating,
avg_rating=avg_rating[0],
count_score=count_score,
prediction=prediction,
ratings=ratings,
beratement=beratement)
@app.route("/movies/<int:movie_id>/rate-movie")
def rate_movie(movie_id):
"""Get user rating score for movie"""
user_rating = request.args.get("user_rating") |
user_id = user.user_id
# Check if user rating exists in database
# If user has rated this movie before, update value
# Else, add user rating to database by movie id and user id
if db.session.query(Rating.score).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).all():
# When updating a value, we need to use the key-value pair in update()
db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update({"score": user_rating})
# db.session.query(Rating).filter(Rating.movie_id == movie_id, Rating.user_id == user_id).update(Rating.score == user_rating)
db.session.commit()
flash("You have rated this movie before! It has now been updated to %s." % (user_rating), "warning")
return redirect("/users/%s" % user_id)
else:
db.session.add(Rating(movie_id=movie_id, user_id=user_id, score=user_rating))
db.session.commit()
flash("You have rated this movie a %s." % (user_rating), "info")
return redirect("/users/%s" % user_id)
# Get user rating routed correctly, as this was just test code
# Fix label format for movie profile page
return render_template("rate_movie.html", user_rating=user_rating)
if __name__ == "__main__":
# We have to set debug=True here, since it has to be True at the point
# that we invoke the DebugToolbarExtension
app.debug = True
connect_to_db(app)
# Use the DebugToolbar
# DebugToolbarExtension(app)
app.run() | # get user id from log in email address
user_email = session["logged_in_user_email"]
user = User.query.filter(User.email == user_email).one() | random_line_split |
broken_telephone.go | /*
* broken_telephone.go: The children's game of "broken telephone"
* implemented with goroutines passing messages read from stdin.
* Each goroutine mutates the message by replacing one word before
* passing it on.
*
* For Introduction to Go, Spring 2010
* Kimmo Kulovesi <kkuloves@cs.helsinki.fi>
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"rand"
"sort"
"strings"
"unicode"
"utf8"
)
var (
numberOfLinks = flag.Int("l", 20,
"Number of unreliable links to pass the message through")
wordList = flag.String("w", "/usr/share/dict/words",
"Filename of the word list to use")
printIntermediateStates = flag.Bool("i", false,
"Print intermediate states of the message")
debugMetaphone = flag.Bool("d", false,
"Print double metaphone algorithm debug output (very verbose!)")
allowNamesInWordList = flag.Bool("n", false,
"Allow names to be read from the word list")
dictionary metaphoneDict
)
// Minimum length of a word to consider it for substitution.
// TODO: Implement stopwords instead of using this limit.
const minWordLength = 4
// Default dicitionary capacity.
const dictCapacity = 100000
// The distance (in either direction) at which to "fudge" the word
// substitutes - increasing this gives more random matches.
const fudgeDistance = 4
// The maximum length of the phonetic representations to use, 0 for unlimited.
// The traditional metaphone algorithm uses 4.
const doubleMetaphoneMaxLen = 4
// mutate copies m but randomly substitutes one word.
func mutate(m message) message {
if len(m) == 0 {
return m
}
n := make(message, len(m))
copy(n, m)
for tries := 0; tries < 3; tries++ {
i := rand.Intn(len(m))
word := m[i]
if len(word.original) >= minWordLength {
n[i] = dictionary.randomSubstituteFor(word)
return n
}
}
return n
}
// telephone passes mutated messages between from and to.
func telephone(from, to chan message) {
for m := <-from; m != nil; m = <-from {
if *printIntermediateStates {
fmt.Printf("Heard: %s\n", m)
}
to <- mutate(m)
}
to <- nil
}
// main reads the dictionary, creates the telephone system and then passes
// messages read from stdin to it.
func main() {
flag.Parse()
if file, err := os.Open(*wordList, os.O_RDONLY, 0); err == nil {
fmt.Fprintf(os.Stderr, "Reading words from \"%s\"...\n", *wordList)
dictionary = readWords(file)
file.Close()
} else {
fmt.Fprintf(os.Stderr, "%s: %s\n", *wordList, err)
}
if dictionary.Len() < 1 {
fmt.Fprintf(os.Stderr, "No words read, aborting.")
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "%d words in dictionary\n", dictionary.Len())
send := make(chan message)
receive := send
for i := *numberOfLinks; i > 0; i-- {
c := make(chan message)
go telephone(receive, c)
receive = c
}
input := bufio.NewReader(os.Stdin)
for {
if line, err := input.ReadString('\n'); err == nil {
words := strings.Fields(line[0 : len(line)-1])
msg := make(message, len(words))
for i, word := range words {
msg[i] = doubleMetaphone(word)
}
send <- msg
fmt.Println(<-receive)
} else {
if err != os.EOF {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
}
break
}
}
send <- nil
}
type metaphoneWord struct {
original, literal, metaphone, secondary string
}
type message []*metaphoneWord
func (m message) String() (s string) {
for i, word := range m {
s += word.original
if i != len(m)-1 {
s += " "
}
}
return
}
type metaphoneDict []metaphoneWord
func newMetaphoneDict() metaphoneDict { return make(metaphoneDict, 0, dictCapacity) }
func (d metaphoneDict) Len() int { return len(d) }
func (d metaphoneDict) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d metaphoneDict) Less(i, j int) bool { return d[i].metaphone < d[j].metaphone }
// phoneticLocation returns the index where metaphone is or would be sorted.
func (d metaphoneDict) phoneticLocation(metaphone string) (i int) {
left, right := 0, len(d)
for left < right {
i = left + ((right - left) / 2)
if d[i].metaphone < metaphone {
left = i + 1
} else {
right = i
}
}
return
}
// matches returns a slice containing all exact matches in d for metaphone.
func (d metaphoneDict) matches(metaphone string) metaphoneDict {
var l, r int
i := d.phoneticLocation(metaphone)
for r = i; r < len(d) && d[r].metaphone == metaphone; r++ {
}
for l = i; l >= 0 && d[l].metaphone == metaphone; l-- {
}
l++
if r-l < fudgeDistance*2 {
l -= fudgeDistance
r += fudgeDistance
}
if l < 0 {
l = 0
}
if r > len(d) {
r = len(d)
}
return d[l:r]
}
// randomNonEqual returns a randomly selected word in d which is not
// literally equal to w. Returns -1 if no such word exists.
// (The assumption is that the same word literal appears at most twice.)
func (d metaphoneDict) randomNonEqual(w string) *metaphoneWord |
// randomSubstituteFor returns a semi-random but usually phonetically
// close substitute for w.
func (d metaphoneDict) randomSubstituteFor(w *metaphoneWord) *metaphoneWord {
mp := w.metaphone
if rand.Intn(2) == 0 {
mp = w.secondary
}
match := d.matches(mp).randomNonEqual(w.literal)
if match != nil {
return match
}
return w
}
// Push adds mp to the end of d, reallocating space if necessary.
func (d *metaphoneDict) Push(mp metaphoneWord) {
dict := *d
if len(dict) < cap(dict) {
dict = dict[0 : len(dict)+1]
} else {
dict = make(metaphoneDict, len(dict)+1, cap(dict)+dictCapacity)
copy(dict, *d)
}
dict[len(dict)-1] = mp
*d = dict
}
// readWords creates a metaphone dictionary from input of one word per line.
// The dictionary is sorted by phonetic representation.
func readWords(input io.Reader) metaphoneDict {
dict := newMetaphoneDict()
rd := bufio.NewReader(input)
for {
if word, err := rd.ReadString('\n'); err == nil {
word = strings.TrimSpace(word)
if !*allowNamesInWordList {
rune, _ := utf8.DecodeRuneInString(word)
if unicode.IsUpper(rune) {
continue
}
}
if strings.Index(word, " ") != -1 || len(word) < minWordLength {
continue
}
mp := *doubleMetaphone(word)
dict.Push(mp)
if mp.metaphone != mp.secondary {
// Secondary phonetic representation
dict.Push(metaphoneWord{word, mp.literal,
mp.secondary, mp.metaphone})
}
} else {
if err != os.EOF {
fmt.Printf("Error: %s\n", err)
}
break
}
}
sort.Sort(dict)
return dict
}
// doubleMetaphone is like doubleMetaphoneLimited but uses the default maxLen.
func doubleMetaphone(word string) (result *metaphoneWord) {
if doubleMetaphoneMaxLen > 0 {
return doubleMetaphoneLimited(word, doubleMetaphoneMaxLen)
}
return doubleMetaphoneLimited(word, len(word)*2)
}
// doubleMetaphone returns two phonetic representations of an English word.
// The secondary representation may equal primary. The implementation is
// currently not fully complete with all special cases. The phonetic
// representations are limited to maxLen length.
func doubleMetaphoneLimited(word string, maxLen int) (result *metaphoneWord) {
result = new(metaphoneWord)
result.original = word
word = strings.ToUpper(word)
// TODO: Strip punctuation
result.literal = word
prev, skip, last, slavoGermanic := 0, 0, len(word)-1, false
testSlavoGermanic: for pos, c := range word {
switch c {
case 'C':
if pos == last || word[pos+1] != 'Z' {
break
}
fallthrough
case 'W', 'K':
slavoGermanic = true
break testSlavoGermanic
}
}
word += " " // Allow indexing beyond the end
for pos, c := range word {
if c == ' ' {
break
}
if skip > 0 {
prev = 0
skip--
continue
}
mp, ms := "", "-"
switch c {
case 'A', 'E', 'I', 'O', 'U', 'Y', 'Ü', 'Ä', 'Ö', 'Å', 'É', 'È', 'Ï':
if pos == 0 {
// Initial vowel
mp = "A"
} else if pos == 1 && prev == 'W' {
// W + vowel at the start of the word
mp, ms = "A", "F"
}
case 'B':
if prev == 'M' && pos > 1 && word[pos-2] == 'U' &&
(pos == last || (word[pos+1] == 'E' &&
word[pos+2] == 'R')) {
// e.g. dumb, thumb
break
}
if prev != 'B' {
mp = "P"
}
case 'C':
if prev == 'X' {
break
}
if pos == 0 && strings.HasPrefix(word, "CAESAR") {
mp = "S"
skip = 1
break
}
next := word[pos+1]
if next == 'H' {
skip = 1
n2, n3 := word[pos+2], word[pos+3]
if pos > 0 {
if n2 == 'A' && n3 == 'E' {
// michael
mp, ms = "K", "X"
break
}
if (pos == 1 && (prev == 'M' || prev == 'S')) ||
n2 == 'T' || n2 == 'S' {
// Mc, Sch, -cht, -chs
mp = "K"
break
}
if (prev == 'A' || prev == 'O' || prev == 'U' ||
prev == 'E') && (n2 == 'L' || n2 == 'R' || n2 == 'N' ||
n2 == 'M' || n2 == 'B' || n2 == 'B' ||
n2 == 'H' || n2 == 'F' || n2 == 'V' ||
n2 == 'W') {
// e.g. wachtler, wechsler, but not tichner
mp = "K"
break
}
if pos > 1 {
p2 := word[pos-2]
if prev == 'R' &&
((p2 == 'O' && n2 == 'E' && n3 == 'S') ||
(p2 == 'O' && n2 == 'I' && n3 == 'D') ||
(p2 == 'A' && n2 == 'I' && n3 == 'T')) {
// orchestra, orchid, architect (but not arch)
mp = "K"
break
}
}
} else {
// pos == 0
n4, n5 := word[pos+4], word[pos+5]
if (n2 == 'A' && n3 == 'R' && ((n4 == 'A' && n5 == 'C') ||
(n4 == 'I' && n5 == 'S'))) ||
(n2 == 'E' && n3 == 'M') || (n2 == 'Y' && n3 == 'M') ||
(n2 == 'I' && n3 == 'A') ||
(n2 == 'O' && n3 == 'R' && (n4 != 'O' || n5 != 'E')) {
// e.g. character, charisma, chorus, chemistry
// but not "chore"
mp = "K"
} else {
switch n2 {
case 'L', 'R', 'N', 'M', 'B', 'H', 'F', 'V', 'W', ' ':
mp = "K"
default:
mp = "X"
}
}
break
}
mp, ms = "X", "K"
break
} else if next == 'Z' {
if pos < 2 || word[pos-1] != 'I' || word[pos-2] == 'W' {
// cz, not wicz
mp, ms = "S", "X"
skip = 1
break
}
} else if next == 'C' {
n2 := word[pos+2]
if n2 == 'I' && word[pos+3] == 'A' {
// -ccia, e.g. focaccia
mp = "X"
skip = 2
break
}
if pos != 2 || prev != 'M' {
// -cc, but not e.g. McClellan
if n2 == 'I' || n2 == 'E' ||
(n2 == 'H' && word[pos+3] != 'U') {
// e.g. bellocchio, but not bacchus
skip = 3
if pos == 1 && prev == 'A' {
// e.g. accident
mp = "KS"
break
} else if prev == 'U' && n2 == 'E' &&
(word[pos+4] == 'S' || word[pos+4] == 'E') {
// succeed, success
mp = "KS"
break
}
mp = "X"
break
}
}
if n2 != 'I' && n2 != 'E' {
skip = 1
}
} else if next == 'K' || next == 'Q' {
skip = 1
} else if next == 'I' {
mp = "S"
skip = 1
n2 := word[pos+2]
if n2 == 'O' || n2 == 'E' || n2 == 'A' {
// cio, cie, cia
ms = "X"
}
break
} else if next == 'E' || next == 'Y' {
skip = 1
mp = "S"
break
}
mp = "K"
case 'D':
if prev != 'D' && prev != 'T' {
if word[pos+1] == 'G' {
skip = 1
switch word[pos+2] {
case 'E', 'I', 'Y':
// e.g. "edge"
mp = "J"
default:
// e.g. "edgar"
mp = "K"
}
break
}
mp = "T"
}
case 'F', 'V':
if prev != c {
mp = "F"
}
case 'G':
next := word[pos+1]
if next == 'H' {
skip = 1
if !isVowel(prev) {
mp = "K"
break
}
if pos == 0 {
if word[pos+2] == 'I' {
mp = "J"
} else {
mp = "K"
}
break
}
if pos > 1 {
if word[pos-2] == 'B' || word[pos-2] == 'H' ||
word[pos-2] == 'D' {
// e.g. hugh
break
}
if pos > 2 {
p3 := word[pos-3]
if p3 == 'B' || p3 == 'H' || p3 == 'D' {
// e.g. bough
break
}
if pos > 3 && (word[pos-4] == 'B' || word[pos-4] == 'H') {
// e.g. brought
break
}
if prev == 'U' && (p3 == 'C' || p3 == 'G' ||
p3 == 'L' || p3 == 'R' ||
p3 == 'T') {
// e.g. laugh, cough, rough, tough
mp = "F"
break
}
}
}
if prev != 'I' {
mp = "K"
}
break
}
if next == 'N' {
skip = 1
if !slavoGermanic {
if pos == 1 && isVowel(prev) {
mp, ms = "KN", "N"
break
} else if word[pos+2] != 'E' || word[pos+3] != 'Y' {
// not e.g. cagney
mp, ms = "N", "KN"
break
}
}
mp = "KN"
break
}
if next == 'L' {
if word[pos+2] == 'I' && !slavoGermanic {
// e.g. tagliaro
mp, ms = "KL", "L"
skip = 1
}
} else if next == 'E' || next == 'I' || next == 'Y' || next == 'G' {
skip = 1
n2 := word[pos+2]
if next != 'G' {
if pos == 0 {
if (next == 'E' && (n2 == 'S' || n2 == 'P' ||
n2 == 'B' || n2 == 'L' ||
n2 == 'Y' || n2 == 'I' ||
n2 == 'R')) || next == 'Y' ||
(next == 'I' && (n2 == 'L' || n2 == 'N')) {
skip = 1
mp, ms = "K", "J"
break
}
}
if !(next == 'I' || prev == 'I' || prev == 'E' ||
(next == 'Y' && (prev == 'R' || prev == 'O')) ||
(next == 'E' && pos > 0 && word[pos-1] != 'R')) {
// -ger-, -gy-
mp = "K"
if !(pos == 3 && next == 'E' &&
strings.HasPrefix(word, "DANGER") ||
strings.HasPrefix(word, "RANGER") ||
strings.HasPrefix(word, "MANGER")) {
ms = "J"
}
break
}
} else if !(n2 == 'I' && (prev == 'A' || prev == 'O')) {
// not -aggi -oggi
mp = "K"
break
}
if !strings.HasPrefix(word, "SCH") ||
(next == 'E' && n2 == 'T') {
// obvious Germanic
mp = "K"
} else if next == 'I' && pos == (last-3) &&
strings.HasSuffix(word, "ER") {
// -gier suffix
mp = "J"
} else {
mp, ms = "J", "K"
}
break
}
mp = "K"
case 'H':
if pos == 0 || isVowel(prev) {
next, _ := utf8.DecodeRuneInString(word[pos+1 : len(word)])
if isVowel(next) {
// H between two vowels, or at the beginning followed by a vowel
mp = "H"
skip = 1
}
}
case 'J':
if prev == 'S' || prev == 'K' || prev == 'L' || prev == 'J' {
break
}
next := word[pos+1]
if pos == 0 {
if next == 'O' && word[pos+2] == 'S' && word[pos+3] == 'E' {
if word[pos+4] == ' ' {
// Jose
mp = "H"
} else {
mp, ms = "J", "H"
}
break
}
mp, ms = "J", "A"
} else if !slavoGermanic && isVowel(prev) &&
(next == 'A' || next == 'O') {
mp, ms = "J", "H"
} else if pos == last {
mp, ms = "J", ""
} else {
switch next {
case 'L', 'T', 'K', 'S', 'N', 'M', 'B', 'Z':
// NOP
default:
mp = "J"
}
}
case 'Q', 'K':
if prev != c && prev != 'C' {
mp = "K"
}
case 'L':
if word[pos+1] == 'L' {
skip = 1
if pos > 0 && ((word[pos+3] == ' ' &&
(((word[pos+2] == 'O' || word[pos+2] == 'A') &&
word[pos-1] == 'I') || (word[pos+2] == 'E' &&
word[pos-1] == 'A'))) ||
((word[last] == 'S' && (word[last-1] == 'A' ||
word[last-1] == 'O')) ||
(word[last] == 'A' || word[last] == 'O') &&
(word[pos-1] == 'A' && word[pos+2] == 'E'))) {
// Spanish, -illo, -illa, -alle
ms = ""
}
}
mp = "L"
case 'M':
if prev != 'M' {
mp = "M"
}
case 'N':
if pos == 1 && (prev == 'K' || prev == 'G' || prev == 'P') {
// Skip GN, KN, PN at the start of the word
result.metaphone, result.secondary = "", ""
break
}
fallthrough
case 'Ñ':
if prev != c {
mp = "N"
}
case 'P':
next := word[pos+1]
if next == 'H' {
mp = "F"
skip = 1
break
}
if next == 'S' && pos == 0 {
// Ignore PS at the start of the word
skip = 1
break
}
if next == 'P' || next == 'B' {
skip = 1
}
mp = "P"
// case 'Q': is grouped with K
case 'R':
if prev == 'R' {
break
}
if pos == last && !slavoGermanic && prev == 'E' && pos > 1 &&
word[pos-2] == 'I' && (pos < 4 || word[pos-4] != 'M' ||
!(word[pos-3] == 'E' || word[pos-3] == 'A')) {
// French, e.g. rogier, but not e.g. hochmeier
mp, ms = "", "R"
} else {
mp = "R"
}
case 'S', 'ß', 'Š':
if prev == 'S' {
break
}
next := word[pos+1]
if (prev == 'I' || prev == 'Y') && next == 'L' {
// isl, ysl, e.g. island, isle, carlysle
break
}
if pos == 0 {
if next == 'M' || next == 'N' || next == 'L' || next == 'W' {
mp, ms = "S", "X"
break
}
if strings.HasPrefix(word, "SUGAR") {
mp, ms = "X", "S"
break
}
}
if next == 'H' {
if word[pos+2] == 'O' {
if (word[pos+3] == 'E' && word[pos+4] == 'K') ||
(word[pos+3] == 'L' && (word[pos+4] == 'M' ||
word[pos+4] == 'Z')) {
// holm, holz, hoek
mp = "S"
break
}
} else if word[pos+2] == 'E' && word[pos+3] == 'I' &&
word[pos+4] == 'M' {
// heim
mp = "S"
break
}
mp = "X"
skip = 1
} else if next == 'I' && (word[pos+2] == 'O' || word[pos+2] == 'A') {
// sio, sia
mp = "S"
if !slavoGermanic {
ms = "X"
}
skip = 2
} else if next == 'Z' {
mp, ms = "S", "X"
skip = 1
} else if next == 'C' {
skip = 2
if word[pos+2] == 'H' {
n3, n4 := word[pos+3], word[pos+4]
if (n3 == 'O' && n4 == 'O') || (n3 == 'U' && n4 == 'Y') ||
(n3 == 'E' && (n4 == 'D' || n4 == 'M')) {
// Dutch origin, e.g. "school", "schooner"
mp = "SK"
} else if n3 == 'E' && (n4 == 'R' || n4 == 'N') {
mp, ms = "X", "SK"
} else {
mp = "X"
if pos == 0 && !isVowel(int(word[3])) && word[3] != 'W' {
ms = "S"
}
}
} else if word[pos+2] == 'I' || word[pos+2] == 'E' ||
word[pos+2] == 'Y' {
mp = "S"
} else {
mp = "SK"
skip = 1
// TODO: Check correctness of skip
}
} else if pos == last && prev == 'I' {
if pos > 1 && (word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. artois
ms = "S"
} else {
mp = "S"
}
} else {
mp = "S"
}
case 'T':
if prev == 'T' {
if word[pos+1] == 'H' {
// tth
mp, ms = "0", "T"
skip = 1
} else {
mp = "T"
}
break
}
if prev == 'D' {
break
}
next := word[pos+1]
if next == 'I' {
if word[pos+2] == 'A' ||
(word[pos+2] == 'O' && word[pos+3] == 'N') {
// tia, tion
mp = "X"
skip = 2
}
} else if next == 'C' && word[pos+2] == 'H' {
// tch
mp = "X"
skip = 2
} else if next == 'H' {
skip = 1
if word[pos+3] == 'M' {
if word[pos+2] == 'O' || word[pos+2] == 'A' {
mp = "T"
break
}
}
mp, ms = "0", "T"
} else if next != 'T' {
mp = "T"
}
// case 'V': is grouped with F
case 'W':
next := word[pos+1]
if next == 'R' {
if pos != 0 {
mp = "R"
}
skip = 1
break
}
if pos == 0 {
if next == 'H' {
mp = "A"
}
break
}
if (pos == last && isVowel(prev)) ||
strings.HasPrefix(word, "SCH") {
ms = "F"
break
}
n2, n3 := word[pos+2], word[pos+3]
if (prev == 'E' || prev == 'O') && next == 'S' && n2 == 'K' &&
(n3 == 'I' || n3 == 'Y') {
// -ewski, -ewsky, -owski, -owsky
ms = "F"
} else if next == 'I' && n3 == 'Z' && (n2 == 'C' || n2 == 'T') {
// -wicz, -witz
mp, ms = "TS", "FX"
skip = 3
}
case 'X':
if pos == 0 {
// Initial X pronounced like a Z, e.g. Xavier
mp = "S"
} else if prev != 'X' {
if pos == last && prev == 'U' && pos > 1 &&
(word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. breaux
break
}
mp = "KS"
}
case 'Z':
if prev == 'S' || prev == 'Z' {
break
}
if word[pos+1] == 'H' {
// Chinese, e.g. Zhao
mp = "J"
skip = 1
break
}
if word[pos+1] == 'I' || word[pos+1] == 'O' || word[pos+1] == 'A' ||
(slavoGermanic && prev != 'T' && pos > 0) {
ms = "TS"
}
fallthrough
case 'Ç':
mp = "S"
default:
}
prev = c
result.metaphone += mp
if ms == "-" {
ms = mp
}
result.secondary += ms
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "\t%c -> [%s] [%s]\n", c, mp, ms)
}
if len(result.metaphone) >= maxLen && len(result.secondary) >= maxLen {
break
}
}
if len(result.metaphone) > maxLen {
result.metaphone = result.metaphone[0:maxLen]
}
if len(result.secondary) > maxLen {
result.secondary = result.secondary[0:maxLen]
}
if result.secondary == result.metaphone {
result.secondary = result.metaphone
}
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "%s: [%s] [%s]\n", result.literal, result.metaphone,
result.secondary)
}
return
}
func isVowel(c int) bool {
// TODO: Non-English support is rather limited
return c == 'A' || c == 'E' || c == 'I' || c == 'O' || c == 'U' || c == 'Y' ||
c == 'Ä' || c == 'Ö' || c == 'Ü' || c == 'Å' || c == 'É' || c == 'È' ||
c == 'Ï'
}
| {
if len(d) == 0 {
return nil
}
i := rand.Intn(len(d))
switch {
case d[i].literal != w:
case i > 0 && d[i-1].literal != w:
i--
case i < len(d)-1 && d[i+1].literal != w:
i++
case i == 0 && len(d) > 2 && d[i+2].literal != w:
i += 2
case i == len(d)-1 && len(d) > 2 && d[i-2].literal != w:
i -= 2
default:
return nil
}
return &d[i]
} | identifier_body |
broken_telephone.go | /*
* broken_telephone.go: The children's game of "broken telephone"
* implemented with goroutines passing messages read from stdin.
* Each goroutine mutates the message by replacing one word before
* passing it on.
*
* For Introduction to Go, Spring 2010
* Kimmo Kulovesi <kkuloves@cs.helsinki.fi>
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"rand"
"sort"
"strings"
"unicode"
"utf8"
)
var (
numberOfLinks = flag.Int("l", 20,
"Number of unreliable links to pass the message through")
wordList = flag.String("w", "/usr/share/dict/words",
"Filename of the word list to use")
printIntermediateStates = flag.Bool("i", false,
"Print intermediate states of the message")
debugMetaphone = flag.Bool("d", false,
"Print double metaphone algorithm debug output (very verbose!)")
allowNamesInWordList = flag.Bool("n", false,
"Allow names to be read from the word list")
dictionary metaphoneDict
)
// Minimum length of a word to consider it for substitution.
// TODO: Implement stopwords instead of using this limit.
const minWordLength = 4
// Default dicitionary capacity.
const dictCapacity = 100000
// The distance (in either direction) at which to "fudge" the word
// substitutes - increasing this gives more random matches.
const fudgeDistance = 4
// The maximum length of the phonetic representations to use, 0 for unlimited.
// The traditional metaphone algorithm uses 4.
const doubleMetaphoneMaxLen = 4
// mutate copies m but randomly substitutes one word.
func mutate(m message) message {
if len(m) == 0 {
return m
}
n := make(message, len(m))
copy(n, m)
for tries := 0; tries < 3; tries++ {
i := rand.Intn(len(m))
word := m[i]
if len(word.original) >= minWordLength {
n[i] = dictionary.randomSubstituteFor(word)
return n
}
}
return n
}
// telephone passes mutated messages between from and to.
func telephone(from, to chan message) {
for m := <-from; m != nil; m = <-from {
if *printIntermediateStates {
fmt.Printf("Heard: %s\n", m)
}
to <- mutate(m)
}
to <- nil
}
// main reads the dictionary, creates the telephone system and then passes
// messages read from stdin to it.
func main() {
flag.Parse()
if file, err := os.Open(*wordList, os.O_RDONLY, 0); err == nil {
fmt.Fprintf(os.Stderr, "Reading words from \"%s\"...\n", *wordList)
dictionary = readWords(file)
file.Close()
} else {
fmt.Fprintf(os.Stderr, "%s: %s\n", *wordList, err)
}
if dictionary.Len() < 1 {
fmt.Fprintf(os.Stderr, "No words read, aborting.")
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "%d words in dictionary\n", dictionary.Len())
send := make(chan message)
receive := send
for i := *numberOfLinks; i > 0; i-- {
c := make(chan message)
go telephone(receive, c)
receive = c
}
input := bufio.NewReader(os.Stdin)
for {
if line, err := input.ReadString('\n'); err == nil {
words := strings.Fields(line[0 : len(line)-1])
msg := make(message, len(words))
for i, word := range words {
msg[i] = doubleMetaphone(word)
}
send <- msg
fmt.Println(<-receive)
} else {
if err != os.EOF {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
}
break
}
}
send <- nil
}
type metaphoneWord struct {
original, literal, metaphone, secondary string
}
type message []*metaphoneWord
func (m message) String() (s string) {
for i, word := range m {
s += word.original
if i != len(m)-1 {
s += " "
}
}
return
}
type metaphoneDict []metaphoneWord
func newMetaphoneDict() metaphoneDict { return make(metaphoneDict, 0, dictCapacity) }
func (d metaphoneDict) Len() int { return len(d) }
func (d metaphoneDict) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d metaphoneDict) | (i, j int) bool { return d[i].metaphone < d[j].metaphone }
// phoneticLocation returns the index where metaphone is or would be sorted.
func (d metaphoneDict) phoneticLocation(metaphone string) (i int) {
left, right := 0, len(d)
for left < right {
i = left + ((right - left) / 2)
if d[i].metaphone < metaphone {
left = i + 1
} else {
right = i
}
}
return
}
// matches returns a slice containing all exact matches in d for metaphone.
func (d metaphoneDict) matches(metaphone string) metaphoneDict {
var l, r int
i := d.phoneticLocation(metaphone)
for r = i; r < len(d) && d[r].metaphone == metaphone; r++ {
}
for l = i; l >= 0 && d[l].metaphone == metaphone; l-- {
}
l++
if r-l < fudgeDistance*2 {
l -= fudgeDistance
r += fudgeDistance
}
if l < 0 {
l = 0
}
if r > len(d) {
r = len(d)
}
return d[l:r]
}
// randomNonEqual returns a randomly selected word in d which is not
// literally equal to w. Returns -1 if no such word exists.
// (The assumption is that the same word literal appears at most twice.)
func (d metaphoneDict) randomNonEqual(w string) *metaphoneWord {
if len(d) == 0 {
return nil
}
i := rand.Intn(len(d))
switch {
case d[i].literal != w:
case i > 0 && d[i-1].literal != w:
i--
case i < len(d)-1 && d[i+1].literal != w:
i++
case i == 0 && len(d) > 2 && d[i+2].literal != w:
i += 2
case i == len(d)-1 && len(d) > 2 && d[i-2].literal != w:
i -= 2
default:
return nil
}
return &d[i]
}
// randomSubstituteFor returns a semi-random but usually phonetically
// close substitute for w.
func (d metaphoneDict) randomSubstituteFor(w *metaphoneWord) *metaphoneWord {
mp := w.metaphone
if rand.Intn(2) == 0 {
mp = w.secondary
}
match := d.matches(mp).randomNonEqual(w.literal)
if match != nil {
return match
}
return w
}
// Push adds mp to the end of d, reallocating space if necessary.
func (d *metaphoneDict) Push(mp metaphoneWord) {
dict := *d
if len(dict) < cap(dict) {
dict = dict[0 : len(dict)+1]
} else {
dict = make(metaphoneDict, len(dict)+1, cap(dict)+dictCapacity)
copy(dict, *d)
}
dict[len(dict)-1] = mp
*d = dict
}
// readWords creates a metaphone dictionary from input of one word per line.
// The dictionary is sorted by phonetic representation.
func readWords(input io.Reader) metaphoneDict {
dict := newMetaphoneDict()
rd := bufio.NewReader(input)
for {
if word, err := rd.ReadString('\n'); err == nil {
word = strings.TrimSpace(word)
if !*allowNamesInWordList {
rune, _ := utf8.DecodeRuneInString(word)
if unicode.IsUpper(rune) {
continue
}
}
if strings.Index(word, " ") != -1 || len(word) < minWordLength {
continue
}
mp := *doubleMetaphone(word)
dict.Push(mp)
if mp.metaphone != mp.secondary {
// Secondary phonetic representation
dict.Push(metaphoneWord{word, mp.literal,
mp.secondary, mp.metaphone})
}
} else {
if err != os.EOF {
fmt.Printf("Error: %s\n", err)
}
break
}
}
sort.Sort(dict)
return dict
}
// doubleMetaphone is like doubleMetaphoneLimited but uses the default maxLen.
func doubleMetaphone(word string) (result *metaphoneWord) {
if doubleMetaphoneMaxLen > 0 {
return doubleMetaphoneLimited(word, doubleMetaphoneMaxLen)
}
return doubleMetaphoneLimited(word, len(word)*2)
}
// doubleMetaphone returns two phonetic representations of an English word.
// The secondary representation may equal primary. The implementation is
// currently not fully complete with all special cases. The phonetic
// representations are limited to maxLen length.
func doubleMetaphoneLimited(word string, maxLen int) (result *metaphoneWord) {
result = new(metaphoneWord)
result.original = word
word = strings.ToUpper(word)
// TODO: Strip punctuation
result.literal = word
prev, skip, last, slavoGermanic := 0, 0, len(word)-1, false
testSlavoGermanic: for pos, c := range word {
switch c {
case 'C':
if pos == last || word[pos+1] != 'Z' {
break
}
fallthrough
case 'W', 'K':
slavoGermanic = true
break testSlavoGermanic
}
}
word += " " // Allow indexing beyond the end
for pos, c := range word {
if c == ' ' {
break
}
if skip > 0 {
prev = 0
skip--
continue
}
mp, ms := "", "-"
switch c {
case 'A', 'E', 'I', 'O', 'U', 'Y', 'Ü', 'Ä', 'Ö', 'Å', 'É', 'È', 'Ï':
if pos == 0 {
// Initial vowel
mp = "A"
} else if pos == 1 && prev == 'W' {
// W + vowel at the start of the word
mp, ms = "A", "F"
}
case 'B':
if prev == 'M' && pos > 1 && word[pos-2] == 'U' &&
(pos == last || (word[pos+1] == 'E' &&
word[pos+2] == 'R')) {
// e.g. dumb, thumb
break
}
if prev != 'B' {
mp = "P"
}
case 'C':
if prev == 'X' {
break
}
if pos == 0 && strings.HasPrefix(word, "CAESAR") {
mp = "S"
skip = 1
break
}
next := word[pos+1]
if next == 'H' {
skip = 1
n2, n3 := word[pos+2], word[pos+3]
if pos > 0 {
if n2 == 'A' && n3 == 'E' {
// michael
mp, ms = "K", "X"
break
}
if (pos == 1 && (prev == 'M' || prev == 'S')) ||
n2 == 'T' || n2 == 'S' {
// Mc, Sch, -cht, -chs
mp = "K"
break
}
if (prev == 'A' || prev == 'O' || prev == 'U' ||
prev == 'E') && (n2 == 'L' || n2 == 'R' || n2 == 'N' ||
n2 == 'M' || n2 == 'B' || n2 == 'B' ||
n2 == 'H' || n2 == 'F' || n2 == 'V' ||
n2 == 'W') {
// e.g. wachtler, wechsler, but not tichner
mp = "K"
break
}
if pos > 1 {
p2 := word[pos-2]
if prev == 'R' &&
((p2 == 'O' && n2 == 'E' && n3 == 'S') ||
(p2 == 'O' && n2 == 'I' && n3 == 'D') ||
(p2 == 'A' && n2 == 'I' && n3 == 'T')) {
// orchestra, orchid, architect (but not arch)
mp = "K"
break
}
}
} else {
// pos == 0
n4, n5 := word[pos+4], word[pos+5]
if (n2 == 'A' && n3 == 'R' && ((n4 == 'A' && n5 == 'C') ||
(n4 == 'I' && n5 == 'S'))) ||
(n2 == 'E' && n3 == 'M') || (n2 == 'Y' && n3 == 'M') ||
(n2 == 'I' && n3 == 'A') ||
(n2 == 'O' && n3 == 'R' && (n4 != 'O' || n5 != 'E')) {
// e.g. character, charisma, chorus, chemistry
// but not "chore"
mp = "K"
} else {
switch n2 {
case 'L', 'R', 'N', 'M', 'B', 'H', 'F', 'V', 'W', ' ':
mp = "K"
default:
mp = "X"
}
}
break
}
mp, ms = "X", "K"
break
} else if next == 'Z' {
if pos < 2 || word[pos-1] != 'I' || word[pos-2] == 'W' {
// cz, not wicz
mp, ms = "S", "X"
skip = 1
break
}
} else if next == 'C' {
n2 := word[pos+2]
if n2 == 'I' && word[pos+3] == 'A' {
// -ccia, e.g. focaccia
mp = "X"
skip = 2
break
}
if pos != 2 || prev != 'M' {
// -cc, but not e.g. McClellan
if n2 == 'I' || n2 == 'E' ||
(n2 == 'H' && word[pos+3] != 'U') {
// e.g. bellocchio, but not bacchus
skip = 3
if pos == 1 && prev == 'A' {
// e.g. accident
mp = "KS"
break
} else if prev == 'U' && n2 == 'E' &&
(word[pos+4] == 'S' || word[pos+4] == 'E') {
// succeed, success
mp = "KS"
break
}
mp = "X"
break
}
}
if n2 != 'I' && n2 != 'E' {
skip = 1
}
} else if next == 'K' || next == 'Q' {
skip = 1
} else if next == 'I' {
mp = "S"
skip = 1
n2 := word[pos+2]
if n2 == 'O' || n2 == 'E' || n2 == 'A' {
// cio, cie, cia
ms = "X"
}
break
} else if next == 'E' || next == 'Y' {
skip = 1
mp = "S"
break
}
mp = "K"
case 'D':
if prev != 'D' && prev != 'T' {
if word[pos+1] == 'G' {
skip = 1
switch word[pos+2] {
case 'E', 'I', 'Y':
// e.g. "edge"
mp = "J"
default:
// e.g. "edgar"
mp = "K"
}
break
}
mp = "T"
}
case 'F', 'V':
if prev != c {
mp = "F"
}
case 'G':
next := word[pos+1]
if next == 'H' {
skip = 1
if !isVowel(prev) {
mp = "K"
break
}
if pos == 0 {
if word[pos+2] == 'I' {
mp = "J"
} else {
mp = "K"
}
break
}
if pos > 1 {
if word[pos-2] == 'B' || word[pos-2] == 'H' ||
word[pos-2] == 'D' {
// e.g. hugh
break
}
if pos > 2 {
p3 := word[pos-3]
if p3 == 'B' || p3 == 'H' || p3 == 'D' {
// e.g. bough
break
}
if pos > 3 && (word[pos-4] == 'B' || word[pos-4] == 'H') {
// e.g. brought
break
}
if prev == 'U' && (p3 == 'C' || p3 == 'G' ||
p3 == 'L' || p3 == 'R' ||
p3 == 'T') {
// e.g. laugh, cough, rough, tough
mp = "F"
break
}
}
}
if prev != 'I' {
mp = "K"
}
break
}
if next == 'N' {
skip = 1
if !slavoGermanic {
if pos == 1 && isVowel(prev) {
mp, ms = "KN", "N"
break
} else if word[pos+2] != 'E' || word[pos+3] != 'Y' {
// not e.g. cagney
mp, ms = "N", "KN"
break
}
}
mp = "KN"
break
}
if next == 'L' {
if word[pos+2] == 'I' && !slavoGermanic {
// e.g. tagliaro
mp, ms = "KL", "L"
skip = 1
}
} else if next == 'E' || next == 'I' || next == 'Y' || next == 'G' {
skip = 1
n2 := word[pos+2]
if next != 'G' {
if pos == 0 {
if (next == 'E' && (n2 == 'S' || n2 == 'P' ||
n2 == 'B' || n2 == 'L' ||
n2 == 'Y' || n2 == 'I' ||
n2 == 'R')) || next == 'Y' ||
(next == 'I' && (n2 == 'L' || n2 == 'N')) {
skip = 1
mp, ms = "K", "J"
break
}
}
if !(next == 'I' || prev == 'I' || prev == 'E' ||
(next == 'Y' && (prev == 'R' || prev == 'O')) ||
(next == 'E' && pos > 0 && word[pos-1] != 'R')) {
// -ger-, -gy-
mp = "K"
if !(pos == 3 && next == 'E' &&
strings.HasPrefix(word, "DANGER") ||
strings.HasPrefix(word, "RANGER") ||
strings.HasPrefix(word, "MANGER")) {
ms = "J"
}
break
}
} else if !(n2 == 'I' && (prev == 'A' || prev == 'O')) {
// not -aggi -oggi
mp = "K"
break
}
if !strings.HasPrefix(word, "SCH") ||
(next == 'E' && n2 == 'T') {
// obvious Germanic
mp = "K"
} else if next == 'I' && pos == (last-3) &&
strings.HasSuffix(word, "ER") {
// -gier suffix
mp = "J"
} else {
mp, ms = "J", "K"
}
break
}
mp = "K"
case 'H':
if pos == 0 || isVowel(prev) {
next, _ := utf8.DecodeRuneInString(word[pos+1 : len(word)])
if isVowel(next) {
// H between two vowels, or at the beginning followed by a vowel
mp = "H"
skip = 1
}
}
case 'J':
if prev == 'S' || prev == 'K' || prev == 'L' || prev == 'J' {
break
}
next := word[pos+1]
if pos == 0 {
if next == 'O' && word[pos+2] == 'S' && word[pos+3] == 'E' {
if word[pos+4] == ' ' {
// Jose
mp = "H"
} else {
mp, ms = "J", "H"
}
break
}
mp, ms = "J", "A"
} else if !slavoGermanic && isVowel(prev) &&
(next == 'A' || next == 'O') {
mp, ms = "J", "H"
} else if pos == last {
mp, ms = "J", ""
} else {
switch next {
case 'L', 'T', 'K', 'S', 'N', 'M', 'B', 'Z':
// NOP
default:
mp = "J"
}
}
case 'Q', 'K':
if prev != c && prev != 'C' {
mp = "K"
}
case 'L':
if word[pos+1] == 'L' {
skip = 1
if pos > 0 && ((word[pos+3] == ' ' &&
(((word[pos+2] == 'O' || word[pos+2] == 'A') &&
word[pos-1] == 'I') || (word[pos+2] == 'E' &&
word[pos-1] == 'A'))) ||
((word[last] == 'S' && (word[last-1] == 'A' ||
word[last-1] == 'O')) ||
(word[last] == 'A' || word[last] == 'O') &&
(word[pos-1] == 'A' && word[pos+2] == 'E'))) {
// Spanish, -illo, -illa, -alle
ms = ""
}
}
mp = "L"
case 'M':
if prev != 'M' {
mp = "M"
}
case 'N':
if pos == 1 && (prev == 'K' || prev == 'G' || prev == 'P') {
// Skip GN, KN, PN at the start of the word
result.metaphone, result.secondary = "", ""
break
}
fallthrough
case 'Ñ':
if prev != c {
mp = "N"
}
case 'P':
next := word[pos+1]
if next == 'H' {
mp = "F"
skip = 1
break
}
if next == 'S' && pos == 0 {
// Ignore PS at the start of the word
skip = 1
break
}
if next == 'P' || next == 'B' {
skip = 1
}
mp = "P"
// case 'Q': is grouped with K
case 'R':
if prev == 'R' {
break
}
if pos == last && !slavoGermanic && prev == 'E' && pos > 1 &&
word[pos-2] == 'I' && (pos < 4 || word[pos-4] != 'M' ||
!(word[pos-3] == 'E' || word[pos-3] == 'A')) {
// French, e.g. rogier, but not e.g. hochmeier
mp, ms = "", "R"
} else {
mp = "R"
}
case 'S', 'ß', 'Š':
if prev == 'S' {
break
}
next := word[pos+1]
if (prev == 'I' || prev == 'Y') && next == 'L' {
// isl, ysl, e.g. island, isle, carlysle
break
}
if pos == 0 {
if next == 'M' || next == 'N' || next == 'L' || next == 'W' {
mp, ms = "S", "X"
break
}
if strings.HasPrefix(word, "SUGAR") {
mp, ms = "X", "S"
break
}
}
if next == 'H' {
if word[pos+2] == 'O' {
if (word[pos+3] == 'E' && word[pos+4] == 'K') ||
(word[pos+3] == 'L' && (word[pos+4] == 'M' ||
word[pos+4] == 'Z')) {
// holm, holz, hoek
mp = "S"
break
}
} else if word[pos+2] == 'E' && word[pos+3] == 'I' &&
word[pos+4] == 'M' {
// heim
mp = "S"
break
}
mp = "X"
skip = 1
} else if next == 'I' && (word[pos+2] == 'O' || word[pos+2] == 'A') {
// sio, sia
mp = "S"
if !slavoGermanic {
ms = "X"
}
skip = 2
} else if next == 'Z' {
mp, ms = "S", "X"
skip = 1
} else if next == 'C' {
skip = 2
if word[pos+2] == 'H' {
n3, n4 := word[pos+3], word[pos+4]
if (n3 == 'O' && n4 == 'O') || (n3 == 'U' && n4 == 'Y') ||
(n3 == 'E' && (n4 == 'D' || n4 == 'M')) {
// Dutch origin, e.g. "school", "schooner"
mp = "SK"
} else if n3 == 'E' && (n4 == 'R' || n4 == 'N') {
mp, ms = "X", "SK"
} else {
mp = "X"
if pos == 0 && !isVowel(int(word[3])) && word[3] != 'W' {
ms = "S"
}
}
} else if word[pos+2] == 'I' || word[pos+2] == 'E' ||
word[pos+2] == 'Y' {
mp = "S"
} else {
mp = "SK"
skip = 1
// TODO: Check correctness of skip
}
} else if pos == last && prev == 'I' {
if pos > 1 && (word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. artois
ms = "S"
} else {
mp = "S"
}
} else {
mp = "S"
}
case 'T':
if prev == 'T' {
if word[pos+1] == 'H' {
// tth
mp, ms = "0", "T"
skip = 1
} else {
mp = "T"
}
break
}
if prev == 'D' {
break
}
next := word[pos+1]
if next == 'I' {
if word[pos+2] == 'A' ||
(word[pos+2] == 'O' && word[pos+3] == 'N') {
// tia, tion
mp = "X"
skip = 2
}
} else if next == 'C' && word[pos+2] == 'H' {
// tch
mp = "X"
skip = 2
} else if next == 'H' {
skip = 1
if word[pos+3] == 'M' {
if word[pos+2] == 'O' || word[pos+2] == 'A' {
mp = "T"
break
}
}
mp, ms = "0", "T"
} else if next != 'T' {
mp = "T"
}
// case 'V': is grouped with F
case 'W':
next := word[pos+1]
if next == 'R' {
if pos != 0 {
mp = "R"
}
skip = 1
break
}
if pos == 0 {
if next == 'H' {
mp = "A"
}
break
}
if (pos == last && isVowel(prev)) ||
strings.HasPrefix(word, "SCH") {
ms = "F"
break
}
n2, n3 := word[pos+2], word[pos+3]
if (prev == 'E' || prev == 'O') && next == 'S' && n2 == 'K' &&
(n3 == 'I' || n3 == 'Y') {
// -ewski, -ewsky, -owski, -owsky
ms = "F"
} else if next == 'I' && n3 == 'Z' && (n2 == 'C' || n2 == 'T') {
// -wicz, -witz
mp, ms = "TS", "FX"
skip = 3
}
case 'X':
if pos == 0 {
// Initial X pronounced like a Z, e.g. Xavier
mp = "S"
} else if prev != 'X' {
if pos == last && prev == 'U' && pos > 1 &&
(word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. breaux
break
}
mp = "KS"
}
case 'Z':
if prev == 'S' || prev == 'Z' {
break
}
if word[pos+1] == 'H' {
// Chinese, e.g. Zhao
mp = "J"
skip = 1
break
}
if word[pos+1] == 'I' || word[pos+1] == 'O' || word[pos+1] == 'A' ||
(slavoGermanic && prev != 'T' && pos > 0) {
ms = "TS"
}
fallthrough
case 'Ç':
mp = "S"
default:
}
prev = c
result.metaphone += mp
if ms == "-" {
ms = mp
}
result.secondary += ms
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "\t%c -> [%s] [%s]\n", c, mp, ms)
}
if len(result.metaphone) >= maxLen && len(result.secondary) >= maxLen {
break
}
}
if len(result.metaphone) > maxLen {
result.metaphone = result.metaphone[0:maxLen]
}
if len(result.secondary) > maxLen {
result.secondary = result.secondary[0:maxLen]
}
if result.secondary == result.metaphone {
result.secondary = result.metaphone
}
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "%s: [%s] [%s]\n", result.literal, result.metaphone,
result.secondary)
}
return
}
func isVowel(c int) bool {
// TODO: Non-English support is rather limited
return c == 'A' || c == 'E' || c == 'I' || c == 'O' || c == 'U' || c == 'Y' ||
c == 'Ä' || c == 'Ö' || c == 'Ü' || c == 'Å' || c == 'É' || c == 'È' ||
c == 'Ï'
}
| Less | identifier_name |
broken_telephone.go | /*
* broken_telephone.go: The children's game of "broken telephone"
* implemented with goroutines passing messages read from stdin.
* Each goroutine mutates the message by replacing one word before
* passing it on.
*
* For Introduction to Go, Spring 2010
* Kimmo Kulovesi <kkuloves@cs.helsinki.fi>
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"rand"
"sort"
"strings"
"unicode"
"utf8"
)
var (
numberOfLinks = flag.Int("l", 20,
"Number of unreliable links to pass the message through")
wordList = flag.String("w", "/usr/share/dict/words",
"Filename of the word list to use")
printIntermediateStates = flag.Bool("i", false,
"Print intermediate states of the message")
debugMetaphone = flag.Bool("d", false,
"Print double metaphone algorithm debug output (very verbose!)")
allowNamesInWordList = flag.Bool("n", false,
"Allow names to be read from the word list")
dictionary metaphoneDict
)
// Minimum length of a word to consider it for substitution.
// TODO: Implement stopwords instead of using this limit.
const minWordLength = 4
// Default dicitionary capacity.
const dictCapacity = 100000
// The distance (in either direction) at which to "fudge" the word
// substitutes - increasing this gives more random matches.
const fudgeDistance = 4
// The maximum length of the phonetic representations to use, 0 for unlimited.
// The traditional metaphone algorithm uses 4.
const doubleMetaphoneMaxLen = 4
// mutate copies m but randomly substitutes one word.
func mutate(m message) message {
if len(m) == 0 {
return m
}
n := make(message, len(m))
copy(n, m)
for tries := 0; tries < 3; tries++ {
i := rand.Intn(len(m))
word := m[i]
if len(word.original) >= minWordLength {
n[i] = dictionary.randomSubstituteFor(word)
return n
}
}
return n
}
// telephone passes mutated messages between from and to.
func telephone(from, to chan message) {
for m := <-from; m != nil; m = <-from {
if *printIntermediateStates {
fmt.Printf("Heard: %s\n", m)
}
to <- mutate(m)
}
to <- nil
}
// main reads the dictionary, creates the telephone system and then passes
// messages read from stdin to it.
func main() {
flag.Parse()
if file, err := os.Open(*wordList, os.O_RDONLY, 0); err == nil {
fmt.Fprintf(os.Stderr, "Reading words from \"%s\"...\n", *wordList)
dictionary = readWords(file)
file.Close()
} else {
fmt.Fprintf(os.Stderr, "%s: %s\n", *wordList, err)
}
if dictionary.Len() < 1 {
fmt.Fprintf(os.Stderr, "No words read, aborting.")
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "%d words in dictionary\n", dictionary.Len())
send := make(chan message)
receive := send
for i := *numberOfLinks; i > 0; i-- {
c := make(chan message)
go telephone(receive, c)
receive = c
}
input := bufio.NewReader(os.Stdin)
for {
if line, err := input.ReadString('\n'); err == nil {
words := strings.Fields(line[0 : len(line)-1])
msg := make(message, len(words))
for i, word := range words {
msg[i] = doubleMetaphone(word)
}
send <- msg
fmt.Println(<-receive)
} else {
if err != os.EOF {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
}
break
}
}
send <- nil
}
type metaphoneWord struct {
original, literal, metaphone, secondary string
}
type message []*metaphoneWord
func (m message) String() (s string) {
for i, word := range m {
s += word.original
if i != len(m)-1 {
s += " "
}
}
return
}
type metaphoneDict []metaphoneWord
func newMetaphoneDict() metaphoneDict { return make(metaphoneDict, 0, dictCapacity) }
func (d metaphoneDict) Len() int { return len(d) }
func (d metaphoneDict) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d metaphoneDict) Less(i, j int) bool { return d[i].metaphone < d[j].metaphone }
// phoneticLocation returns the index where metaphone is or would be sorted.
func (d metaphoneDict) phoneticLocation(metaphone string) (i int) {
left, right := 0, len(d)
for left < right {
i = left + ((right - left) / 2)
if d[i].metaphone < metaphone {
left = i + 1
} else {
right = i
}
}
return
}
// matches returns a slice containing all exact matches in d for metaphone.
func (d metaphoneDict) matches(metaphone string) metaphoneDict {
var l, r int
i := d.phoneticLocation(metaphone)
for r = i; r < len(d) && d[r].metaphone == metaphone; r++ {
}
for l = i; l >= 0 && d[l].metaphone == metaphone; l-- {
}
l++
if r-l < fudgeDistance*2 {
l -= fudgeDistance
r += fudgeDistance
}
if l < 0 {
l = 0
}
if r > len(d) {
r = len(d)
}
return d[l:r]
}
// randomNonEqual returns a randomly selected word in d which is not
// literally equal to w. Returns -1 if no such word exists.
// (The assumption is that the same word literal appears at most twice.)
func (d metaphoneDict) randomNonEqual(w string) *metaphoneWord {
if len(d) == 0 {
return nil
}
i := rand.Intn(len(d))
switch {
case d[i].literal != w:
case i > 0 && d[i-1].literal != w:
i--
case i < len(d)-1 && d[i+1].literal != w:
i++
case i == 0 && len(d) > 2 && d[i+2].literal != w:
i += 2
case i == len(d)-1 && len(d) > 2 && d[i-2].literal != w:
i -= 2
default:
return nil
}
return &d[i]
}
// randomSubstituteFor returns a semi-random but usually phonetically
// close substitute for w.
func (d metaphoneDict) randomSubstituteFor(w *metaphoneWord) *metaphoneWord {
mp := w.metaphone
if rand.Intn(2) == 0 {
mp = w.secondary
}
match := d.matches(mp).randomNonEqual(w.literal)
if match != nil {
return match
}
return w
}
// Push adds mp to the end of d, reallocating space if necessary.
func (d *metaphoneDict) Push(mp metaphoneWord) {
dict := *d
if len(dict) < cap(dict) {
dict = dict[0 : len(dict)+1]
} else {
dict = make(metaphoneDict, len(dict)+1, cap(dict)+dictCapacity)
copy(dict, *d)
}
dict[len(dict)-1] = mp
*d = dict
}
// readWords creates a metaphone dictionary from input of one word per line.
// The dictionary is sorted by phonetic representation.
func readWords(input io.Reader) metaphoneDict {
dict := newMetaphoneDict()
rd := bufio.NewReader(input)
for {
if word, err := rd.ReadString('\n'); err == nil {
word = strings.TrimSpace(word)
if !*allowNamesInWordList {
rune, _ := utf8.DecodeRuneInString(word)
if unicode.IsUpper(rune) {
continue
}
}
if strings.Index(word, " ") != -1 || len(word) < minWordLength {
continue
}
mp := *doubleMetaphone(word)
dict.Push(mp)
if mp.metaphone != mp.secondary {
// Secondary phonetic representation
dict.Push(metaphoneWord{word, mp.literal,
mp.secondary, mp.metaphone})
}
} else {
if err != os.EOF {
fmt.Printf("Error: %s\n", err)
}
break
}
}
sort.Sort(dict)
return dict
}
// doubleMetaphone is like doubleMetaphoneLimited but uses the default maxLen.
func doubleMetaphone(word string) (result *metaphoneWord) {
if doubleMetaphoneMaxLen > 0 {
return doubleMetaphoneLimited(word, doubleMetaphoneMaxLen)
}
return doubleMetaphoneLimited(word, len(word)*2)
}
// doubleMetaphone returns two phonetic representations of an English word.
// The secondary representation may equal primary. The implementation is
// currently not fully complete with all special cases. The phonetic
// representations are limited to maxLen length.
func doubleMetaphoneLimited(word string, maxLen int) (result *metaphoneWord) {
result = new(metaphoneWord)
result.original = word
word = strings.ToUpper(word)
// TODO: Strip punctuation
result.literal = word
prev, skip, last, slavoGermanic := 0, 0, len(word)-1, false
testSlavoGermanic: for pos, c := range word {
switch c {
case 'C':
if pos == last || word[pos+1] != 'Z' {
break
}
fallthrough
case 'W', 'K':
slavoGermanic = true
break testSlavoGermanic
}
}
word += " " // Allow indexing beyond the end
for pos, c := range word {
if c == ' ' {
break
}
if skip > 0 {
prev = 0
skip--
continue
}
mp, ms := "", "-"
switch c {
case 'A', 'E', 'I', 'O', 'U', 'Y', 'Ü', 'Ä', 'Ö', 'Å', 'É', 'È', 'Ï':
if pos == 0 {
// Initial vowel
mp = "A"
} else if pos == 1 && prev == 'W' {
// W + vowel at the start of the word
mp, ms = "A", "F"
}
case 'B':
if prev == 'M' && pos > 1 && word[pos-2] == 'U' &&
(pos == last || (word[pos+1] == 'E' &&
word[pos+2] == 'R')) {
// e.g. dumb, thumb
break
}
if prev != 'B' {
mp = "P"
}
case 'C':
if prev == 'X' {
break
}
if pos == 0 && strings.HasPrefix(word, "CAESAR") {
mp = "S"
skip = 1
break
}
next := word[pos+1]
if next == 'H' {
skip = 1
n2, n3 := word[pos+2], word[pos+3] | // michael
mp, ms = "K", "X"
break
}
if (pos == 1 && (prev == 'M' || prev == 'S')) ||
n2 == 'T' || n2 == 'S' {
// Mc, Sch, -cht, -chs
mp = "K"
break
}
if (prev == 'A' || prev == 'O' || prev == 'U' ||
prev == 'E') && (n2 == 'L' || n2 == 'R' || n2 == 'N' ||
n2 == 'M' || n2 == 'B' || n2 == 'B' ||
n2 == 'H' || n2 == 'F' || n2 == 'V' ||
n2 == 'W') {
// e.g. wachtler, wechsler, but not tichner
mp = "K"
break
}
if pos > 1 {
p2 := word[pos-2]
if prev == 'R' &&
((p2 == 'O' && n2 == 'E' && n3 == 'S') ||
(p2 == 'O' && n2 == 'I' && n3 == 'D') ||
(p2 == 'A' && n2 == 'I' && n3 == 'T')) {
// orchestra, orchid, architect (but not arch)
mp = "K"
break
}
}
} else {
// pos == 0
n4, n5 := word[pos+4], word[pos+5]
if (n2 == 'A' && n3 == 'R' && ((n4 == 'A' && n5 == 'C') ||
(n4 == 'I' && n5 == 'S'))) ||
(n2 == 'E' && n3 == 'M') || (n2 == 'Y' && n3 == 'M') ||
(n2 == 'I' && n3 == 'A') ||
(n2 == 'O' && n3 == 'R' && (n4 != 'O' || n5 != 'E')) {
// e.g. character, charisma, chorus, chemistry
// but not "chore"
mp = "K"
} else {
switch n2 {
case 'L', 'R', 'N', 'M', 'B', 'H', 'F', 'V', 'W', ' ':
mp = "K"
default:
mp = "X"
}
}
break
}
mp, ms = "X", "K"
break
} else if next == 'Z' {
if pos < 2 || word[pos-1] != 'I' || word[pos-2] == 'W' {
// cz, not wicz
mp, ms = "S", "X"
skip = 1
break
}
} else if next == 'C' {
n2 := word[pos+2]
if n2 == 'I' && word[pos+3] == 'A' {
// -ccia, e.g. focaccia
mp = "X"
skip = 2
break
}
if pos != 2 || prev != 'M' {
// -cc, but not e.g. McClellan
if n2 == 'I' || n2 == 'E' ||
(n2 == 'H' && word[pos+3] != 'U') {
// e.g. bellocchio, but not bacchus
skip = 3
if pos == 1 && prev == 'A' {
// e.g. accident
mp = "KS"
break
} else if prev == 'U' && n2 == 'E' &&
(word[pos+4] == 'S' || word[pos+4] == 'E') {
// succeed, success
mp = "KS"
break
}
mp = "X"
break
}
}
if n2 != 'I' && n2 != 'E' {
skip = 1
}
} else if next == 'K' || next == 'Q' {
skip = 1
} else if next == 'I' {
mp = "S"
skip = 1
n2 := word[pos+2]
if n2 == 'O' || n2 == 'E' || n2 == 'A' {
// cio, cie, cia
ms = "X"
}
break
} else if next == 'E' || next == 'Y' {
skip = 1
mp = "S"
break
}
mp = "K"
case 'D':
if prev != 'D' && prev != 'T' {
if word[pos+1] == 'G' {
skip = 1
switch word[pos+2] {
case 'E', 'I', 'Y':
// e.g. "edge"
mp = "J"
default:
// e.g. "edgar"
mp = "K"
}
break
}
mp = "T"
}
case 'F', 'V':
if prev != c {
mp = "F"
}
case 'G':
next := word[pos+1]
if next == 'H' {
skip = 1
if !isVowel(prev) {
mp = "K"
break
}
if pos == 0 {
if word[pos+2] == 'I' {
mp = "J"
} else {
mp = "K"
}
break
}
if pos > 1 {
if word[pos-2] == 'B' || word[pos-2] == 'H' ||
word[pos-2] == 'D' {
// e.g. hugh
break
}
if pos > 2 {
p3 := word[pos-3]
if p3 == 'B' || p3 == 'H' || p3 == 'D' {
// e.g. bough
break
}
if pos > 3 && (word[pos-4] == 'B' || word[pos-4] == 'H') {
// e.g. brought
break
}
if prev == 'U' && (p3 == 'C' || p3 == 'G' ||
p3 == 'L' || p3 == 'R' ||
p3 == 'T') {
// e.g. laugh, cough, rough, tough
mp = "F"
break
}
}
}
if prev != 'I' {
mp = "K"
}
break
}
if next == 'N' {
skip = 1
if !slavoGermanic {
if pos == 1 && isVowel(prev) {
mp, ms = "KN", "N"
break
} else if word[pos+2] != 'E' || word[pos+3] != 'Y' {
// not e.g. cagney
mp, ms = "N", "KN"
break
}
}
mp = "KN"
break
}
if next == 'L' {
if word[pos+2] == 'I' && !slavoGermanic {
// e.g. tagliaro
mp, ms = "KL", "L"
skip = 1
}
} else if next == 'E' || next == 'I' || next == 'Y' || next == 'G' {
skip = 1
n2 := word[pos+2]
if next != 'G' {
if pos == 0 {
if (next == 'E' && (n2 == 'S' || n2 == 'P' ||
n2 == 'B' || n2 == 'L' ||
n2 == 'Y' || n2 == 'I' ||
n2 == 'R')) || next == 'Y' ||
(next == 'I' && (n2 == 'L' || n2 == 'N')) {
skip = 1
mp, ms = "K", "J"
break
}
}
if !(next == 'I' || prev == 'I' || prev == 'E' ||
(next == 'Y' && (prev == 'R' || prev == 'O')) ||
(next == 'E' && pos > 0 && word[pos-1] != 'R')) {
// -ger-, -gy-
mp = "K"
if !(pos == 3 && next == 'E' &&
strings.HasPrefix(word, "DANGER") ||
strings.HasPrefix(word, "RANGER") ||
strings.HasPrefix(word, "MANGER")) {
ms = "J"
}
break
}
} else if !(n2 == 'I' && (prev == 'A' || prev == 'O')) {
// not -aggi -oggi
mp = "K"
break
}
if !strings.HasPrefix(word, "SCH") ||
(next == 'E' && n2 == 'T') {
// obvious Germanic
mp = "K"
} else if next == 'I' && pos == (last-3) &&
strings.HasSuffix(word, "ER") {
// -gier suffix
mp = "J"
} else {
mp, ms = "J", "K"
}
break
}
mp = "K"
case 'H':
if pos == 0 || isVowel(prev) {
next, _ := utf8.DecodeRuneInString(word[pos+1 : len(word)])
if isVowel(next) {
// H between two vowels, or at the beginning followed by a vowel
mp = "H"
skip = 1
}
}
case 'J':
if prev == 'S' || prev == 'K' || prev == 'L' || prev == 'J' {
break
}
next := word[pos+1]
if pos == 0 {
if next == 'O' && word[pos+2] == 'S' && word[pos+3] == 'E' {
if word[pos+4] == ' ' {
// Jose
mp = "H"
} else {
mp, ms = "J", "H"
}
break
}
mp, ms = "J", "A"
} else if !slavoGermanic && isVowel(prev) &&
(next == 'A' || next == 'O') {
mp, ms = "J", "H"
} else if pos == last {
mp, ms = "J", ""
} else {
switch next {
case 'L', 'T', 'K', 'S', 'N', 'M', 'B', 'Z':
// NOP
default:
mp = "J"
}
}
case 'Q', 'K':
if prev != c && prev != 'C' {
mp = "K"
}
case 'L':
if word[pos+1] == 'L' {
skip = 1
if pos > 0 && ((word[pos+3] == ' ' &&
(((word[pos+2] == 'O' || word[pos+2] == 'A') &&
word[pos-1] == 'I') || (word[pos+2] == 'E' &&
word[pos-1] == 'A'))) ||
((word[last] == 'S' && (word[last-1] == 'A' ||
word[last-1] == 'O')) ||
(word[last] == 'A' || word[last] == 'O') &&
(word[pos-1] == 'A' && word[pos+2] == 'E'))) {
// Spanish, -illo, -illa, -alle
ms = ""
}
}
mp = "L"
case 'M':
if prev != 'M' {
mp = "M"
}
case 'N':
if pos == 1 && (prev == 'K' || prev == 'G' || prev == 'P') {
// Skip GN, KN, PN at the start of the word
result.metaphone, result.secondary = "", ""
break
}
fallthrough
case 'Ñ':
if prev != c {
mp = "N"
}
case 'P':
next := word[pos+1]
if next == 'H' {
mp = "F"
skip = 1
break
}
if next == 'S' && pos == 0 {
// Ignore PS at the start of the word
skip = 1
break
}
if next == 'P' || next == 'B' {
skip = 1
}
mp = "P"
// case 'Q': is grouped with K
case 'R':
if prev == 'R' {
break
}
if pos == last && !slavoGermanic && prev == 'E' && pos > 1 &&
word[pos-2] == 'I' && (pos < 4 || word[pos-4] != 'M' ||
!(word[pos-3] == 'E' || word[pos-3] == 'A')) {
// French, e.g. rogier, but not e.g. hochmeier
mp, ms = "", "R"
} else {
mp = "R"
}
case 'S', 'ß', 'Š':
if prev == 'S' {
break
}
next := word[pos+1]
if (prev == 'I' || prev == 'Y') && next == 'L' {
// isl, ysl, e.g. island, isle, carlysle
break
}
if pos == 0 {
if next == 'M' || next == 'N' || next == 'L' || next == 'W' {
mp, ms = "S", "X"
break
}
if strings.HasPrefix(word, "SUGAR") {
mp, ms = "X", "S"
break
}
}
if next == 'H' {
if word[pos+2] == 'O' {
if (word[pos+3] == 'E' && word[pos+4] == 'K') ||
(word[pos+3] == 'L' && (word[pos+4] == 'M' ||
word[pos+4] == 'Z')) {
// holm, holz, hoek
mp = "S"
break
}
} else if word[pos+2] == 'E' && word[pos+3] == 'I' &&
word[pos+4] == 'M' {
// heim
mp = "S"
break
}
mp = "X"
skip = 1
} else if next == 'I' && (word[pos+2] == 'O' || word[pos+2] == 'A') {
// sio, sia
mp = "S"
if !slavoGermanic {
ms = "X"
}
skip = 2
} else if next == 'Z' {
mp, ms = "S", "X"
skip = 1
} else if next == 'C' {
skip = 2
if word[pos+2] == 'H' {
n3, n4 := word[pos+3], word[pos+4]
if (n3 == 'O' && n4 == 'O') || (n3 == 'U' && n4 == 'Y') ||
(n3 == 'E' && (n4 == 'D' || n4 == 'M')) {
// Dutch origin, e.g. "school", "schooner"
mp = "SK"
} else if n3 == 'E' && (n4 == 'R' || n4 == 'N') {
mp, ms = "X", "SK"
} else {
mp = "X"
if pos == 0 && !isVowel(int(word[3])) && word[3] != 'W' {
ms = "S"
}
}
} else if word[pos+2] == 'I' || word[pos+2] == 'E' ||
word[pos+2] == 'Y' {
mp = "S"
} else {
mp = "SK"
skip = 1
// TODO: Check correctness of skip
}
} else if pos == last && prev == 'I' {
if pos > 1 && (word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. artois
ms = "S"
} else {
mp = "S"
}
} else {
mp = "S"
}
case 'T':
if prev == 'T' {
if word[pos+1] == 'H' {
// tth
mp, ms = "0", "T"
skip = 1
} else {
mp = "T"
}
break
}
if prev == 'D' {
break
}
next := word[pos+1]
if next == 'I' {
if word[pos+2] == 'A' ||
(word[pos+2] == 'O' && word[pos+3] == 'N') {
// tia, tion
mp = "X"
skip = 2
}
} else if next == 'C' && word[pos+2] == 'H' {
// tch
mp = "X"
skip = 2
} else if next == 'H' {
skip = 1
if word[pos+3] == 'M' {
if word[pos+2] == 'O' || word[pos+2] == 'A' {
mp = "T"
break
}
}
mp, ms = "0", "T"
} else if next != 'T' {
mp = "T"
}
// case 'V': is grouped with F
case 'W':
next := word[pos+1]
if next == 'R' {
if pos != 0 {
mp = "R"
}
skip = 1
break
}
if pos == 0 {
if next == 'H' {
mp = "A"
}
break
}
if (pos == last && isVowel(prev)) ||
strings.HasPrefix(word, "SCH") {
ms = "F"
break
}
n2, n3 := word[pos+2], word[pos+3]
if (prev == 'E' || prev == 'O') && next == 'S' && n2 == 'K' &&
(n3 == 'I' || n3 == 'Y') {
// -ewski, -ewsky, -owski, -owsky
ms = "F"
} else if next == 'I' && n3 == 'Z' && (n2 == 'C' || n2 == 'T') {
// -wicz, -witz
mp, ms = "TS", "FX"
skip = 3
}
case 'X':
if pos == 0 {
// Initial X pronounced like a Z, e.g. Xavier
mp = "S"
} else if prev != 'X' {
if pos == last && prev == 'U' && pos > 1 &&
(word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. breaux
break
}
mp = "KS"
}
case 'Z':
if prev == 'S' || prev == 'Z' {
break
}
if word[pos+1] == 'H' {
// Chinese, e.g. Zhao
mp = "J"
skip = 1
break
}
if word[pos+1] == 'I' || word[pos+1] == 'O' || word[pos+1] == 'A' ||
(slavoGermanic && prev != 'T' && pos > 0) {
ms = "TS"
}
fallthrough
case 'Ç':
mp = "S"
default:
}
prev = c
result.metaphone += mp
if ms == "-" {
ms = mp
}
result.secondary += ms
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "\t%c -> [%s] [%s]\n", c, mp, ms)
}
if len(result.metaphone) >= maxLen && len(result.secondary) >= maxLen {
break
}
}
if len(result.metaphone) > maxLen {
result.metaphone = result.metaphone[0:maxLen]
}
if len(result.secondary) > maxLen {
result.secondary = result.secondary[0:maxLen]
}
if result.secondary == result.metaphone {
result.secondary = result.metaphone
}
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "%s: [%s] [%s]\n", result.literal, result.metaphone,
result.secondary)
}
return
}
func isVowel(c int) bool {
// TODO: Non-English support is rather limited
return c == 'A' || c == 'E' || c == 'I' || c == 'O' || c == 'U' || c == 'Y' ||
c == 'Ä' || c == 'Ö' || c == 'Ü' || c == 'Å' || c == 'É' || c == 'È' ||
c == 'Ï'
} | if pos > 0 {
if n2 == 'A' && n3 == 'E' { | random_line_split |
broken_telephone.go | /*
* broken_telephone.go: The children's game of "broken telephone"
* implemented with goroutines passing messages read from stdin.
* Each goroutine mutates the message by replacing one word before
* passing it on.
*
* For Introduction to Go, Spring 2010
* Kimmo Kulovesi <kkuloves@cs.helsinki.fi>
*/
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"rand"
"sort"
"strings"
"unicode"
"utf8"
)
var (
numberOfLinks = flag.Int("l", 20,
"Number of unreliable links to pass the message through")
wordList = flag.String("w", "/usr/share/dict/words",
"Filename of the word list to use")
printIntermediateStates = flag.Bool("i", false,
"Print intermediate states of the message")
debugMetaphone = flag.Bool("d", false,
"Print double metaphone algorithm debug output (very verbose!)")
allowNamesInWordList = flag.Bool("n", false,
"Allow names to be read from the word list")
dictionary metaphoneDict
)
// Minimum length of a word to consider it for substitution.
// TODO: Implement stopwords instead of using this limit.
const minWordLength = 4
// Default dicitionary capacity.
const dictCapacity = 100000
// The distance (in either direction) at which to "fudge" the word
// substitutes - increasing this gives more random matches.
const fudgeDistance = 4
// The maximum length of the phonetic representations to use, 0 for unlimited.
// The traditional metaphone algorithm uses 4.
const doubleMetaphoneMaxLen = 4
// mutate copies m but randomly substitutes one word.
func mutate(m message) message {
if len(m) == 0 {
return m
}
n := make(message, len(m))
copy(n, m)
for tries := 0; tries < 3; tries++ {
i := rand.Intn(len(m))
word := m[i]
if len(word.original) >= minWordLength {
n[i] = dictionary.randomSubstituteFor(word)
return n
}
}
return n
}
// telephone passes mutated messages between from and to.
func telephone(from, to chan message) {
for m := <-from; m != nil; m = <-from {
if *printIntermediateStates {
fmt.Printf("Heard: %s\n", m)
}
to <- mutate(m)
}
to <- nil
}
// main reads the dictionary, creates the telephone system and then passes
// messages read from stdin to it.
func main() {
flag.Parse()
if file, err := os.Open(*wordList, os.O_RDONLY, 0); err == nil {
fmt.Fprintf(os.Stderr, "Reading words from \"%s\"...\n", *wordList)
dictionary = readWords(file)
file.Close()
} else {
fmt.Fprintf(os.Stderr, "%s: %s\n", *wordList, err)
}
if dictionary.Len() < 1 {
fmt.Fprintf(os.Stderr, "No words read, aborting.")
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "%d words in dictionary\n", dictionary.Len())
send := make(chan message)
receive := send
for i := *numberOfLinks; i > 0; i-- {
c := make(chan message)
go telephone(receive, c)
receive = c
}
input := bufio.NewReader(os.Stdin)
for {
if line, err := input.ReadString('\n'); err == nil {
words := strings.Fields(line[0 : len(line)-1])
msg := make(message, len(words))
for i, word := range words {
msg[i] = doubleMetaphone(word)
}
send <- msg
fmt.Println(<-receive)
} else {
if err != os.EOF {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
}
break
}
}
send <- nil
}
type metaphoneWord struct {
original, literal, metaphone, secondary string
}
type message []*metaphoneWord
func (m message) String() (s string) {
for i, word := range m {
s += word.original
if i != len(m)-1 {
s += " "
}
}
return
}
type metaphoneDict []metaphoneWord
func newMetaphoneDict() metaphoneDict { return make(metaphoneDict, 0, dictCapacity) }
func (d metaphoneDict) Len() int { return len(d) }
func (d metaphoneDict) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d metaphoneDict) Less(i, j int) bool { return d[i].metaphone < d[j].metaphone }
// phoneticLocation returns the index where metaphone is or would be sorted.
func (d metaphoneDict) phoneticLocation(metaphone string) (i int) {
left, right := 0, len(d)
for left < right {
i = left + ((right - left) / 2)
if d[i].metaphone < metaphone {
left = i + 1
} else {
right = i
}
}
return
}
// matches returns a slice containing all exact matches in d for metaphone.
func (d metaphoneDict) matches(metaphone string) metaphoneDict {
var l, r int
i := d.phoneticLocation(metaphone)
for r = i; r < len(d) && d[r].metaphone == metaphone; r++ {
}
for l = i; l >= 0 && d[l].metaphone == metaphone; l-- {
}
l++
if r-l < fudgeDistance*2 {
l -= fudgeDistance
r += fudgeDistance
}
if l < 0 {
l = 0
}
if r > len(d) {
r = len(d)
}
return d[l:r]
}
// randomNonEqual returns a randomly selected word in d which is not
// literally equal to w. Returns -1 if no such word exists.
// (The assumption is that the same word literal appears at most twice.)
func (d metaphoneDict) randomNonEqual(w string) *metaphoneWord {
if len(d) == 0 {
return nil
}
i := rand.Intn(len(d))
switch {
case d[i].literal != w:
case i > 0 && d[i-1].literal != w:
i--
case i < len(d)-1 && d[i+1].literal != w:
i++
case i == 0 && len(d) > 2 && d[i+2].literal != w:
i += 2
case i == len(d)-1 && len(d) > 2 && d[i-2].literal != w:
i -= 2
default:
return nil
}
return &d[i]
}
// randomSubstituteFor returns a semi-random but usually phonetically
// close substitute for w.
func (d metaphoneDict) randomSubstituteFor(w *metaphoneWord) *metaphoneWord {
mp := w.metaphone
if rand.Intn(2) == 0 {
mp = w.secondary
}
match := d.matches(mp).randomNonEqual(w.literal)
if match != nil {
return match
}
return w
}
// Push adds mp to the end of d, reallocating space if necessary.
func (d *metaphoneDict) Push(mp metaphoneWord) {
dict := *d
if len(dict) < cap(dict) {
dict = dict[0 : len(dict)+1]
} else {
dict = make(metaphoneDict, len(dict)+1, cap(dict)+dictCapacity)
copy(dict, *d)
}
dict[len(dict)-1] = mp
*d = dict
}
// readWords creates a metaphone dictionary from input of one word per line.
// The dictionary is sorted by phonetic representation.
func readWords(input io.Reader) metaphoneDict {
dict := newMetaphoneDict()
rd := bufio.NewReader(input)
for {
if word, err := rd.ReadString('\n'); err == nil {
word = strings.TrimSpace(word)
if !*allowNamesInWordList {
rune, _ := utf8.DecodeRuneInString(word)
if unicode.IsUpper(rune) {
continue
}
}
if strings.Index(word, " ") != -1 || len(word) < minWordLength {
continue
}
mp := *doubleMetaphone(word)
dict.Push(mp)
if mp.metaphone != mp.secondary {
// Secondary phonetic representation
dict.Push(metaphoneWord{word, mp.literal,
mp.secondary, mp.metaphone})
}
} else {
if err != os.EOF {
fmt.Printf("Error: %s\n", err)
}
break
}
}
sort.Sort(dict)
return dict
}
// doubleMetaphone is like doubleMetaphoneLimited but uses the default maxLen.
func doubleMetaphone(word string) (result *metaphoneWord) {
if doubleMetaphoneMaxLen > 0 {
return doubleMetaphoneLimited(word, doubleMetaphoneMaxLen)
}
return doubleMetaphoneLimited(word, len(word)*2)
}
// doubleMetaphone returns two phonetic representations of an English word.
// The secondary representation may equal primary. The implementation is
// currently not fully complete with all special cases. The phonetic
// representations are limited to maxLen length.
func doubleMetaphoneLimited(word string, maxLen int) (result *metaphoneWord) {
result = new(metaphoneWord)
result.original = word
word = strings.ToUpper(word)
// TODO: Strip punctuation
result.literal = word
prev, skip, last, slavoGermanic := 0, 0, len(word)-1, false
testSlavoGermanic: for pos, c := range word {
switch c {
case 'C':
if pos == last || word[pos+1] != 'Z' {
break
}
fallthrough
case 'W', 'K':
slavoGermanic = true
break testSlavoGermanic
}
}
word += " " // Allow indexing beyond the end
for pos, c := range word {
if c == ' ' {
break
}
if skip > 0 {
prev = 0
skip--
continue
}
mp, ms := "", "-"
switch c {
case 'A', 'E', 'I', 'O', 'U', 'Y', 'Ü', 'Ä', 'Ö', 'Å', 'É', 'È', 'Ï':
if pos == 0 {
// Initial vowel
mp = "A"
} else if pos == 1 && prev == 'W' {
// W + vowel at the start of the word
mp, ms = "A", "F"
}
case 'B':
if prev == 'M' && pos > 1 && word[pos-2] == 'U' &&
(pos == last || (word[pos+1] == 'E' &&
word[pos+2] == 'R')) {
// e.g. dumb, thumb
break
}
if prev != 'B' {
mp = "P"
}
case 'C':
if prev == 'X' {
break
}
if pos == 0 && strings.HasPrefix(word, "CAESAR") {
mp = "S"
skip = 1
break
}
next := word[pos+1]
if next == 'H' {
skip = 1
n2, n3 := word[pos+2], word[pos+3]
if pos > 0 {
if n2 == 'A' && n3 == 'E' {
// michael
mp, ms = "K", "X"
break
}
if (pos == 1 && (prev == 'M' || prev == 'S')) ||
n2 == 'T' || n2 == 'S' {
// Mc, Sch, -cht, -chs
mp = "K"
break
}
if (prev == 'A' || prev == 'O' || prev == 'U' ||
prev == 'E') && (n2 == 'L' || n2 == 'R' || n2 == 'N' ||
n2 == 'M' || n2 == 'B' || n2 == 'B' ||
n2 == 'H' || n2 == 'F' || n2 == 'V' ||
n2 == 'W') {
// e.g. wachtler, wechsler, but not tichner
mp = "K"
break
}
if pos > 1 {
p2 := word[pos-2]
if prev == 'R' &&
((p2 == 'O' && n2 == 'E' && n3 == 'S') ||
(p2 == 'O' && n2 == 'I' && n3 == 'D') ||
(p2 == 'A' && n2 == 'I' && n3 == 'T')) {
// orchestra, orchid, architect (but not arch)
mp = "K"
break
}
}
} else {
// pos == 0
n4, n5 := word[pos+4], word[pos+5]
if (n2 == 'A' && n3 == 'R' && ((n4 == 'A' && n5 == 'C') ||
(n4 == 'I' && n5 == 'S'))) ||
(n2 == 'E' && n3 == 'M') || (n2 == 'Y' && n3 == 'M') ||
(n2 == 'I' && n3 == 'A') ||
(n2 == 'O' && n3 == 'R' && (n4 != 'O' || n5 != 'E')) {
// e.g. character, charisma, chorus, chemistry
// but not "chore"
mp = "K"
} else {
switch n2 {
case 'L', 'R', 'N', 'M', 'B', 'H', 'F', 'V', 'W', ' ':
mp = "K"
default:
mp = "X"
}
}
break
}
mp, ms = "X", "K"
break
} else if next == 'Z' {
if pos < 2 || word[pos-1] != 'I' || word[pos-2] == 'W' {
// cz, not wicz
mp, ms = "S", "X"
skip = 1
break
}
} else if next == 'C' {
n2 := word[pos+2]
if n2 == 'I' && word[pos+3] == 'A' {
// -ccia, e.g. focaccia
mp = "X"
skip = 2
break
}
if pos != 2 || prev != 'M' {
// -cc, but not e.g. McClellan
if n2 == 'I' || n2 == 'E' ||
(n2 == 'H' && word[pos+3] != 'U') {
// e.g. bellocchio, but not bacchus
skip = 3
if pos == 1 && prev == 'A' {
// e.g. accident
mp = "KS"
break
} else if prev == 'U' && n2 == 'E' &&
(word[pos+4] == 'S' || word[pos+4] == 'E') {
// succeed, success
mp = "KS"
break
}
mp = "X"
break
}
}
if n2 != 'I' && n2 != 'E' {
skip = 1
}
} else if next == 'K' || next == 'Q' {
skip = 1
} else if next == 'I' {
mp = "S"
skip = 1
n2 := word[pos+2]
if n2 == 'O' || n2 == 'E' || n2 == 'A' {
// cio, cie, cia
ms = "X"
}
break
} else if next == 'E' || next == 'Y' {
skip = 1
mp = "S"
break
}
mp = "K"
case 'D':
if prev != 'D' && prev != 'T' {
if word[pos+1] == 'G' {
skip = 1
switch word[pos+2] {
case 'E', 'I', 'Y':
// e.g. "edge"
mp = "J"
default:
// e.g. "edgar"
mp = "K"
}
break
}
mp = "T"
}
case 'F', 'V':
if prev != c {
m | 'G':
next := word[pos+1]
if next == 'H' {
skip = 1
if !isVowel(prev) {
mp = "K"
break
}
if pos == 0 {
if word[pos+2] == 'I' {
mp = "J"
} else {
mp = "K"
}
break
}
if pos > 1 {
if word[pos-2] == 'B' || word[pos-2] == 'H' ||
word[pos-2] == 'D' {
// e.g. hugh
break
}
if pos > 2 {
p3 := word[pos-3]
if p3 == 'B' || p3 == 'H' || p3 == 'D' {
// e.g. bough
break
}
if pos > 3 && (word[pos-4] == 'B' || word[pos-4] == 'H') {
// e.g. brought
break
}
if prev == 'U' && (p3 == 'C' || p3 == 'G' ||
p3 == 'L' || p3 == 'R' ||
p3 == 'T') {
// e.g. laugh, cough, rough, tough
mp = "F"
break
}
}
}
if prev != 'I' {
mp = "K"
}
break
}
if next == 'N' {
skip = 1
if !slavoGermanic {
if pos == 1 && isVowel(prev) {
mp, ms = "KN", "N"
break
} else if word[pos+2] != 'E' || word[pos+3] != 'Y' {
// not e.g. cagney
mp, ms = "N", "KN"
break
}
}
mp = "KN"
break
}
if next == 'L' {
if word[pos+2] == 'I' && !slavoGermanic {
// e.g. tagliaro
mp, ms = "KL", "L"
skip = 1
}
} else if next == 'E' || next == 'I' || next == 'Y' || next == 'G' {
skip = 1
n2 := word[pos+2]
if next != 'G' {
if pos == 0 {
if (next == 'E' && (n2 == 'S' || n2 == 'P' ||
n2 == 'B' || n2 == 'L' ||
n2 == 'Y' || n2 == 'I' ||
n2 == 'R')) || next == 'Y' ||
(next == 'I' && (n2 == 'L' || n2 == 'N')) {
skip = 1
mp, ms = "K", "J"
break
}
}
if !(next == 'I' || prev == 'I' || prev == 'E' ||
(next == 'Y' && (prev == 'R' || prev == 'O')) ||
(next == 'E' && pos > 0 && word[pos-1] != 'R')) {
// -ger-, -gy-
mp = "K"
if !(pos == 3 && next == 'E' &&
strings.HasPrefix(word, "DANGER") ||
strings.HasPrefix(word, "RANGER") ||
strings.HasPrefix(word, "MANGER")) {
ms = "J"
}
break
}
} else if !(n2 == 'I' && (prev == 'A' || prev == 'O')) {
// not -aggi -oggi
mp = "K"
break
}
if !strings.HasPrefix(word, "SCH") ||
(next == 'E' && n2 == 'T') {
// obvious Germanic
mp = "K"
} else if next == 'I' && pos == (last-3) &&
strings.HasSuffix(word, "ER") {
// -gier suffix
mp = "J"
} else {
mp, ms = "J", "K"
}
break
}
mp = "K"
case 'H':
if pos == 0 || isVowel(prev) {
next, _ := utf8.DecodeRuneInString(word[pos+1 : len(word)])
if isVowel(next) {
// H between two vowels, or at the beginning followed by a vowel
mp = "H"
skip = 1
}
}
case 'J':
if prev == 'S' || prev == 'K' || prev == 'L' || prev == 'J' {
break
}
next := word[pos+1]
if pos == 0 {
if next == 'O' && word[pos+2] == 'S' && word[pos+3] == 'E' {
if word[pos+4] == ' ' {
// Jose
mp = "H"
} else {
mp, ms = "J", "H"
}
break
}
mp, ms = "J", "A"
} else if !slavoGermanic && isVowel(prev) &&
(next == 'A' || next == 'O') {
mp, ms = "J", "H"
} else if pos == last {
mp, ms = "J", ""
} else {
switch next {
case 'L', 'T', 'K', 'S', 'N', 'M', 'B', 'Z':
// NOP
default:
mp = "J"
}
}
case 'Q', 'K':
if prev != c && prev != 'C' {
mp = "K"
}
case 'L':
if word[pos+1] == 'L' {
skip = 1
if pos > 0 && ((word[pos+3] == ' ' &&
(((word[pos+2] == 'O' || word[pos+2] == 'A') &&
word[pos-1] == 'I') || (word[pos+2] == 'E' &&
word[pos-1] == 'A'))) ||
((word[last] == 'S' && (word[last-1] == 'A' ||
word[last-1] == 'O')) ||
(word[last] == 'A' || word[last] == 'O') &&
(word[pos-1] == 'A' && word[pos+2] == 'E'))) {
// Spanish, -illo, -illa, -alle
ms = ""
}
}
mp = "L"
case 'M':
if prev != 'M' {
mp = "M"
}
case 'N':
if pos == 1 && (prev == 'K' || prev == 'G' || prev == 'P') {
// Skip GN, KN, PN at the start of the word
result.metaphone, result.secondary = "", ""
break
}
fallthrough
case 'Ñ':
if prev != c {
mp = "N"
}
case 'P':
next := word[pos+1]
if next == 'H' {
mp = "F"
skip = 1
break
}
if next == 'S' && pos == 0 {
// Ignore PS at the start of the word
skip = 1
break
}
if next == 'P' || next == 'B' {
skip = 1
}
mp = "P"
// case 'Q': is grouped with K
case 'R':
if prev == 'R' {
break
}
if pos == last && !slavoGermanic && prev == 'E' && pos > 1 &&
word[pos-2] == 'I' && (pos < 4 || word[pos-4] != 'M' ||
!(word[pos-3] == 'E' || word[pos-3] == 'A')) {
// French, e.g. rogier, but not e.g. hochmeier
mp, ms = "", "R"
} else {
mp = "R"
}
case 'S', 'ß', 'Š':
if prev == 'S' {
break
}
next := word[pos+1]
if (prev == 'I' || prev == 'Y') && next == 'L' {
// isl, ysl, e.g. island, isle, carlysle
break
}
if pos == 0 {
if next == 'M' || next == 'N' || next == 'L' || next == 'W' {
mp, ms = "S", "X"
break
}
if strings.HasPrefix(word, "SUGAR") {
mp, ms = "X", "S"
break
}
}
if next == 'H' {
if word[pos+2] == 'O' {
if (word[pos+3] == 'E' && word[pos+4] == 'K') ||
(word[pos+3] == 'L' && (word[pos+4] == 'M' ||
word[pos+4] == 'Z')) {
// holm, holz, hoek
mp = "S"
break
}
} else if word[pos+2] == 'E' && word[pos+3] == 'I' &&
word[pos+4] == 'M' {
// heim
mp = "S"
break
}
mp = "X"
skip = 1
} else if next == 'I' && (word[pos+2] == 'O' || word[pos+2] == 'A') {
// sio, sia
mp = "S"
if !slavoGermanic {
ms = "X"
}
skip = 2
} else if next == 'Z' {
mp, ms = "S", "X"
skip = 1
} else if next == 'C' {
skip = 2
if word[pos+2] == 'H' {
n3, n4 := word[pos+3], word[pos+4]
if (n3 == 'O' && n4 == 'O') || (n3 == 'U' && n4 == 'Y') ||
(n3 == 'E' && (n4 == 'D' || n4 == 'M')) {
// Dutch origin, e.g. "school", "schooner"
mp = "SK"
} else if n3 == 'E' && (n4 == 'R' || n4 == 'N') {
mp, ms = "X", "SK"
} else {
mp = "X"
if pos == 0 && !isVowel(int(word[3])) && word[3] != 'W' {
ms = "S"
}
}
} else if word[pos+2] == 'I' || word[pos+2] == 'E' ||
word[pos+2] == 'Y' {
mp = "S"
} else {
mp = "SK"
skip = 1
// TODO: Check correctness of skip
}
} else if pos == last && prev == 'I' {
if pos > 1 && (word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. artois
ms = "S"
} else {
mp = "S"
}
} else {
mp = "S"
}
case 'T':
if prev == 'T' {
if word[pos+1] == 'H' {
// tth
mp, ms = "0", "T"
skip = 1
} else {
mp = "T"
}
break
}
if prev == 'D' {
break
}
next := word[pos+1]
if next == 'I' {
if word[pos+2] == 'A' ||
(word[pos+2] == 'O' && word[pos+3] == 'N') {
// tia, tion
mp = "X"
skip = 2
}
} else if next == 'C' && word[pos+2] == 'H' {
// tch
mp = "X"
skip = 2
} else if next == 'H' {
skip = 1
if word[pos+3] == 'M' {
if word[pos+2] == 'O' || word[pos+2] == 'A' {
mp = "T"
break
}
}
mp, ms = "0", "T"
} else if next != 'T' {
mp = "T"
}
// case 'V': is grouped with F
case 'W':
next := word[pos+1]
if next == 'R' {
if pos != 0 {
mp = "R"
}
skip = 1
break
}
if pos == 0 {
if next == 'H' {
mp = "A"
}
break
}
if (pos == last && isVowel(prev)) ||
strings.HasPrefix(word, "SCH") {
ms = "F"
break
}
n2, n3 := word[pos+2], word[pos+3]
if (prev == 'E' || prev == 'O') && next == 'S' && n2 == 'K' &&
(n3 == 'I' || n3 == 'Y') {
// -ewski, -ewsky, -owski, -owsky
ms = "F"
} else if next == 'I' && n3 == 'Z' && (n2 == 'C' || n2 == 'T') {
// -wicz, -witz
mp, ms = "TS", "FX"
skip = 3
}
case 'X':
if pos == 0 {
// Initial X pronounced like a Z, e.g. Xavier
mp = "S"
} else if prev != 'X' {
if pos == last && prev == 'U' && pos > 1 &&
(word[pos-2] == 'A' || word[pos-2] == 'O') {
// French, e.g. breaux
break
}
mp = "KS"
}
case 'Z':
if prev == 'S' || prev == 'Z' {
break
}
if word[pos+1] == 'H' {
// Chinese, e.g. Zhao
mp = "J"
skip = 1
break
}
if word[pos+1] == 'I' || word[pos+1] == 'O' || word[pos+1] == 'A' ||
(slavoGermanic && prev != 'T' && pos > 0) {
ms = "TS"
}
fallthrough
case 'Ç':
mp = "S"
default:
}
prev = c
result.metaphone += mp
if ms == "-" {
ms = mp
}
result.secondary += ms
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "\t%c -> [%s] [%s]\n", c, mp, ms)
}
if len(result.metaphone) >= maxLen && len(result.secondary) >= maxLen {
break
}
}
if len(result.metaphone) > maxLen {
result.metaphone = result.metaphone[0:maxLen]
}
if len(result.secondary) > maxLen {
result.secondary = result.secondary[0:maxLen]
}
if result.secondary == result.metaphone {
result.secondary = result.metaphone
}
if *debugMetaphone {
fmt.Fprintf(os.Stderr, "%s: [%s] [%s]\n", result.literal, result.metaphone,
result.secondary)
}
return
}
func isVowel(c int) bool {
// TODO: Non-English support is rather limited
return c == 'A' || c == 'E' || c == 'I' || c == 'O' || c == 'U' || c == 'Y' ||
c == 'Ä' || c == 'Ö' || c == 'Ü' || c == 'Å' || c == 'É' || c == 'È' ||
c == 'Ï'
}
| p = "F"
}
case | conditional_block |
mod.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
mod notification_stream;
use crate::types::PeerError;
use notification_stream::NotificationStream;
/// Processes incoming commands from the control stream and dispatches them to the control command
/// handler. This started only when we have connection and when we have either a target or
/// controller SDP profile record for the current peer.
async fn process_control_stream(peer: Arc<RwLock<RemotePeer>>) {
let connection = {
let peer_guard = peer.read();
match peer_guard.control_channel.connection() {
Some(connection) => connection.clone(),
None => return,
}
};
let command_stream = connection.take_command_stream();
// Limit to 16 since that is the max number of transactions we can process at any one time per
// AVCTP
match command_stream
.map(Ok)
.try_for_each_concurrent(16, |command| async {
let fut = peer.read().command_handler.handle_command(command.unwrap());
let result: Result<(), PeerError> = fut.await;
result
})
.await
{
Ok(_) => fx_log_info!("Peer command stream closed"),
Err(e) => fx_log_err!("Peer command returned error {:?}", e),
}
// Command stream closed/errored. Disconnect the peer.
{
peer.write().reset_connection(false);
}
}
/// Handles received notifications from the peer from the subscribed notifications streams and
/// dispatches the notifications back to the controller listeners
fn handle_notification(
notif: &NotificationEventId,
peer: &Arc<RwLock<RemotePeer>>,
data: &[u8],
) -> Result<bool, Error> {
fx_vlog!(tag: "avrcp", 2, "received notification for {:?} {:?}", notif, data);
let preamble = VendorDependentPreamble::decode(data).map_err(|e| Error::PacketError(e))?;
let data = &data[preamble.encoded_len()..];
if data.len() < preamble.parameter_length as usize {
return Err(Error::UnexpectedResponse);
}
match notif {
NotificationEventId::EventPlaybackStatusChanged => {
let response = PlaybackStatusChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackStatusChanged(response.playback_status()),
);
Ok(false)
}
NotificationEventId::EventTrackChanged => {
let response = TrackChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::TrackIdChanged(
response.identifier(),
));
Ok(false)
}
NotificationEventId::EventPlaybackPosChanged => {
let response = PlaybackPosChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackPosChanged(response.position()),
);
Ok(false)
}
NotificationEventId::EventVolumeChanged => {
let response = VolumeChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::VolumeChanged(
response.volume(),
));
Ok(false)
}
_ => Ok(true),
}
}
/// Starts a task to attempt an outgoing L2CAP connection to remote's AVRCP control channel.
/// The control channel should be in `Connecting` state before spawning this task.
/// TODO(BT-2747): Fix a race where an incoming connection can come in while we are making an
/// outgoing connection. Properly handle the case where we are attempting to connect to remote
/// at the same time they make an incoming connection according to how the the spec says.
fn start_make_connection_task(peer: Arc<RwLock<RemotePeer>>) {
let peer = peer.clone();
fasync::spawn(async move {
let (peer_id, profile_service) = {
let peer_guard = peer.read();
// early return if we are not in `Connecting`
match peer_guard.control_channel {
PeerChannel::Connecting => {}
_ => return,
}
(peer_guard.peer_id.clone(), peer_guard.profile_svc.clone())
};
match profile_service.connect_to_device(&peer_id, PSM_AVCTP as u16).await {
Ok(socket) => {
let mut peer_guard = peer.write();
match peer_guard.control_channel {
PeerChannel::Connecting => match AvcPeer::new(socket) {
Ok(peer) => {
peer_guard.set_control_connection(peer);
}
Err(e) => {
peer_guard.reset_connection(false);
fx_log_err!("Unable to make peer from socket {}: {:?}", peer_id, e);
}
},
_ => {
fx_log_info!(
"incoming connection established while making outgoing {:?}",
peer_id
);
// an incoming l2cap connection was made while we were making an
// outgoing one. Drop both connections per spec.
peer_guard.reset_connection(false);
}
};
}
Err(e) => {
fx_log_err!("connect_to_device error {}: {:?}", peer_id, e);
let mut peer_guard = peer.write();
if let PeerChannel::Connecting = peer_guard.control_channel {
peer_guard.reset_connection(false);
}
}
}
})
}
/// Checks for supported notification on the peer and registers for notifications.
/// This is started on a remote peer when we have a connection and target profile descriptor.
async fn pump_notifications(peer: Arc<RwLock<RemotePeer>>) {
// events we support when speaking to a peer that supports the target profile.
const SUPPORTED_NOTIFICATIONS: [NotificationEventId; 4] = [
NotificationEventId::EventPlaybackStatusChanged,
NotificationEventId::EventTrackChanged,
NotificationEventId::EventPlaybackPosChanged,
NotificationEventId::EventVolumeChanged,
];
let supported_notifications: Vec<NotificationEventId> =
SUPPORTED_NOTIFICATIONS.iter().cloned().collect();
// look up what notifications we support on this peer first. Consider updating this from
// time to time.
let remote_supported_notifications = match get_supported_events_internal(peer.clone()).await {
Ok(x) => x,
Err(_) => return,
};
let supported_notifications: Vec<NotificationEventId> = remote_supported_notifications
.into_iter()
.filter(|k| supported_notifications.contains(k))
.collect();
let mut notification_streams = SelectAll::new();
for notif in supported_notifications {
fx_vlog!(tag: "avrcp", 2, "creating notification stream for {:#?}", notif);
let stream =
NotificationStream::new(peer.clone(), notif, 1).map_ok(move |data| (notif, data));
notification_streams.push(stream);
}
pin_mut!(notification_streams);
loop {
if futures::select! {
event_result = notification_streams.select_next_some() => {
match event_result {
Ok((notif, data)) => {
handle_notification(¬if, &peer, &data[..])
.unwrap_or_else(|e| { fx_log_err!("Error decoding packet from peer {:?}", e); true} )
},
Err(Error::CommandNotSupported) => false,
Err(_) => true,
_=> true,
}
}
complete => { true }
} {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "stopping notifications for {:#?}", peer.read().peer_id);
}
/// Starts a task to poll notifications on the remote peer. Aborted when the peer connection is
/// reset.
fn start_notifications_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
pump_notifications(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// Starts a task to poll control messages from the peer. Aborted when the peer connection is
/// reset. Started when we have a connection to the remote peer and we have any type of valid SDP
/// profile from the peer.
fn start_control_stream_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
process_control_stream(peer).await;
},
registration, |
/// State observer task around a remote peer. Takes a change stream from the remote peer that wakes
/// the task whenever some state has changed on the peer. Swaps tasks such as making outgoing
/// connections, processing the incoming control messages, and registering for notifications on the
/// remote peer.
pub(super) async fn state_watcher(peer: Arc<RwLock<RemotePeer>>) {
fx_vlog!(tag: "avrcp", 2, "state_watcher starting");
let mut change_stream = peer.read().state_change_listener.take_change_stream();
let peer_weak = Arc::downgrade(&peer);
drop(peer);
let mut channel_processor_abort_handle: Option<AbortHandle> = None;
let mut notification_poll_abort_handle: Option<AbortHandle> = None;
while let Some(_) = change_stream.next().await {
fx_vlog!(tag: "avrcp", 2, "state_watcher command received");
if let Some(peer) = peer_weak.upgrade() {
let mut peer_guard = peer.write();
fx_vlog!(tag: "avrcp", 2, "make_connection control channel {:?}", peer_guard.control_channel);
match peer_guard.control_channel {
PeerChannel::Connecting => {}
PeerChannel::Disconnected => {
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
channel_processor_abort_handle = None;
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
notification_poll_abort_handle = None;
}
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& peer_guard.attempt_connection
{
fx_vlog!(tag: "avrcp", 2, "make_connection {:?}", peer_guard.peer_id);
peer_guard.attempt_connection = false;
peer_guard.control_channel = PeerChannel::Connecting;
start_make_connection_task(peer.clone());
}
}
PeerChannel::Connected(_) => {
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& channel_processor_abort_handle.is_none()
{
channel_processor_abort_handle =
Some(start_control_stream_processing_task(peer.clone()));
}
if peer_guard.target_descriptor.is_some()
&& notification_poll_abort_handle.is_none()
{
notification_poll_abort_handle =
Some(start_notifications_processing_task(peer.clone()));
}
}
}
} else {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "state_watcher shutting down. aborting processors");
// Stop processing state changes entirely on the peer.
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
}
} | )
.map(|_| ()),
);
handle
} | random_line_split |
mod.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
mod notification_stream;
use crate::types::PeerError;
use notification_stream::NotificationStream;
/// Processes incoming commands from the control stream and dispatches them to the control command
/// handler. This started only when we have connection and when we have either a target or
/// controller SDP profile record for the current peer.
async fn process_control_stream(peer: Arc<RwLock<RemotePeer>>) {
let connection = {
let peer_guard = peer.read();
match peer_guard.control_channel.connection() {
Some(connection) => connection.clone(),
None => return,
}
};
let command_stream = connection.take_command_stream();
// Limit to 16 since that is the max number of transactions we can process at any one time per
// AVCTP
match command_stream
.map(Ok)
.try_for_each_concurrent(16, |command| async {
let fut = peer.read().command_handler.handle_command(command.unwrap());
let result: Result<(), PeerError> = fut.await;
result
})
.await
{
Ok(_) => fx_log_info!("Peer command stream closed"),
Err(e) => fx_log_err!("Peer command returned error {:?}", e),
}
// Command stream closed/errored. Disconnect the peer.
{
peer.write().reset_connection(false);
}
}
/// Handles received notifications from the peer from the subscribed notifications streams and
/// dispatches the notifications back to the controller listeners
fn handle_notification(
notif: &NotificationEventId,
peer: &Arc<RwLock<RemotePeer>>,
data: &[u8],
) -> Result<bool, Error> |
/// Starts a task to attempt an outgoing L2CAP connection to remote's AVRCP control channel.
/// The control channel should be in `Connecting` state before spawning this task.
/// TODO(BT-2747): Fix a race where an incoming connection can come in while we are making an
/// outgoing connection. Properly handle the case where we are attempting to connect to remote
/// at the same time they make an incoming connection according to how the the spec says.
fn start_make_connection_task(peer: Arc<RwLock<RemotePeer>>) {
let peer = peer.clone();
fasync::spawn(async move {
let (peer_id, profile_service) = {
let peer_guard = peer.read();
// early return if we are not in `Connecting`
match peer_guard.control_channel {
PeerChannel::Connecting => {}
_ => return,
}
(peer_guard.peer_id.clone(), peer_guard.profile_svc.clone())
};
match profile_service.connect_to_device(&peer_id, PSM_AVCTP as u16).await {
Ok(socket) => {
let mut peer_guard = peer.write();
match peer_guard.control_channel {
PeerChannel::Connecting => match AvcPeer::new(socket) {
Ok(peer) => {
peer_guard.set_control_connection(peer);
}
Err(e) => {
peer_guard.reset_connection(false);
fx_log_err!("Unable to make peer from socket {}: {:?}", peer_id, e);
}
},
_ => {
fx_log_info!(
"incoming connection established while making outgoing {:?}",
peer_id
);
// an incoming l2cap connection was made while we were making an
// outgoing one. Drop both connections per spec.
peer_guard.reset_connection(false);
}
};
}
Err(e) => {
fx_log_err!("connect_to_device error {}: {:?}", peer_id, e);
let mut peer_guard = peer.write();
if let PeerChannel::Connecting = peer_guard.control_channel {
peer_guard.reset_connection(false);
}
}
}
})
}
/// Checks for supported notification on the peer and registers for notifications.
/// This is started on a remote peer when we have a connection and target profile descriptor.
async fn pump_notifications(peer: Arc<RwLock<RemotePeer>>) {
// events we support when speaking to a peer that supports the target profile.
const SUPPORTED_NOTIFICATIONS: [NotificationEventId; 4] = [
NotificationEventId::EventPlaybackStatusChanged,
NotificationEventId::EventTrackChanged,
NotificationEventId::EventPlaybackPosChanged,
NotificationEventId::EventVolumeChanged,
];
let supported_notifications: Vec<NotificationEventId> =
SUPPORTED_NOTIFICATIONS.iter().cloned().collect();
// look up what notifications we support on this peer first. Consider updating this from
// time to time.
let remote_supported_notifications = match get_supported_events_internal(peer.clone()).await {
Ok(x) => x,
Err(_) => return,
};
let supported_notifications: Vec<NotificationEventId> = remote_supported_notifications
.into_iter()
.filter(|k| supported_notifications.contains(k))
.collect();
let mut notification_streams = SelectAll::new();
for notif in supported_notifications {
fx_vlog!(tag: "avrcp", 2, "creating notification stream for {:#?}", notif);
let stream =
NotificationStream::new(peer.clone(), notif, 1).map_ok(move |data| (notif, data));
notification_streams.push(stream);
}
pin_mut!(notification_streams);
loop {
if futures::select! {
event_result = notification_streams.select_next_some() => {
match event_result {
Ok((notif, data)) => {
handle_notification(¬if, &peer, &data[..])
.unwrap_or_else(|e| { fx_log_err!("Error decoding packet from peer {:?}", e); true} )
},
Err(Error::CommandNotSupported) => false,
Err(_) => true,
_=> true,
}
}
complete => { true }
} {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "stopping notifications for {:#?}", peer.read().peer_id);
}
/// Starts a task to poll notifications on the remote peer. Aborted when the peer connection is
/// reset.
fn start_notifications_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
pump_notifications(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// Starts a task to poll control messages from the peer. Aborted when the peer connection is
/// reset. Started when we have a connection to the remote peer and we have any type of valid SDP
/// profile from the peer.
fn start_control_stream_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
process_control_stream(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// State observer task around a remote peer. Takes a change stream from the remote peer that wakes
/// the task whenever some state has changed on the peer. Swaps tasks such as making outgoing
/// connections, processing the incoming control messages, and registering for notifications on the
/// remote peer.
pub(super) async fn state_watcher(peer: Arc<RwLock<RemotePeer>>) {
fx_vlog!(tag: "avrcp", 2, "state_watcher starting");
let mut change_stream = peer.read().state_change_listener.take_change_stream();
let peer_weak = Arc::downgrade(&peer);
drop(peer);
let mut channel_processor_abort_handle: Option<AbortHandle> = None;
let mut notification_poll_abort_handle: Option<AbortHandle> = None;
while let Some(_) = change_stream.next().await {
fx_vlog!(tag: "avrcp", 2, "state_watcher command received");
if let Some(peer) = peer_weak.upgrade() {
let mut peer_guard = peer.write();
fx_vlog!(tag: "avrcp", 2, "make_connection control channel {:?}", peer_guard.control_channel);
match peer_guard.control_channel {
PeerChannel::Connecting => {}
PeerChannel::Disconnected => {
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
channel_processor_abort_handle = None;
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
notification_poll_abort_handle = None;
}
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& peer_guard.attempt_connection
{
fx_vlog!(tag: "avrcp", 2, "make_connection {:?}", peer_guard.peer_id);
peer_guard.attempt_connection = false;
peer_guard.control_channel = PeerChannel::Connecting;
start_make_connection_task(peer.clone());
}
}
PeerChannel::Connected(_) => {
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& channel_processor_abort_handle.is_none()
{
channel_processor_abort_handle =
Some(start_control_stream_processing_task(peer.clone()));
}
if peer_guard.target_descriptor.is_some()
&& notification_poll_abort_handle.is_none()
{
notification_poll_abort_handle =
Some(start_notifications_processing_task(peer.clone()));
}
}
}
} else {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "state_watcher shutting down. aborting processors");
// Stop processing state changes entirely on the peer.
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
}
}
| {
fx_vlog!(tag: "avrcp", 2, "received notification for {:?} {:?}", notif, data);
let preamble = VendorDependentPreamble::decode(data).map_err(|e| Error::PacketError(e))?;
let data = &data[preamble.encoded_len()..];
if data.len() < preamble.parameter_length as usize {
return Err(Error::UnexpectedResponse);
}
match notif {
NotificationEventId::EventPlaybackStatusChanged => {
let response = PlaybackStatusChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackStatusChanged(response.playback_status()),
);
Ok(false)
}
NotificationEventId::EventTrackChanged => {
let response = TrackChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::TrackIdChanged(
response.identifier(),
));
Ok(false)
}
NotificationEventId::EventPlaybackPosChanged => {
let response = PlaybackPosChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackPosChanged(response.position()),
);
Ok(false)
}
NotificationEventId::EventVolumeChanged => {
let response = VolumeChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::VolumeChanged(
response.volume(),
));
Ok(false)
}
_ => Ok(true),
}
} | identifier_body |
mod.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use super::*;
mod notification_stream;
use crate::types::PeerError;
use notification_stream::NotificationStream;
/// Processes incoming commands from the control stream and dispatches them to the control command
/// handler. This started only when we have connection and when we have either a target or
/// controller SDP profile record for the current peer.
async fn process_control_stream(peer: Arc<RwLock<RemotePeer>>) {
let connection = {
let peer_guard = peer.read();
match peer_guard.control_channel.connection() {
Some(connection) => connection.clone(),
None => return,
}
};
let command_stream = connection.take_command_stream();
// Limit to 16 since that is the max number of transactions we can process at any one time per
// AVCTP
match command_stream
.map(Ok)
.try_for_each_concurrent(16, |command| async {
let fut = peer.read().command_handler.handle_command(command.unwrap());
let result: Result<(), PeerError> = fut.await;
result
})
.await
{
Ok(_) => fx_log_info!("Peer command stream closed"),
Err(e) => fx_log_err!("Peer command returned error {:?}", e),
}
// Command stream closed/errored. Disconnect the peer.
{
peer.write().reset_connection(false);
}
}
/// Handles received notifications from the peer from the subscribed notifications streams and
/// dispatches the notifications back to the controller listeners
fn handle_notification(
notif: &NotificationEventId,
peer: &Arc<RwLock<RemotePeer>>,
data: &[u8],
) -> Result<bool, Error> {
fx_vlog!(tag: "avrcp", 2, "received notification for {:?} {:?}", notif, data);
let preamble = VendorDependentPreamble::decode(data).map_err(|e| Error::PacketError(e))?;
let data = &data[preamble.encoded_len()..];
if data.len() < preamble.parameter_length as usize {
return Err(Error::UnexpectedResponse);
}
match notif {
NotificationEventId::EventPlaybackStatusChanged => {
let response = PlaybackStatusChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackStatusChanged(response.playback_status()),
);
Ok(false)
}
NotificationEventId::EventTrackChanged => {
let response = TrackChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::TrackIdChanged(
response.identifier(),
));
Ok(false)
}
NotificationEventId::EventPlaybackPosChanged => {
let response = PlaybackPosChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(
ControllerEvent::PlaybackPosChanged(response.position()),
);
Ok(false)
}
NotificationEventId::EventVolumeChanged => {
let response = VolumeChangedNotificationResponse::decode(data)
.map_err(|e| Error::PacketError(e))?;
peer.write().handle_new_controller_notification_event(ControllerEvent::VolumeChanged(
response.volume(),
));
Ok(false)
}
_ => Ok(true),
}
}
/// Starts a task to attempt an outgoing L2CAP connection to remote's AVRCP control channel.
/// The control channel should be in `Connecting` state before spawning this task.
/// TODO(BT-2747): Fix a race where an incoming connection can come in while we are making an
/// outgoing connection. Properly handle the case where we are attempting to connect to remote
/// at the same time they make an incoming connection according to how the the spec says.
fn start_make_connection_task(peer: Arc<RwLock<RemotePeer>>) {
let peer = peer.clone();
fasync::spawn(async move {
let (peer_id, profile_service) = {
let peer_guard = peer.read();
// early return if we are not in `Connecting`
match peer_guard.control_channel {
PeerChannel::Connecting => {}
_ => return,
}
(peer_guard.peer_id.clone(), peer_guard.profile_svc.clone())
};
match profile_service.connect_to_device(&peer_id, PSM_AVCTP as u16).await {
Ok(socket) => {
let mut peer_guard = peer.write();
match peer_guard.control_channel {
PeerChannel::Connecting => match AvcPeer::new(socket) {
Ok(peer) => {
peer_guard.set_control_connection(peer);
}
Err(e) => {
peer_guard.reset_connection(false);
fx_log_err!("Unable to make peer from socket {}: {:?}", peer_id, e);
}
},
_ => {
fx_log_info!(
"incoming connection established while making outgoing {:?}",
peer_id
);
// an incoming l2cap connection was made while we were making an
// outgoing one. Drop both connections per spec.
peer_guard.reset_connection(false);
}
};
}
Err(e) => {
fx_log_err!("connect_to_device error {}: {:?}", peer_id, e);
let mut peer_guard = peer.write();
if let PeerChannel::Connecting = peer_guard.control_channel {
peer_guard.reset_connection(false);
}
}
}
})
}
/// Checks for supported notification on the peer and registers for notifications.
/// This is started on a remote peer when we have a connection and target profile descriptor.
async fn pump_notifications(peer: Arc<RwLock<RemotePeer>>) {
// events we support when speaking to a peer that supports the target profile.
const SUPPORTED_NOTIFICATIONS: [NotificationEventId; 4] = [
NotificationEventId::EventPlaybackStatusChanged,
NotificationEventId::EventTrackChanged,
NotificationEventId::EventPlaybackPosChanged,
NotificationEventId::EventVolumeChanged,
];
let supported_notifications: Vec<NotificationEventId> =
SUPPORTED_NOTIFICATIONS.iter().cloned().collect();
// look up what notifications we support on this peer first. Consider updating this from
// time to time.
let remote_supported_notifications = match get_supported_events_internal(peer.clone()).await {
Ok(x) => x,
Err(_) => return,
};
let supported_notifications: Vec<NotificationEventId> = remote_supported_notifications
.into_iter()
.filter(|k| supported_notifications.contains(k))
.collect();
let mut notification_streams = SelectAll::new();
for notif in supported_notifications {
fx_vlog!(tag: "avrcp", 2, "creating notification stream for {:#?}", notif);
let stream =
NotificationStream::new(peer.clone(), notif, 1).map_ok(move |data| (notif, data));
notification_streams.push(stream);
}
pin_mut!(notification_streams);
loop {
if futures::select! {
event_result = notification_streams.select_next_some() => {
match event_result {
Ok((notif, data)) => {
handle_notification(¬if, &peer, &data[..])
.unwrap_or_else(|e| { fx_log_err!("Error decoding packet from peer {:?}", e); true} )
},
Err(Error::CommandNotSupported) => false,
Err(_) => true,
_=> true,
}
}
complete => { true }
} {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "stopping notifications for {:#?}", peer.read().peer_id);
}
/// Starts a task to poll notifications on the remote peer. Aborted when the peer connection is
/// reset.
fn start_notifications_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
pump_notifications(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// Starts a task to poll control messages from the peer. Aborted when the peer connection is
/// reset. Started when we have a connection to the remote peer and we have any type of valid SDP
/// profile from the peer.
fn start_control_stream_processing_task(peer: Arc<RwLock<RemotePeer>>) -> AbortHandle {
let (handle, registration) = AbortHandle::new_pair();
fasync::spawn(
Abortable::new(
async move {
process_control_stream(peer).await;
},
registration,
)
.map(|_| ()),
);
handle
}
/// State observer task around a remote peer. Takes a change stream from the remote peer that wakes
/// the task whenever some state has changed on the peer. Swaps tasks such as making outgoing
/// connections, processing the incoming control messages, and registering for notifications on the
/// remote peer.
pub(super) async fn | (peer: Arc<RwLock<RemotePeer>>) {
fx_vlog!(tag: "avrcp", 2, "state_watcher starting");
let mut change_stream = peer.read().state_change_listener.take_change_stream();
let peer_weak = Arc::downgrade(&peer);
drop(peer);
let mut channel_processor_abort_handle: Option<AbortHandle> = None;
let mut notification_poll_abort_handle: Option<AbortHandle> = None;
while let Some(_) = change_stream.next().await {
fx_vlog!(tag: "avrcp", 2, "state_watcher command received");
if let Some(peer) = peer_weak.upgrade() {
let mut peer_guard = peer.write();
fx_vlog!(tag: "avrcp", 2, "make_connection control channel {:?}", peer_guard.control_channel);
match peer_guard.control_channel {
PeerChannel::Connecting => {}
PeerChannel::Disconnected => {
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
channel_processor_abort_handle = None;
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
notification_poll_abort_handle = None;
}
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& peer_guard.attempt_connection
{
fx_vlog!(tag: "avrcp", 2, "make_connection {:?}", peer_guard.peer_id);
peer_guard.attempt_connection = false;
peer_guard.control_channel = PeerChannel::Connecting;
start_make_connection_task(peer.clone());
}
}
PeerChannel::Connected(_) => {
// Have we discovered service profile data on the peer?
if (peer_guard.target_descriptor.is_some()
|| peer_guard.controller_descriptor.is_some())
&& channel_processor_abort_handle.is_none()
{
channel_processor_abort_handle =
Some(start_control_stream_processing_task(peer.clone()));
}
if peer_guard.target_descriptor.is_some()
&& notification_poll_abort_handle.is_none()
{
notification_poll_abort_handle =
Some(start_notifications_processing_task(peer.clone()));
}
}
}
} else {
break;
}
}
fx_vlog!(tag: "avrcp", 2, "state_watcher shutting down. aborting processors");
// Stop processing state changes entirely on the peer.
if let Some(ref abort_handle) = channel_processor_abort_handle {
abort_handle.abort();
}
if let Some(ref abort_handle) = notification_poll_abort_handle {
abort_handle.abort();
}
}
| state_watcher | identifier_name |
global.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Values that should be shared across all modules, without necessarily
//! having to pass them all over the place, but aren't consensus values.
//! should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
};
use crate::ser::ProtocolVersion;
use std::cell::Cell;
use util::OneTime;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// The default "local" protocol version for this node.
/// We negotiate compatible versions with each peer via Hand/Shake.
/// Note: We also use a specific (possible different) protocol version
/// for both the backend database and MMR data files.
/// This defines the p2p layer protocol version for this node.
pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(1_000);
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Automated testing coinbase maturity
pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Testing cut through horizon in blocks
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing cut through horizon in blocks
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
/// Testing state sync threshold in blocks
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artifically low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
/// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such kind of defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
/// Number of blocks to reuse a txhashset zip for.
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
/// Types of chain a server can run with, dictates the genesis block and
/// and mining parameters used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ChainTypes {
/// For CI testing
AutomatedTesting,
/// For User testing
UserTesting,
/// Protocol testing network
Testnet,
/// Main production network
Mainnet,
}
impl ChainTypes {
/// Short name representing the chain type ("test", "main", etc.)
pub fn shortname(&self) -> String {
match *self {
ChainTypes::AutomatedTesting => "auto".to_owned(),
ChainTypes::UserTesting => "user".to_owned(),
ChainTypes::Testnet => "test".to_owned(),
ChainTypes::Mainnet => "main".to_owned(),
}
}
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Mainnet
}
}
lazy_static! {
/// Global chain_type that must be initialized once on node startup.
/// This is accessed via get_chain_type() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global acccept fee base that must be initialized once on node startup.
/// This is accessed via get_acccept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
/// Global feature flag for NRD kernel support.
/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
/// If disabled NRD kernels are invalid regardless of header version or block height.
pub static ref GLOBAL_NRD_FEATURE_ENABLED: OneTime<bool> = OneTime::new();
}
thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
/// Local feature flag for NRD kernel support.
pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
}
/// One time initialization of the global chain_type.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.init(new_type)
}
/// Set the global chain_type using an override
pub fn set_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.set(new_type, true);
}
/// Set the chain type on a per-thread basis via thread_local storage.
pub fn set_local_chain_type(new_type: ChainTypes) {
CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
}
/// Get the chain type via thread_local, fallback to global chain_type.
pub fn get_chain_type() -> ChainTypes {
CHAIN_TYPE.with(|chain_type| match chain_type.get() {
None => {
if !GLOBAL_CHAIN_TYPE.is_init() {
panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
}
let chain_type = GLOBAL_CHAIN_TYPE.borrow();
set_local_chain_type(chain_type);
chain_type
}
Some(chain_type) => chain_type,
})
}
/// Return genesis block for the active chain type
pub fn get_genesis_block() -> Block {
match get_chain_type() {
ChainTypes::Mainnet => genesis::genesis_main(),
ChainTypes::Testnet => genesis::genesis_test(),
_ => genesis::genesis_dev(),
}
}
/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// The global future time limit may be reset again using the override
pub fn set_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.set(new_ftl, true)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// The global accept fee base may be reset using override.
pub fn set_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.set(new_base, true)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread local config first. If not set fallback to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
}
/// Future Time Limit (FTL)
/// Look at thread local config first. If not set fallback to global config.
/// Default to false if global config unset.
pub fn get_future_time_limit() -> u64 {
FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
None => {
let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
GLOBAL_FUTURE_TIME_LIMIT.borrow()
} else {
DEFAULT_FUTURE_TIME_LIMIT
};
set_local_future_time_limit(ftl);
ftl
}
Some(ftl) => ftl,
})
}
/// One time initialization of the global NRD feature flag.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_nrd_enabled(enabled: bool) |
/// Set the global NRD feature flag using override.
pub fn set_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true)
}
/// Explicitly enable the local NRD feature flag.
pub fn set_local_nrd_enabled(enabled: bool) {
NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
}
/// Is the NRD feature flag enabled?
/// Look at thread local config first. If not set fallback to global config.
/// Default to false if global config unset.
pub fn is_nrd_enabled() -> bool {
NRD_FEATURE_ENABLED.with(|flag| match flag.get() {
None => {
if GLOBAL_NRD_FEATURE_ENABLED.is_init() {
let global_flag = GLOBAL_NRD_FEATURE_ENABLED.borrow();
flag.set(Some(global_flag));
global_flag
} else {
// Global config unset, default to false.
false
}
}
Some(flag) => flag,
})
}
/// Return either a cuckaroo* context or a cuckatoo context
/// Single change point
pub fn create_pow_context<T>(
height: u64,
edge_bits: u8,
proof_size: usize,
max_sols: u32,
) -> Result<Box<dyn PoWContext>, pow::Error> {
let chain_type = get_chain_type();
if chain_type == ChainTypes::Mainnet || chain_type == ChainTypes::Testnet {
// Mainnet and Testnet have Cuckatoo31+ for AF and Cuckaroo{,d,m,z}29 for AR
if edge_bits > 29 {
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
} else {
match header_version(height) {
HeaderVersion(1) => new_cuckaroo_ctx(edge_bits, proof_size),
HeaderVersion(2) => new_cuckarood_ctx(edge_bits, proof_size),
HeaderVersion(3) => new_cuckaroom_ctx(edge_bits, proof_size),
HeaderVersion(4) => new_cuckarooz_ctx(edge_bits, proof_size),
_ => no_cuckaroo_ctx(),
}
}
} else {
// Everything else is Cuckatoo only
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
}
}
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => DEFAULT_MIN_EDGE_BITS,
}
}
/// Reference edge_bits used to compute factor on higher Cuck(at)oo graph sizes,
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
/// The proofsize
pub fn proofsize() -> usize {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
_ => PROOFSIZE,
}
}
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => COINBASE_MATURITY,
}
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet => INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
}
}
/// Minimum valid graph weight post HF4
pub fn min_wtema_graph_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS),
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS),
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS),
ChainTypes::Mainnet => C32_GRAPH_WEIGHT,
}
}
/// Maximum allowed block weight.
pub fn max_block_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::Testnet => MAX_BLOCK_WEIGHT,
ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
}
}
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
_ => CUT_THROUGH_HORIZON,
}
}
/// Threshold at which we can request a txhashset (and full blocks from)
pub fn state_sync_threshold() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_STATE_SYNC_THRESHOLD,
ChainTypes::UserTesting => TESTING_STATE_SYNC_THRESHOLD,
_ => STATE_SYNC_THRESHOLD,
}
}
/// Number of blocks to reuse a txhashset zip for.
pub fn txhashset_archive_interval() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
_ => TXHASHSET_ARCHIVE_INTERVAL,
}
}
/// Are we in production mode?
/// Production defined as a live public network, testnet[n] or mainnet.
pub fn is_production_mode() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
ChainTypes::Mainnet => true,
_ => false,
}
}
/// Are we in testnet?
/// Note: We do not have a corresponding is_mainnet() as we want any tests to be as close
/// as possible to "mainnet" configuration as possible.
/// We want to avoid missing any mainnet only code paths.
pub fn is_testnet() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
_ => false,
}
}
/// Converts an iterator of block difficulty data to more a more manageable
/// vector and pads if needed (which will) only be needed for the first few
/// blocks after genesis
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
where
T: IntoIterator<Item = HeaderDifficultyInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = DMA_WINDOW as usize + 1;
let mut last_n: Vec<HeaderDifficultyInfo> =
cursor.into_iter().take(needed_block_count).collect();
// Only needed just after blockchain launch... basically ensures there's
// always enough data by simulating perfectly timed pre-genesis
// blocks at the genesis difficulty as needed.
let n = last_n.len();
if needed_block_count > n {
let last_ts_delta = if n > 1 {
last_n[0].timestamp - last_n[1].timestamp
} else {
BLOCK_TIME_SEC
};
let last_diff = last_n[0].difficulty;
// fill in simulated blocks with values from the previous real block
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
last_n
}
/// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
#[inline]
pub fn header_size_bytes(edge_bits: u8) -> usize {
let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits);
size + proof_size
}
#[cfg(test)]
mod test {
use super::*;
use crate::core::Block;
use crate::genesis::*;
use crate::pow::mine_genesis_block;
use crate::ser::{BinWriter, Writeable};
fn test_header_len(genesis: Block) {
let mut raw = Vec::<u8>::with_capacity(1_024);
let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
genesis.header.write(&mut writer).unwrap();
assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
}
#[test]
fn automated_testing_header_len() {
set_local_chain_type(ChainTypes::AutomatedTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn user_testing_header_len() {
set_local_chain_type(ChainTypes::UserTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn testnet_header_len() {
set_local_chain_type(ChainTypes::Testnet);
test_header_len(genesis_test());
}
#[test]
fn mainnet_header_len() {
set_local_chain_type(ChainTypes::Mainnet);
test_header_len(genesis_main());
}
}
| {
GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
} | identifier_body |
global.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Values that should be shared across all modules, without necessarily
//! having to pass them all over the place, but aren't consensus values.
//! should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
};
use crate::ser::ProtocolVersion;
use std::cell::Cell;
use util::OneTime;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// The default "local" protocol version for this node.
/// We negotiate compatible versions with each peer via Hand/Shake.
/// Note: We also use a specific (possible different) protocol version
/// for both the backend database and MMR data files.
/// This defines the p2p layer protocol version for this node.
pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(1_000);
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Automated testing coinbase maturity
pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Testing cut through horizon in blocks
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing cut through horizon in blocks
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
/// Testing state sync threshold in blocks
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artifically low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
/// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such kind of defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
/// Number of blocks to reuse a txhashset zip for.
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
/// Types of chain a server can run with, dictates the genesis block and
/// and mining parameters used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ChainTypes {
/// For CI testing
AutomatedTesting,
/// For User testing
UserTesting,
/// Protocol testing network
Testnet,
/// Main production network
Mainnet,
}
impl ChainTypes {
/// Short name representing the chain type ("test", "main", etc.)
pub fn shortname(&self) -> String {
match *self {
ChainTypes::AutomatedTesting => "auto".to_owned(),
ChainTypes::UserTesting => "user".to_owned(),
ChainTypes::Testnet => "test".to_owned(),
ChainTypes::Mainnet => "main".to_owned(),
}
}
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Mainnet
}
}
lazy_static! {
/// Global chain_type that must be initialized once on node startup.
/// This is accessed via get_chain_type() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global acccept fee base that must be initialized once on node startup.
/// This is accessed via get_acccept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
/// Global feature flag for NRD kernel support.
/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
/// If disabled NRD kernels are invalid regardless of header version or block height.
pub static ref GLOBAL_NRD_FEATURE_ENABLED: OneTime<bool> = OneTime::new();
}
thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
/// Local feature flag for NRD kernel support.
pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
}
/// One time initialization of the global chain_type.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.init(new_type)
}
/// Set the global chain_type using an override
pub fn set_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.set(new_type, true);
}
/// Set the chain type on a per-thread basis via thread_local storage.
pub fn set_local_chain_type(new_type: ChainTypes) {
CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
}
/// Get the chain type via thread_local, fallback to global chain_type.
pub fn | () -> ChainTypes {
CHAIN_TYPE.with(|chain_type| match chain_type.get() {
None => {
if !GLOBAL_CHAIN_TYPE.is_init() {
panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
}
let chain_type = GLOBAL_CHAIN_TYPE.borrow();
set_local_chain_type(chain_type);
chain_type
}
Some(chain_type) => chain_type,
})
}
/// Return genesis block for the active chain type
pub fn get_genesis_block() -> Block {
match get_chain_type() {
ChainTypes::Mainnet => genesis::genesis_main(),
ChainTypes::Testnet => genesis::genesis_test(),
_ => genesis::genesis_dev(),
}
}
/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// The global future time limit may be reset again using the override
pub fn set_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.set(new_ftl, true)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// The global accept fee base may be reset using override.
pub fn set_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.set(new_base, true)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread local config first. If not set fallback to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
}
/// Future Time Limit (FTL)
/// Look at thread local config first. If not set fallback to global config.
/// Default to false if global config unset.
pub fn get_future_time_limit() -> u64 {
FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
None => {
let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
GLOBAL_FUTURE_TIME_LIMIT.borrow()
} else {
DEFAULT_FUTURE_TIME_LIMIT
};
set_local_future_time_limit(ftl);
ftl
}
Some(ftl) => ftl,
})
}
/// One time initialization of the global NRD feature flag.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
}
/// Set the global NRD feature flag using override.
pub fn set_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true)
}
/// Explicitly enable the local NRD feature flag.
pub fn set_local_nrd_enabled(enabled: bool) {
NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
}
/// Is the NRD feature flag enabled?
/// Look at thread local config first. If not set fallback to global config.
/// Default to false if global config unset.
pub fn is_nrd_enabled() -> bool {
NRD_FEATURE_ENABLED.with(|flag| match flag.get() {
None => {
if GLOBAL_NRD_FEATURE_ENABLED.is_init() {
let global_flag = GLOBAL_NRD_FEATURE_ENABLED.borrow();
flag.set(Some(global_flag));
global_flag
} else {
// Global config unset, default to false.
false
}
}
Some(flag) => flag,
})
}
/// Return either a cuckaroo* context or a cuckatoo context
/// Single change point
pub fn create_pow_context<T>(
height: u64,
edge_bits: u8,
proof_size: usize,
max_sols: u32,
) -> Result<Box<dyn PoWContext>, pow::Error> {
let chain_type = get_chain_type();
if chain_type == ChainTypes::Mainnet || chain_type == ChainTypes::Testnet {
// Mainnet and Testnet have Cuckatoo31+ for AF and Cuckaroo{,d,m,z}29 for AR
if edge_bits > 29 {
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
} else {
match header_version(height) {
HeaderVersion(1) => new_cuckaroo_ctx(edge_bits, proof_size),
HeaderVersion(2) => new_cuckarood_ctx(edge_bits, proof_size),
HeaderVersion(3) => new_cuckaroom_ctx(edge_bits, proof_size),
HeaderVersion(4) => new_cuckarooz_ctx(edge_bits, proof_size),
_ => no_cuckaroo_ctx(),
}
}
} else {
// Everything else is Cuckatoo only
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
}
}
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => DEFAULT_MIN_EDGE_BITS,
}
}
/// Reference edge_bits used to compute factor on higher Cuck(at)oo graph sizes,
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
/// The proofsize
pub fn proofsize() -> usize {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
_ => PROOFSIZE,
}
}
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => COINBASE_MATURITY,
}
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet => INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
}
}
/// Minimum valid graph weight post HF4
pub fn min_wtema_graph_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS),
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS),
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS),
ChainTypes::Mainnet => C32_GRAPH_WEIGHT,
}
}
/// Maximum allowed block weight.
pub fn max_block_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::Testnet => MAX_BLOCK_WEIGHT,
ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
}
}
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
_ => CUT_THROUGH_HORIZON,
}
}
/// Threshold at which we can request a txhashset (and full blocks from)
pub fn state_sync_threshold() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_STATE_SYNC_THRESHOLD,
ChainTypes::UserTesting => TESTING_STATE_SYNC_THRESHOLD,
_ => STATE_SYNC_THRESHOLD,
}
}
/// Number of blocks to reuse a txhashset zip for.
pub fn txhashset_archive_interval() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
_ => TXHASHSET_ARCHIVE_INTERVAL,
}
}
/// Are we in production mode?
/// Production defined as a live public network, testnet[n] or mainnet.
pub fn is_production_mode() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
ChainTypes::Mainnet => true,
_ => false,
}
}
/// Are we in testnet?
/// Note: We do not have a corresponding is_mainnet() as we want any tests to be as close
/// as possible to "mainnet" configuration as possible.
/// We want to avoid missing any mainnet only code paths.
pub fn is_testnet() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
_ => false,
}
}
/// Converts an iterator of block difficulty data to more a more manageable
/// vector and pads if needed (which will) only be needed for the first few
/// blocks after genesis
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
where
T: IntoIterator<Item = HeaderDifficultyInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = DMA_WINDOW as usize + 1;
let mut last_n: Vec<HeaderDifficultyInfo> =
cursor.into_iter().take(needed_block_count).collect();
// Only needed just after blockchain launch... basically ensures there's
// always enough data by simulating perfectly timed pre-genesis
// blocks at the genesis difficulty as needed.
let n = last_n.len();
if needed_block_count > n {
let last_ts_delta = if n > 1 {
last_n[0].timestamp - last_n[1].timestamp
} else {
BLOCK_TIME_SEC
};
let last_diff = last_n[0].difficulty;
// fill in simulated blocks with values from the previous real block
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
last_n
}
/// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
#[inline]
pub fn header_size_bytes(edge_bits: u8) -> usize {
let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits);
size + proof_size
}
#[cfg(test)]
mod test {
use super::*;
use crate::core::Block;
use crate::genesis::*;
use crate::pow::mine_genesis_block;
use crate::ser::{BinWriter, Writeable};
fn test_header_len(genesis: Block) {
let mut raw = Vec::<u8>::with_capacity(1_024);
let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
genesis.header.write(&mut writer).unwrap();
assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
}
#[test]
fn automated_testing_header_len() {
set_local_chain_type(ChainTypes::AutomatedTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn user_testing_header_len() {
set_local_chain_type(ChainTypes::UserTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn testnet_header_len() {
set_local_chain_type(ChainTypes::Testnet);
test_header_len(genesis_test());
}
#[test]
fn mainnet_header_len() {
set_local_chain_type(ChainTypes::Mainnet);
test_header_len(genesis_main());
}
}
| get_chain_type | identifier_name |
global.rs | // Copyright 2021 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Values that should be shared across all modules, without necessarily
//! having to pass them all over the place, but aren't consensus values.
//! should be used sparingly.
use crate::consensus::{
graph_weight, header_version, HeaderDifficultyInfo, BASE_EDGE_BITS, BLOCK_TIME_SEC,
C32_GRAPH_WEIGHT, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT, DEFAULT_MIN_EDGE_BITS,
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
};
use crate::ser::ProtocolVersion;
use std::cell::Cell;
use util::OneTime;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
/// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// The default "local" protocol version for this node.
/// We negotiate compatible versions with each peer via Hand/Shake.
/// Note: We also use a specific (possible different) protocol version
/// for both the backend database and MMR data files.
/// This defines the p2p layer protocol version for this node.
pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(1_000);
/// Automated testing edge_bits
pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 8;
/// User testing edge_bits
pub const USER_TESTING_MIN_EDGE_BITS: u8 = 15;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Automated testing coinbase maturity
pub const AUTOMATED_TESTING_COINBASE_MATURITY: u64 = 3;
/// User testing coinbase maturity
pub const USER_TESTING_COINBASE_MATURITY: u64 = 3;
/// Testing cut through horizon in blocks
pub const AUTOMATED_TESTING_CUT_THROUGH_HORIZON: u32 = 20;
/// Testing cut through horizon in blocks
pub const USER_TESTING_CUT_THROUGH_HORIZON: u32 = 70;
/// Testing state sync threshold in blocks
pub const TESTING_STATE_SYNC_THRESHOLD: u32 = 20;
/// Testing initial block difficulty
pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testing max_block_weight (artifically low, just enough to support a few txs).
pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;
/// Default unit of fee per tx weight, making each output cost about a Grincent
pub const DEFAULT_ACCEPT_FEE_BASE: u64 = GRIN_BASE / 100 / 20; // 500_000
/// default Future Time Limit (FTL) of 5 minutes
pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
/// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
/// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
/// If a peer's last seen time is 2 weeks ago we will forget such kind of defunct peers.
const PEER_EXPIRATION_DAYS: i64 = 7 * 2;
/// Constant that expresses defunct peer timeout in seconds to be used in checks.
pub const PEER_EXPIRATION_REMOVE_TIME: i64 = PEER_EXPIRATION_DAYS * 24 * 3600;
/// Trigger compaction check on average every day for all nodes.
/// Randomized per node - roll the dice on every block to decide.
/// Will compact the txhashset to remove pruned data.
/// Will also remove old blocks and associated data from the database.
/// For a node configured as "archival_mode = true" only the txhashset will be compacted.
pub const COMPACTION_CHECK: u64 = DAY_HEIGHT;
/// Number of blocks to reuse a txhashset zip for (automated testing and user testing).
pub const TESTING_TXHASHSET_ARCHIVE_INTERVAL: u64 = 10;
/// Number of blocks to reuse a txhashset zip for.
pub const TXHASHSET_ARCHIVE_INTERVAL: u64 = 12 * 60;
/// Types of chain a server can run with, dictates the genesis block and
/// and mining parameters used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum ChainTypes {
/// For CI testing
AutomatedTesting,
/// For User testing
UserTesting,
/// Protocol testing network
Testnet,
/// Main production network
Mainnet,
}
impl ChainTypes {
/// Short name representing the chain type ("test", "main", etc.)
pub fn shortname(&self) -> String {
match *self {
ChainTypes::AutomatedTesting => "auto".to_owned(),
ChainTypes::UserTesting => "user".to_owned(),
ChainTypes::Testnet => "test".to_owned(),
ChainTypes::Mainnet => "main".to_owned(),
}
}
}
impl Default for ChainTypes {
fn default() -> ChainTypes {
ChainTypes::Mainnet
}
}
lazy_static! {
/// Global chain_type that must be initialized once on node startup.
/// This is accessed via get_chain_type() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();
/// Global acccept fee base that must be initialized once on node startup.
/// This is accessed via get_acccept_fee_base() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_ACCEPT_FEE_BASE: OneTime<u64> = OneTime::new();
/// Global future time limit that must be initialized once on node startup.
/// This is accessed via get_future_time_limit() which allows the global value
/// to be overridden on a per-thread basis (for testing).
pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
/// Global feature flag for NRD kernel support.
/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
/// If disabled NRD kernels are invalid regardless of header version or block height.
pub static ref GLOBAL_NRD_FEATURE_ENABLED: OneTime<bool> = OneTime::new();
}
thread_local! {
/// Mainnet|Testnet|UserTesting|AutomatedTesting
pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);
/// minimum transaction fee per unit of transaction weight for mempool acceptance
pub static ACCEPT_FEE_BASE: Cell<Option<u64>> = Cell::new(None);
/// maximum number of seconds into future for timestamp of block to be acceptable
pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
/// Local feature flag for NRD kernel support.
pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
}
/// One time initialization of the global chain_type.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.init(new_type)
}
/// Set the global chain_type using an override
pub fn set_global_chain_type(new_type: ChainTypes) {
GLOBAL_CHAIN_TYPE.set(new_type, true);
}
/// Set the chain type on a per-thread basis via thread_local storage.
pub fn set_local_chain_type(new_type: ChainTypes) {
CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
}
/// Get the chain type via thread_local, fallback to global chain_type.
pub fn get_chain_type() -> ChainTypes {
CHAIN_TYPE.with(|chain_type| match chain_type.get() {
None => {
if !GLOBAL_CHAIN_TYPE.is_init() {
panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
}
let chain_type = GLOBAL_CHAIN_TYPE.borrow();
set_local_chain_type(chain_type);
chain_type
}
Some(chain_type) => chain_type,
})
}
/// Return genesis block for the active chain type
pub fn get_genesis_block() -> Block {
match get_chain_type() {
ChainTypes::Mainnet => genesis::genesis_main(),
ChainTypes::Testnet => genesis::genesis_test(),
_ => genesis::genesis_dev(),
}
}
/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
}
/// The global future time limit may be reset again using the override
pub fn set_global_future_time_limit(new_ftl: u64) {
GLOBAL_FUTURE_TIME_LIMIT.set(new_ftl, true)
}
/// One time initialization of the global accept fee base
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.init(new_base)
}
/// The global accept fee base may be reset using override.
pub fn set_global_accept_fee_base(new_base: u64) {
GLOBAL_ACCEPT_FEE_BASE.set(new_base, true)
}
/// Set the accept fee base on a per-thread basis via thread_local storage.
pub fn set_local_accept_fee_base(new_base: u64) {
ACCEPT_FEE_BASE.with(|base| base.set(Some(new_base)))
}
/// Accept Fee Base
/// Look at thread local config first. If not set fallback to global config.
/// Default to grin-cent/20 if global config unset.
pub fn get_accept_fee_base() -> u64 {
ACCEPT_FEE_BASE.with(|base| match base.get() {
None => {
let base = if GLOBAL_ACCEPT_FEE_BASE.is_init() {
GLOBAL_ACCEPT_FEE_BASE.borrow()
} else {
DEFAULT_ACCEPT_FEE_BASE
};
set_local_accept_fee_base(base);
base
}
Some(base) => base,
})
}
/// Set the future time limit on a per-thread basis via thread_local storage.
pub fn set_local_future_time_limit(new_ftl: u64) {
FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
}
/// Future Time Limit (FTL)
/// Look at thread local config first. If not set fallback to global config.
/// Default to false if global config unset.
pub fn get_future_time_limit() -> u64 {
FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
None => {
let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
GLOBAL_FUTURE_TIME_LIMIT.borrow()
} else {
DEFAULT_FUTURE_TIME_LIMIT
};
set_local_future_time_limit(ftl);
ftl
}
Some(ftl) => ftl,
})
}
/// One time initialization of the global NRD feature flag.
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
}
/// Set the global NRD feature flag using override.
pub fn set_global_nrd_enabled(enabled: bool) {
GLOBAL_NRD_FEATURE_ENABLED.set(enabled, true)
}
/// Explicitly enable the local NRD feature flag.
pub fn set_local_nrd_enabled(enabled: bool) {
NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
}
/// Is the NRD feature flag enabled?
/// Look at thread local config first. If not set fallback to global config.
/// Default to false if global config unset.
pub fn is_nrd_enabled() -> bool {
NRD_FEATURE_ENABLED.with(|flag| match flag.get() {
None => {
if GLOBAL_NRD_FEATURE_ENABLED.is_init() {
let global_flag = GLOBAL_NRD_FEATURE_ENABLED.borrow();
flag.set(Some(global_flag));
global_flag
} else {
// Global config unset, default to false.
false
}
}
Some(flag) => flag,
})
}
/// Return either a cuckaroo* context or a cuckatoo context
/// Single change point
pub fn create_pow_context<T>(
height: u64,
edge_bits: u8,
proof_size: usize,
max_sols: u32,
) -> Result<Box<dyn PoWContext>, pow::Error> {
let chain_type = get_chain_type();
if chain_type == ChainTypes::Mainnet || chain_type == ChainTypes::Testnet {
// Mainnet and Testnet have Cuckatoo31+ for AF and Cuckaroo{,d,m,z}29 for AR
if edge_bits > 29 {
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
} else {
match header_version(height) {
HeaderVersion(1) => new_cuckaroo_ctx(edge_bits, proof_size),
HeaderVersion(2) => new_cuckarood_ctx(edge_bits, proof_size),
HeaderVersion(3) => new_cuckaroom_ctx(edge_bits, proof_size),
HeaderVersion(4) => new_cuckarooz_ctx(edge_bits, proof_size),
_ => no_cuckaroo_ctx(),
}
}
} else {
// Everything else is Cuckatoo only
new_cuckatoo_ctx(edge_bits, proof_size, max_sols)
}
}
/// The minimum acceptable edge_bits
pub fn min_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => DEFAULT_MIN_EDGE_BITS,
}
}
/// Reference edge_bits used to compute factor on higher Cuck(at)oo graph sizes,
/// while the min_edge_bits can be changed on a soft fork, changing
/// base_edge_bits is a hard fork.
pub fn base_edge_bits() -> u8 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_EDGE_BITS,
ChainTypes::UserTesting => USER_TESTING_MIN_EDGE_BITS,
_ => BASE_EDGE_BITS,
}
}
/// The proofsize
pub fn proofsize() -> usize {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
ChainTypes::UserTesting => USER_TESTING_PROOF_SIZE,
_ => PROOFSIZE,
}
}
/// Coinbase maturity for coinbases to be spent
pub fn coinbase_maturity() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_COINBASE_MATURITY,
ChainTypes::UserTesting => USER_TESTING_COINBASE_MATURITY,
_ => COINBASE_MATURITY,
}
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::UserTesting => TESTING_INITIAL_DIFFICULTY,
ChainTypes::Testnet => INITIAL_DIFFICULTY,
ChainTypes::Mainnet => INITIAL_DIFFICULTY,
}
}
/// Initial mining secondary scale
pub fn initial_graph_weight() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS) as u32,
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
ChainTypes::Mainnet => graph_weight(0, SECOND_POW_EDGE_BITS) as u32,
}
}
/// Minimum valid graph weight post HF4
pub fn min_wtema_graph_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => graph_weight(0, AUTOMATED_TESTING_MIN_EDGE_BITS),
ChainTypes::UserTesting => graph_weight(0, USER_TESTING_MIN_EDGE_BITS),
ChainTypes::Testnet => graph_weight(0, SECOND_POW_EDGE_BITS),
ChainTypes::Mainnet => C32_GRAPH_WEIGHT,
}
}
/// Maximum allowed block weight.
pub fn max_block_weight() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::UserTesting => TESTING_MAX_BLOCK_WEIGHT,
ChainTypes::Testnet => MAX_BLOCK_WEIGHT,
ChainTypes::Mainnet => MAX_BLOCK_WEIGHT,
}
}
/// Maximum allowed transaction weight (1 weight unit ~= 32 bytes)
pub fn max_tx_weight() -> u64 {
let coinbase_weight = OUTPUT_WEIGHT + KERNEL_WEIGHT;
max_block_weight().saturating_sub(coinbase_weight) as u64
}
/// Horizon at which we can cut-through and do full local pruning
pub fn cut_through_horizon() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_CUT_THROUGH_HORIZON,
ChainTypes::UserTesting => USER_TESTING_CUT_THROUGH_HORIZON,
_ => CUT_THROUGH_HORIZON,
}
}
/// Threshold at which we can request a txhashset (and full blocks from)
pub fn state_sync_threshold() -> u32 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_STATE_SYNC_THRESHOLD,
ChainTypes::UserTesting => TESTING_STATE_SYNC_THRESHOLD,
_ => STATE_SYNC_THRESHOLD,
}
}
/// Number of blocks to reuse a txhashset zip for.
pub fn txhashset_archive_interval() -> u64 {
match get_chain_type() {
ChainTypes::AutomatedTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
ChainTypes::UserTesting => TESTING_TXHASHSET_ARCHIVE_INTERVAL,
_ => TXHASHSET_ARCHIVE_INTERVAL,
}
}
/// Are we in production mode?
/// Production defined as a live public network, testnet[n] or mainnet.
pub fn is_production_mode() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
ChainTypes::Mainnet => true,
_ => false,
}
}
/// Are we in testnet?
/// Note: We do not have a corresponding is_mainnet() as we want any tests to be as close
/// as possible to "mainnet" configuration as possible.
/// We want to avoid missing any mainnet only code paths.
pub fn is_testnet() -> bool {
match get_chain_type() {
ChainTypes::Testnet => true,
_ => false,
}
}
/// Converts an iterator of block difficulty data to more a more manageable
/// vector and pads if needed (which will) only be needed for the first few
/// blocks after genesis
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<HeaderDifficultyInfo>
where
T: IntoIterator<Item = HeaderDifficultyInfo>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = DMA_WINDOW as usize + 1;
let mut last_n: Vec<HeaderDifficultyInfo> =
cursor.into_iter().take(needed_block_count).collect();
// Only needed just after blockchain launch... basically ensures there's
// always enough data by simulating perfectly timed pre-genesis
// blocks at the genesis difficulty as needed.
let n = last_n.len();
if needed_block_count > n {
let last_ts_delta = if n > 1 {
last_n[0].timestamp - last_n[1].timestamp
} else {
BLOCK_TIME_SEC
};
let last_diff = last_n[0].difficulty;
// fill in simulated blocks with values from the previous real block
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderDifficultyInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();
last_n
}
| size + proof_size
}
#[cfg(test)]
mod test {
use super::*;
use crate::core::Block;
use crate::genesis::*;
use crate::pow::mine_genesis_block;
use crate::ser::{BinWriter, Writeable};
fn test_header_len(genesis: Block) {
let mut raw = Vec::<u8>::with_capacity(1_024);
let mut writer = BinWriter::new(&mut raw, ProtocolVersion::local());
genesis.header.write(&mut writer).unwrap();
assert_eq!(raw.len(), header_size_bytes(genesis.header.pow.edge_bits()));
}
#[test]
fn automated_testing_header_len() {
set_local_chain_type(ChainTypes::AutomatedTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn user_testing_header_len() {
set_local_chain_type(ChainTypes::UserTesting);
test_header_len(mine_genesis_block().unwrap());
}
#[test]
fn testnet_header_len() {
set_local_chain_type(ChainTypes::Testnet);
test_header_len(genesis_test());
}
#[test]
fn mainnet_header_len() {
set_local_chain_type(ChainTypes::Mainnet);
test_header_len(genesis_main());
}
} | /// Calculates the size of a header (in bytes) given a number of edge bits in the PoW
#[inline]
pub fn header_size_bytes(edge_bits: u8) -> usize {
let size = 2 + 2 * 8 + 5 * 32 + 32 + 2 * 8;
let proof_size = 8 + 4 + 8 + 1 + Proof::pack_len(edge_bits); | random_line_split |
simple-filters.go | package filters
import (
"log"
"math"
"math/rand"
"strconv"
"strings"
"github.com/exascience/elprep/bed"
"github.com/exascience/elprep/sam"
"github.com/exascience/elprep/utils"
)
// ReplaceReferenceSequenceDictionary returns a filter for replacing
// the reference sequence dictionary in a Header.
func ReplaceReferenceSequenceDictionary(dict []utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
if sortingOrder := sam.SortingOrder(header.HD["SO"]); sortingOrder == sam.Coordinate {
previousPos := -1
oldDict := header.SQ
for _, entry := range dict {
sn := entry["SN"]
pos := utils.Find(oldDict, func(entry utils.StringMap) bool { return entry["SN"] == sn })
if pos >= 0 {
if pos > previousPos {
previousPos = pos
} else {
header.SetHDSO(sam.Unknown)
break
}
}
}
}
dictTable := make(map[string]bool)
for _, entry := range dict {
dictTable[entry["SN"]] = true
}
header.SQ = dict
return func(aln *sam.Alignment) bool { return dictTable[aln.RNAME] }
}
}
// ReplaceReferenceSequenceDictionaryFromSamFile returns a filter for
// replacing the reference sequence dictionary in a Header with one
// parsed from the given SAM/DICT file.
func ReplaceReferenceSequenceDictionaryFromSamFile(samFile string) (f sam.Filter, err error) {
input, err := sam.Open(samFile, true)
if err != nil {
return nil, err
}
defer func() {
nerr := input.Close()
if err == nil {
err = nerr
}
}()
header, _, err := sam.ParseHeader(input.Reader)
if err != nil {
return nil, err
}
return ReplaceReferenceSequenceDictionary(header.SQ), nil
}
// RemoveUnmappedReads is a filter for removing unmapped sam-alignment
// instances, based on FLAG.
func RemoveUnmappedReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Unmapped) == 0 }
}
// RemoveUnmappedReadsStrict is a filter for removing unmapped
// sam-alignment instances, based on FLAG, or POS=0, or RNAME=*.
func RemoveUnmappedReadsStrict(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
return ((aln.FLAG & sam.Unmapped) == 0) && (aln.POS != 0) && (aln.RNAME != "*")
}
}
// RemoveNonExactMappingReads is a filter that removes all reads that
// are not exact matches with the reference (soft-clipping ok), based
// on CIGAR string (only M and S allowed).
func RemoveNonExactMappingReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return !strings.ContainsAny(aln.CIGAR, "IDNHPX=") }
}
// Symbols for optional fields used for determining exact matches. See
// http://samtools.github.io/hts-specs/SAMv1.pdf - Section 1.5.
var (
X0 = utils.Intern("X0")
X1 = utils.Intern("X1")
XM = utils.Intern("XM")
XO = utils.Intern("XO")
XG = utils.Intern("XG")
)
// RemoveNonExactMappingReadsStrict is a filter that removes all reads
// that are not exact matches with the reference, based on the
// optional fields X0=1 (unique mapping), X1=0 (no suboptimal hit),
// XM=0 (no mismatch), XO=0 (no gap opening), XG=0 (no gap extension).
func RemoveNonExactMappingReadsStrict(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
if x0, ok := aln.TAGS.Get(X0); !ok || x0.(int32) != 1 {
return false
}
if x1, ok := aln.TAGS.Get(X1); !ok || x1.(int32) != 0 {
return false
}
if xm, ok := aln.TAGS.Get(XM); !ok || xm.(int32) != 0 {
return false
}
if xo, ok := aln.TAGS.Get(XO); !ok || xo.(int32) != 0 |
if xg, ok := aln.TAGS.Get(XG); !ok || xg.(int32) != 0 {
return false
}
return true
}
}
// RemoveDuplicateReads is a filter for removing duplicate
// sam-alignment instances, based on FLAG.
func RemoveDuplicateReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Duplicate) == 0 }
}
var sr = utils.Intern("sr")
// RemoveOptionalReads is a filter for removing alignments that
// represent optional information in elPrep.
func RemoveOptionalReads(header *sam.Header) sam.AlignmentFilter {
if _, found := header.UserRecords["@sr"]; found {
delete(header.UserRecords, "@sr")
return func(aln *sam.Alignment) bool { _, found := aln.TAGS.Get(sr); return !found }
}
return nil
}
// AddOrReplaceReadGroup returns a filter for adding or replacing the
// read group both in the Header and in each Alignment.
func AddOrReplaceReadGroup(readGroup utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
header.RG = []utils.StringMap{readGroup}
id := readGroup["ID"]
return func(aln *sam.Alignment) bool { aln.SetRG(id); return true }
}
}
// AddPGLine returns a filter for adding a @PG tag to a Header, and
// ensuring that it is the first one in the chain.
func AddPGLine(newPG utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
id := newPG["ID"]
for utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["ID"] == id }) >= 0 {
id += " "
id += strconv.FormatInt(rand.Int63n(0x10000), 16)
}
newPG["ID"] = id
for _, PG := range header.PG {
nextID := PG["ID"]
if pos := utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["PP"] == nextID }); pos < 0 {
newPG["PP"] = nextID
break
}
}
header.PG = append(header.PG, newPG)
return nil
}
}
// RenameChromosomes is a filter for prepending "chr" to the reference
// sequence names in a Header, and in RNAME and RNEXT in each
// Alignment.
func RenameChromosomes(header *sam.Header) sam.AlignmentFilter {
for _, entry := range header.SQ {
if sn, found := entry["SN"]; found {
entry["SN"] = "chr" + sn
}
}
return func(aln *sam.Alignment) bool {
if (aln.RNAME != "=") && (aln.RNAME != "*") {
aln.RNAME = "chr" + aln.RNAME
}
if (aln.RNEXT != "=") && (aln.RNEXT != "*") {
aln.RNEXT = "chr" + aln.RNEXT
}
return true
}
}
// AddREFID is a filter for adding the refid (index in the reference
// sequence dictionary) to alignments as temporary values.
func AddREFID(header *sam.Header) sam.AlignmentFilter {
dictTable := make(map[string]int32)
for index, entry := range header.SQ {
dictTable[entry["SN"]] = int32(index)
}
return func(aln *sam.Alignment) bool {
value, found := dictTable[aln.RNAME]
if !found {
value = -1
}
aln.SetREFID(value)
return true
}
}
// RemoveOptionalFields returns a filter for removing optional fields
// in an alignment.
func RemoveOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return nil
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return true
}
}
return false
})
return true
}
}
}
// KeepOptionalFields returns a filter for removing all but a list of
// given optional fields in an alignment.
func KeepOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS = nil
return true
}
}
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return false
}
}
return true
})
return true
}
}
}
// CleanSam is a filter for soft-clipping an alignment at the end of a
// reference sequence, and set MAPQ to 0 if unmapped.
func CleanSam(header *sam.Header) sam.AlignmentFilter {
referenceSequenceTable := make(map[string]int32)
for _, sn := range header.SQ {
referenceSequenceTable[sn["SN"]], _ = sam.SQLN(sn)
}
return func(aln *sam.Alignment) bool {
if aln.IsUnmapped() {
aln.MAPQ = 0
} else if cigar, err := sam.ScanCigarString(aln.CIGAR); err != nil {
log.Fatal(err, ", while scanning a CIGAR string for ", aln.QNAME, " in CleanSam")
} else if length := referenceSequenceTable[aln.RNAME]; end(aln, cigar) > length {
clipFrom := length - aln.POS + 1
aln.CIGAR = softClipEndOfRead(clipFrom, cigar)
}
return true
}
}
// RemoveNonOverlappingReads returns a filter for removing all reads
// that do not overlap with a set of regions specified by a bed file.
func RemoveNonOverlappingReads(bed *bed.Bed) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
alnStart := aln.POS
cigar, err := sam.ScanCigarString(aln.CIGAR)
if err != nil {
log.Fatal(err, ", while scanning a CIGAR string for", aln.QNAME, " in FilterNonOverlappingReads")
}
var alnEnd int32
if aln.IsUnmapped() || readLengthFromCigar(cigar) <= 0 {
alnEnd = aln.POS
} else {
alnEnd = end(aln, cigar)
}
regions := bed.RegionMap[utils.Intern(aln.RNAME)]
left := 0
right := len(regions) - 1
for left <= right {
mid := (left + right) / 2
regionStart := regions[mid].Start
regionEnd := regions[mid].End
if regionStart > alnEnd-1 {
right = mid - 1
} else if regionEnd <= alnStart-1 {
left = mid + 1
} else {
return true
}
}
return false
}
}
}
// RemoveMappingQualityLessThan is a filter for removing reads
// that do not match or exceed the given mapping quality.
func RemoveMappingQualityLessThan(mq int) sam.Filter {
if mq == 0 {
return nil // no need to add any filter because aln.MAPQ is always >= 0
}
if mq > math.MaxUint8 {
return func(_ *sam.Header) sam.AlignmentFilter {
return func(_ *sam.Alignment) bool {
return false // no aln.MAPQ can be > math.MaxUint8
}
}
}
mapq := byte(mq)
return func(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return aln.MAPQ >= mapq }
}
}
| {
return false
} | conditional_block |
simple-filters.go | package filters
import (
"log"
"math"
"math/rand"
"strconv"
"strings"
"github.com/exascience/elprep/bed"
"github.com/exascience/elprep/sam"
"github.com/exascience/elprep/utils"
)
// ReplaceReferenceSequenceDictionary returns a filter for replacing
// the reference sequence dictionary in a Header.
func ReplaceReferenceSequenceDictionary(dict []utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
if sortingOrder := sam.SortingOrder(header.HD["SO"]); sortingOrder == sam.Coordinate {
previousPos := -1
oldDict := header.SQ
for _, entry := range dict {
sn := entry["SN"]
pos := utils.Find(oldDict, func(entry utils.StringMap) bool { return entry["SN"] == sn })
if pos >= 0 {
if pos > previousPos {
previousPos = pos
} else {
header.SetHDSO(sam.Unknown)
break
}
}
}
}
dictTable := make(map[string]bool)
for _, entry := range dict {
dictTable[entry["SN"]] = true
}
header.SQ = dict
return func(aln *sam.Alignment) bool { return dictTable[aln.RNAME] }
}
}
// ReplaceReferenceSequenceDictionaryFromSamFile returns a filter for
// replacing the reference sequence dictionary in a Header with one
// parsed from the given SAM/DICT file.
func ReplaceReferenceSequenceDictionaryFromSamFile(samFile string) (f sam.Filter, err error) {
input, err := sam.Open(samFile, true)
if err != nil {
return nil, err
}
defer func() {
nerr := input.Close()
if err == nil {
err = nerr
}
}()
header, _, err := sam.ParseHeader(input.Reader)
if err != nil {
return nil, err
}
return ReplaceReferenceSequenceDictionary(header.SQ), nil
}
// RemoveUnmappedReads is a filter for removing unmapped sam-alignment
// instances, based on FLAG.
func RemoveUnmappedReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Unmapped) == 0 }
}
// RemoveUnmappedReadsStrict is a filter for removing unmapped
// sam-alignment instances, based on FLAG, or POS=0, or RNAME=*.
func | (_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
return ((aln.FLAG & sam.Unmapped) == 0) && (aln.POS != 0) && (aln.RNAME != "*")
}
}
// RemoveNonExactMappingReads is a filter that removes all reads that
// are not exact matches with the reference (soft-clipping ok), based
// on CIGAR string (only M and S allowed).
func RemoveNonExactMappingReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return !strings.ContainsAny(aln.CIGAR, "IDNHPX=") }
}
// Symbols for optional fields used for determining exact matches. See
// http://samtools.github.io/hts-specs/SAMv1.pdf - Section 1.5.
var (
X0 = utils.Intern("X0")
X1 = utils.Intern("X1")
XM = utils.Intern("XM")
XO = utils.Intern("XO")
XG = utils.Intern("XG")
)
// RemoveNonExactMappingReadsStrict is a filter that removes all reads
// that are not exact matches with the reference, based on the
// optional fields X0=1 (unique mapping), X1=0 (no suboptimal hit),
// XM=0 (no mismatch), XO=0 (no gap opening), XG=0 (no gap extension).
func RemoveNonExactMappingReadsStrict(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
if x0, ok := aln.TAGS.Get(X0); !ok || x0.(int32) != 1 {
return false
}
if x1, ok := aln.TAGS.Get(X1); !ok || x1.(int32) != 0 {
return false
}
if xm, ok := aln.TAGS.Get(XM); !ok || xm.(int32) != 0 {
return false
}
if xo, ok := aln.TAGS.Get(XO); !ok || xo.(int32) != 0 {
return false
}
if xg, ok := aln.TAGS.Get(XG); !ok || xg.(int32) != 0 {
return false
}
return true
}
}
// RemoveDuplicateReads is a filter for removing duplicate
// sam-alignment instances, based on FLAG.
func RemoveDuplicateReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Duplicate) == 0 }
}
var sr = utils.Intern("sr")
// RemoveOptionalReads is a filter for removing alignments that
// represent optional information in elPrep.
func RemoveOptionalReads(header *sam.Header) sam.AlignmentFilter {
if _, found := header.UserRecords["@sr"]; found {
delete(header.UserRecords, "@sr")
return func(aln *sam.Alignment) bool { _, found := aln.TAGS.Get(sr); return !found }
}
return nil
}
// AddOrReplaceReadGroup returns a filter for adding or replacing the
// read group both in the Header and in each Alignment.
func AddOrReplaceReadGroup(readGroup utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
header.RG = []utils.StringMap{readGroup}
id := readGroup["ID"]
return func(aln *sam.Alignment) bool { aln.SetRG(id); return true }
}
}
// AddPGLine returns a filter for adding a @PG tag to a Header, and
// ensuring that it is the first one in the chain.
func AddPGLine(newPG utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
id := newPG["ID"]
for utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["ID"] == id }) >= 0 {
id += " "
id += strconv.FormatInt(rand.Int63n(0x10000), 16)
}
newPG["ID"] = id
for _, PG := range header.PG {
nextID := PG["ID"]
if pos := utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["PP"] == nextID }); pos < 0 {
newPG["PP"] = nextID
break
}
}
header.PG = append(header.PG, newPG)
return nil
}
}
// RenameChromosomes is a filter for prepending "chr" to the reference
// sequence names in a Header, and in RNAME and RNEXT in each
// Alignment.
func RenameChromosomes(header *sam.Header) sam.AlignmentFilter {
for _, entry := range header.SQ {
if sn, found := entry["SN"]; found {
entry["SN"] = "chr" + sn
}
}
return func(aln *sam.Alignment) bool {
if (aln.RNAME != "=") && (aln.RNAME != "*") {
aln.RNAME = "chr" + aln.RNAME
}
if (aln.RNEXT != "=") && (aln.RNEXT != "*") {
aln.RNEXT = "chr" + aln.RNEXT
}
return true
}
}
// AddREFID is a filter for adding the refid (index in the reference
// sequence dictionary) to alignments as temporary values.
func AddREFID(header *sam.Header) sam.AlignmentFilter {
dictTable := make(map[string]int32)
for index, entry := range header.SQ {
dictTable[entry["SN"]] = int32(index)
}
return func(aln *sam.Alignment) bool {
value, found := dictTable[aln.RNAME]
if !found {
value = -1
}
aln.SetREFID(value)
return true
}
}
// RemoveOptionalFields returns a filter for removing optional fields
// in an alignment.
func RemoveOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return nil
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return true
}
}
return false
})
return true
}
}
}
// KeepOptionalFields returns a filter for removing all but a list of
// given optional fields in an alignment.
func KeepOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS = nil
return true
}
}
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return false
}
}
return true
})
return true
}
}
}
// CleanSam is a filter for soft-clipping an alignment at the end of a
// reference sequence, and set MAPQ to 0 if unmapped.
func CleanSam(header *sam.Header) sam.AlignmentFilter {
referenceSequenceTable := make(map[string]int32)
for _, sn := range header.SQ {
referenceSequenceTable[sn["SN"]], _ = sam.SQLN(sn)
}
return func(aln *sam.Alignment) bool {
if aln.IsUnmapped() {
aln.MAPQ = 0
} else if cigar, err := sam.ScanCigarString(aln.CIGAR); err != nil {
log.Fatal(err, ", while scanning a CIGAR string for ", aln.QNAME, " in CleanSam")
} else if length := referenceSequenceTable[aln.RNAME]; end(aln, cigar) > length {
clipFrom := length - aln.POS + 1
aln.CIGAR = softClipEndOfRead(clipFrom, cigar)
}
return true
}
}
// RemoveNonOverlappingReads returns a filter for removing all reads
// that do not overlap with a set of regions specified by a bed file.
func RemoveNonOverlappingReads(bed *bed.Bed) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
alnStart := aln.POS
cigar, err := sam.ScanCigarString(aln.CIGAR)
if err != nil {
log.Fatal(err, ", while scanning a CIGAR string for", aln.QNAME, " in FilterNonOverlappingReads")
}
var alnEnd int32
if aln.IsUnmapped() || readLengthFromCigar(cigar) <= 0 {
alnEnd = aln.POS
} else {
alnEnd = end(aln, cigar)
}
regions := bed.RegionMap[utils.Intern(aln.RNAME)]
left := 0
right := len(regions) - 1
for left <= right {
mid := (left + right) / 2
regionStart := regions[mid].Start
regionEnd := regions[mid].End
if regionStart > alnEnd-1 {
right = mid - 1
} else if regionEnd <= alnStart-1 {
left = mid + 1
} else {
return true
}
}
return false
}
}
}
// RemoveMappingQualityLessThan is a filter for removing reads
// that do not match or exceed the given mapping quality.
func RemoveMappingQualityLessThan(mq int) sam.Filter {
if mq == 0 {
return nil // no need to add any filter because aln.MAPQ is always >= 0
}
if mq > math.MaxUint8 {
return func(_ *sam.Header) sam.AlignmentFilter {
return func(_ *sam.Alignment) bool {
return false // no aln.MAPQ can be > math.MaxUint8
}
}
}
mapq := byte(mq)
return func(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return aln.MAPQ >= mapq }
}
}
| RemoveUnmappedReadsStrict | identifier_name |
simple-filters.go | package filters
import (
"log"
"math"
"math/rand"
"strconv"
"strings"
"github.com/exascience/elprep/bed"
"github.com/exascience/elprep/sam"
"github.com/exascience/elprep/utils"
)
// ReplaceReferenceSequenceDictionary returns a filter for replacing
// the reference sequence dictionary in a Header.
func ReplaceReferenceSequenceDictionary(dict []utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
if sortingOrder := sam.SortingOrder(header.HD["SO"]); sortingOrder == sam.Coordinate {
previousPos := -1
oldDict := header.SQ
for _, entry := range dict {
sn := entry["SN"]
pos := utils.Find(oldDict, func(entry utils.StringMap) bool { return entry["SN"] == sn })
if pos >= 0 {
if pos > previousPos {
previousPos = pos
} else {
header.SetHDSO(sam.Unknown)
break
}
}
}
}
dictTable := make(map[string]bool)
for _, entry := range dict {
dictTable[entry["SN"]] = true
}
header.SQ = dict
return func(aln *sam.Alignment) bool { return dictTable[aln.RNAME] }
}
}
// ReplaceReferenceSequenceDictionaryFromSamFile returns a filter for
// replacing the reference sequence dictionary in a Header with one
// parsed from the given SAM/DICT file.
func ReplaceReferenceSequenceDictionaryFromSamFile(samFile string) (f sam.Filter, err error) {
input, err := sam.Open(samFile, true)
if err != nil {
return nil, err
}
defer func() {
nerr := input.Close()
if err == nil {
err = nerr
}
}()
header, _, err := sam.ParseHeader(input.Reader)
if err != nil {
return nil, err
}
return ReplaceReferenceSequenceDictionary(header.SQ), nil
}
// RemoveUnmappedReads is a filter for removing unmapped sam-alignment
// instances, based on FLAG.
func RemoveUnmappedReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Unmapped) == 0 }
}
// RemoveUnmappedReadsStrict is a filter for removing unmapped
// sam-alignment instances, based on FLAG, or POS=0, or RNAME=*.
func RemoveUnmappedReadsStrict(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
return ((aln.FLAG & sam.Unmapped) == 0) && (aln.POS != 0) && (aln.RNAME != "*")
}
}
// RemoveNonExactMappingReads is a filter that removes all reads that
// are not exact matches with the reference (soft-clipping ok), based
// on CIGAR string (only M and S allowed).
func RemoveNonExactMappingReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return !strings.ContainsAny(aln.CIGAR, "IDNHPX=") }
}
// Symbols for optional fields used for determining exact matches. See
// http://samtools.github.io/hts-specs/SAMv1.pdf - Section 1.5.
var (
X0 = utils.Intern("X0")
X1 = utils.Intern("X1")
XM = utils.Intern("XM")
XO = utils.Intern("XO")
XG = utils.Intern("XG")
)
// RemoveNonExactMappingReadsStrict is a filter that removes all reads
// that are not exact matches with the reference, based on the
// optional fields X0=1 (unique mapping), X1=0 (no suboptimal hit),
// XM=0 (no mismatch), XO=0 (no gap opening), XG=0 (no gap extension).
func RemoveNonExactMappingReadsStrict(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
if x0, ok := aln.TAGS.Get(X0); !ok || x0.(int32) != 1 {
return false
}
if x1, ok := aln.TAGS.Get(X1); !ok || x1.(int32) != 0 {
return false
}
if xm, ok := aln.TAGS.Get(XM); !ok || xm.(int32) != 0 {
return false
}
if xo, ok := aln.TAGS.Get(XO); !ok || xo.(int32) != 0 {
return false
}
if xg, ok := aln.TAGS.Get(XG); !ok || xg.(int32) != 0 {
return false
}
return true
}
}
// RemoveDuplicateReads is a filter for removing duplicate
// sam-alignment instances, based on FLAG.
func RemoveDuplicateReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Duplicate) == 0 }
}
var sr = utils.Intern("sr")
// RemoveOptionalReads is a filter for removing alignments that
// represent optional information in elPrep.
func RemoveOptionalReads(header *sam.Header) sam.AlignmentFilter {
if _, found := header.UserRecords["@sr"]; found {
delete(header.UserRecords, "@sr")
return func(aln *sam.Alignment) bool { _, found := aln.TAGS.Get(sr); return !found }
}
return nil
}
// AddOrReplaceReadGroup returns a filter for adding or replacing the
// read group both in the Header and in each Alignment.
func AddOrReplaceReadGroup(readGroup utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
header.RG = []utils.StringMap{readGroup}
id := readGroup["ID"]
return func(aln *sam.Alignment) bool { aln.SetRG(id); return true }
}
}
// AddPGLine returns a filter for adding a @PG tag to a Header, and
// ensuring that it is the first one in the chain.
func AddPGLine(newPG utils.StringMap) sam.Filter |
// RenameChromosomes is a filter for prepending "chr" to the reference
// sequence names in a Header, and in RNAME and RNEXT in each
// Alignment.
func RenameChromosomes(header *sam.Header) sam.AlignmentFilter {
for _, entry := range header.SQ {
if sn, found := entry["SN"]; found {
entry["SN"] = "chr" + sn
}
}
return func(aln *sam.Alignment) bool {
if (aln.RNAME != "=") && (aln.RNAME != "*") {
aln.RNAME = "chr" + aln.RNAME
}
if (aln.RNEXT != "=") && (aln.RNEXT != "*") {
aln.RNEXT = "chr" + aln.RNEXT
}
return true
}
}
// AddREFID is a filter for adding the refid (index in the reference
// sequence dictionary) to alignments as temporary values.
func AddREFID(header *sam.Header) sam.AlignmentFilter {
dictTable := make(map[string]int32)
for index, entry := range header.SQ {
dictTable[entry["SN"]] = int32(index)
}
return func(aln *sam.Alignment) bool {
value, found := dictTable[aln.RNAME]
if !found {
value = -1
}
aln.SetREFID(value)
return true
}
}
// RemoveOptionalFields returns a filter for removing optional fields
// in an alignment.
func RemoveOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return nil
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return true
}
}
return false
})
return true
}
}
}
// KeepOptionalFields returns a filter for removing all but a list of
// given optional fields in an alignment.
func KeepOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS = nil
return true
}
}
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return false
}
}
return true
})
return true
}
}
}
// CleanSam is a filter for soft-clipping an alignment at the end of a
// reference sequence, and set MAPQ to 0 if unmapped.
func CleanSam(header *sam.Header) sam.AlignmentFilter {
referenceSequenceTable := make(map[string]int32)
for _, sn := range header.SQ {
referenceSequenceTable[sn["SN"]], _ = sam.SQLN(sn)
}
return func(aln *sam.Alignment) bool {
if aln.IsUnmapped() {
aln.MAPQ = 0
} else if cigar, err := sam.ScanCigarString(aln.CIGAR); err != nil {
log.Fatal(err, ", while scanning a CIGAR string for ", aln.QNAME, " in CleanSam")
} else if length := referenceSequenceTable[aln.RNAME]; end(aln, cigar) > length {
clipFrom := length - aln.POS + 1
aln.CIGAR = softClipEndOfRead(clipFrom, cigar)
}
return true
}
}
// RemoveNonOverlappingReads returns a filter for removing all reads
// that do not overlap with a set of regions specified by a bed file.
func RemoveNonOverlappingReads(bed *bed.Bed) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
alnStart := aln.POS
cigar, err := sam.ScanCigarString(aln.CIGAR)
if err != nil {
log.Fatal(err, ", while scanning a CIGAR string for", aln.QNAME, " in FilterNonOverlappingReads")
}
var alnEnd int32
if aln.IsUnmapped() || readLengthFromCigar(cigar) <= 0 {
alnEnd = aln.POS
} else {
alnEnd = end(aln, cigar)
}
regions := bed.RegionMap[utils.Intern(aln.RNAME)]
left := 0
right := len(regions) - 1
for left <= right {
mid := (left + right) / 2
regionStart := regions[mid].Start
regionEnd := regions[mid].End
if regionStart > alnEnd-1 {
right = mid - 1
} else if regionEnd <= alnStart-1 {
left = mid + 1
} else {
return true
}
}
return false
}
}
}
// RemoveMappingQualityLessThan is a filter for removing reads
// that do not match or exceed the given mapping quality.
func RemoveMappingQualityLessThan(mq int) sam.Filter {
if mq == 0 {
return nil // no need to add any filter because aln.MAPQ is always >= 0
}
if mq > math.MaxUint8 {
return func(_ *sam.Header) sam.AlignmentFilter {
return func(_ *sam.Alignment) bool {
return false // no aln.MAPQ can be > math.MaxUint8
}
}
}
mapq := byte(mq)
return func(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return aln.MAPQ >= mapq }
}
}
| {
return func(header *sam.Header) sam.AlignmentFilter {
id := newPG["ID"]
for utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["ID"] == id }) >= 0 {
id += " "
id += strconv.FormatInt(rand.Int63n(0x10000), 16)
}
newPG["ID"] = id
for _, PG := range header.PG {
nextID := PG["ID"]
if pos := utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["PP"] == nextID }); pos < 0 {
newPG["PP"] = nextID
break
}
}
header.PG = append(header.PG, newPG)
return nil
}
} | identifier_body |
simple-filters.go | package filters
import (
"log"
"math"
"math/rand"
"strconv"
"strings"
"github.com/exascience/elprep/bed"
"github.com/exascience/elprep/sam"
"github.com/exascience/elprep/utils"
)
// ReplaceReferenceSequenceDictionary returns a filter for replacing
// the reference sequence dictionary in a Header.
func ReplaceReferenceSequenceDictionary(dict []utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
if sortingOrder := sam.SortingOrder(header.HD["SO"]); sortingOrder == sam.Coordinate {
previousPos := -1
oldDict := header.SQ
for _, entry := range dict {
sn := entry["SN"]
pos := utils.Find(oldDict, func(entry utils.StringMap) bool { return entry["SN"] == sn })
if pos >= 0 {
if pos > previousPos {
previousPos = pos
} else {
header.SetHDSO(sam.Unknown)
break
}
}
}
}
dictTable := make(map[string]bool)
for _, entry := range dict {
dictTable[entry["SN"]] = true
}
header.SQ = dict
return func(aln *sam.Alignment) bool { return dictTable[aln.RNAME] }
}
}
// ReplaceReferenceSequenceDictionaryFromSamFile returns a filter for
// replacing the reference sequence dictionary in a Header with one
// parsed from the given SAM/DICT file.
func ReplaceReferenceSequenceDictionaryFromSamFile(samFile string) (f sam.Filter, err error) {
input, err := sam.Open(samFile, true)
if err != nil { | nerr := input.Close()
if err == nil {
err = nerr
}
}()
header, _, err := sam.ParseHeader(input.Reader)
if err != nil {
return nil, err
}
return ReplaceReferenceSequenceDictionary(header.SQ), nil
}
// RemoveUnmappedReads is a filter for removing unmapped sam-alignment
// instances, based on FLAG.
func RemoveUnmappedReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Unmapped) == 0 }
}
// RemoveUnmappedReadsStrict is a filter for removing unmapped
// sam-alignment instances, based on FLAG, or POS=0, or RNAME=*.
func RemoveUnmappedReadsStrict(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
return ((aln.FLAG & sam.Unmapped) == 0) && (aln.POS != 0) && (aln.RNAME != "*")
}
}
// RemoveNonExactMappingReads is a filter that removes all reads that
// are not exact matches with the reference (soft-clipping ok), based
// on CIGAR string (only M and S allowed).
func RemoveNonExactMappingReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return !strings.ContainsAny(aln.CIGAR, "IDNHPX=") }
}
// Symbols for optional fields used for determining exact matches. See
// http://samtools.github.io/hts-specs/SAMv1.pdf - Section 1.5.
var (
X0 = utils.Intern("X0")
X1 = utils.Intern("X1")
XM = utils.Intern("XM")
XO = utils.Intern("XO")
XG = utils.Intern("XG")
)
// RemoveNonExactMappingReadsStrict is a filter that removes all reads
// that are not exact matches with the reference, based on the
// optional fields X0=1 (unique mapping), X1=0 (no suboptimal hit),
// XM=0 (no mismatch), XO=0 (no gap opening), XG=0 (no gap extension).
func RemoveNonExactMappingReadsStrict(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
if x0, ok := aln.TAGS.Get(X0); !ok || x0.(int32) != 1 {
return false
}
if x1, ok := aln.TAGS.Get(X1); !ok || x1.(int32) != 0 {
return false
}
if xm, ok := aln.TAGS.Get(XM); !ok || xm.(int32) != 0 {
return false
}
if xo, ok := aln.TAGS.Get(XO); !ok || xo.(int32) != 0 {
return false
}
if xg, ok := aln.TAGS.Get(XG); !ok || xg.(int32) != 0 {
return false
}
return true
}
}
// RemoveDuplicateReads is a filter for removing duplicate
// sam-alignment instances, based on FLAG.
func RemoveDuplicateReads(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return (aln.FLAG & sam.Duplicate) == 0 }
}
var sr = utils.Intern("sr")
// RemoveOptionalReads is a filter for removing alignments that
// represent optional information in elPrep.
func RemoveOptionalReads(header *sam.Header) sam.AlignmentFilter {
if _, found := header.UserRecords["@sr"]; found {
delete(header.UserRecords, "@sr")
return func(aln *sam.Alignment) bool { _, found := aln.TAGS.Get(sr); return !found }
}
return nil
}
// AddOrReplaceReadGroup returns a filter for adding or replacing the
// read group both in the Header and in each Alignment.
func AddOrReplaceReadGroup(readGroup utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
header.RG = []utils.StringMap{readGroup}
id := readGroup["ID"]
return func(aln *sam.Alignment) bool { aln.SetRG(id); return true }
}
}
// AddPGLine returns a filter for adding a @PG tag to a Header, and
// ensuring that it is the first one in the chain.
func AddPGLine(newPG utils.StringMap) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
id := newPG["ID"]
for utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["ID"] == id }) >= 0 {
id += " "
id += strconv.FormatInt(rand.Int63n(0x10000), 16)
}
newPG["ID"] = id
for _, PG := range header.PG {
nextID := PG["ID"]
if pos := utils.Find(header.PG, func(entry utils.StringMap) bool { return entry["PP"] == nextID }); pos < 0 {
newPG["PP"] = nextID
break
}
}
header.PG = append(header.PG, newPG)
return nil
}
}
// RenameChromosomes is a filter for prepending "chr" to the reference
// sequence names in a Header, and in RNAME and RNEXT in each
// Alignment.
func RenameChromosomes(header *sam.Header) sam.AlignmentFilter {
for _, entry := range header.SQ {
if sn, found := entry["SN"]; found {
entry["SN"] = "chr" + sn
}
}
return func(aln *sam.Alignment) bool {
if (aln.RNAME != "=") && (aln.RNAME != "*") {
aln.RNAME = "chr" + aln.RNAME
}
if (aln.RNEXT != "=") && (aln.RNEXT != "*") {
aln.RNEXT = "chr" + aln.RNEXT
}
return true
}
}
// AddREFID is a filter for adding the refid (index in the reference
// sequence dictionary) to alignments as temporary values.
func AddREFID(header *sam.Header) sam.AlignmentFilter {
dictTable := make(map[string]int32)
for index, entry := range header.SQ {
dictTable[entry["SN"]] = int32(index)
}
return func(aln *sam.Alignment) bool {
value, found := dictTable[aln.RNAME]
if !found {
value = -1
}
aln.SetREFID(value)
return true
}
}
// RemoveOptionalFields returns a filter for removing optional fields
// in an alignment.
func RemoveOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return nil
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return true
}
}
return false
})
return true
}
}
}
// KeepOptionalFields returns a filter for removing all but a list of
// given optional fields in an alignment.
func KeepOptionalFields(tags []string) sam.Filter {
if len(tags) == 0 {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS = nil
return true
}
}
}
// Intern the tags once.
var optionals []utils.Symbol
for _, tag := range tags {
optionals = append(optionals, utils.Intern(tag))
}
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
aln.TAGS, _ = aln.TAGS.DeleteIf(func(key utils.Symbol, val interface{}) bool {
for _, tag := range optionals {
if tag == key {
return false
}
}
return true
})
return true
}
}
}
// CleanSam is a filter for soft-clipping an alignment at the end of a
// reference sequence, and set MAPQ to 0 if unmapped.
func CleanSam(header *sam.Header) sam.AlignmentFilter {
referenceSequenceTable := make(map[string]int32)
for _, sn := range header.SQ {
referenceSequenceTable[sn["SN"]], _ = sam.SQLN(sn)
}
return func(aln *sam.Alignment) bool {
if aln.IsUnmapped() {
aln.MAPQ = 0
} else if cigar, err := sam.ScanCigarString(aln.CIGAR); err != nil {
log.Fatal(err, ", while scanning a CIGAR string for ", aln.QNAME, " in CleanSam")
} else if length := referenceSequenceTable[aln.RNAME]; end(aln, cigar) > length {
clipFrom := length - aln.POS + 1
aln.CIGAR = softClipEndOfRead(clipFrom, cigar)
}
return true
}
}
// RemoveNonOverlappingReads returns a filter for removing all reads
// that do not overlap with a set of regions specified by a bed file.
func RemoveNonOverlappingReads(bed *bed.Bed) sam.Filter {
return func(header *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool {
alnStart := aln.POS
cigar, err := sam.ScanCigarString(aln.CIGAR)
if err != nil {
log.Fatal(err, ", while scanning a CIGAR string for", aln.QNAME, " in FilterNonOverlappingReads")
}
var alnEnd int32
if aln.IsUnmapped() || readLengthFromCigar(cigar) <= 0 {
alnEnd = aln.POS
} else {
alnEnd = end(aln, cigar)
}
regions := bed.RegionMap[utils.Intern(aln.RNAME)]
left := 0
right := len(regions) - 1
for left <= right {
mid := (left + right) / 2
regionStart := regions[mid].Start
regionEnd := regions[mid].End
if regionStart > alnEnd-1 {
right = mid - 1
} else if regionEnd <= alnStart-1 {
left = mid + 1
} else {
return true
}
}
return false
}
}
}
// RemoveMappingQualityLessThan is a filter for removing reads
// that do not match or exceed the given mapping quality.
func RemoveMappingQualityLessThan(mq int) sam.Filter {
if mq == 0 {
return nil // no need to add any filter because aln.MAPQ is always >= 0
}
if mq > math.MaxUint8 {
return func(_ *sam.Header) sam.AlignmentFilter {
return func(_ *sam.Alignment) bool {
return false // no aln.MAPQ can be > math.MaxUint8
}
}
}
mapq := byte(mq)
return func(_ *sam.Header) sam.AlignmentFilter {
return func(aln *sam.Alignment) bool { return aln.MAPQ >= mapq }
}
} | return nil, err
}
defer func() { | random_line_split |
scout.py | """
Scout
-----
.Genera tonos de llamado random
.Monitorea los sensores IR
.Si detecta una presencia:
.Inicia modo de percepción/clasificación
.Si se trata de especimenes de la especie de interés
.Estampa de tiempo con especie, sonido, video
.Inicia registro de video
emmanuel@interspecifics.cc
2020.01.29 // v.ii.x_:x
>> OTRAS NOTAS
https://www.raspberrypi.org/forums/viewtopic.php?t=240200
https://learn.adafruit.com/adafruit-amg8833-8x8-thermal-camera-sensor/raspberry-pi-thermal-camera
check devices
$ v4l2-ctl --list-devices
try recording
$ ffmpeg -i /dev/video7 -vcodec copy capture/cinco.mkv # 6.5Mbps sin reencodear
rangos de frecuencias
A- Hz(179-243)
B- Hz(158-174)
C- Hz(142-148)
D- Hz(128-139)
E- Hz(117-124)
F- Hz(106-115)
G- Hz(90-104)
"""
import busio, board, adafruit_amg88xx
import time, argparse, collections, random
import operator, re, os, subprocess
import cv2
import cvtf
import numpy as np
import tflite_runtime.interpreter as tflite
from PIL import Image
from oscpy.client import OSCClient
# minimal temperature difference
MIN_TEMP_DIFF = 2
MIN_MASA_DETEC = 3
# -create objects to communicate the sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
sensor_a = adafruit_amg88xx.AMG88XX(i2c_bus, 0x68)
sensor_b = adafruit_amg88xx.AMG88XX(i2c_bus, 0x69)
Category = collections.namedtuple('Category', ['id', 'score'])
# img utils
def create_blank(w, h, rgb_color=(0, 0, 0)):
""" create new image(numpy array) filled with certain color in rgb """
image = np.zeros((h, w), np.uint8)
color = tuple(reversed(rgb_color))
image[:] = 0
return image
# sensor functions
def read_sensor_pixels(sensor, verbose=False):
""" Lee los pixeles de temperatura de un sensor
Devuelve la temperatura media y una lista de temperaturas
La opción verbose muestra los valores
"""
mean_temp = 0
array_temps = []
for row in sensor.pixels:
array_temps.extend(row)
mean_temp = sum(array_temps)
mean_temp = mean_temp / len(array_temps)
if verbose:
print("\n")
print ('[Tm]: {0:.2f}'.format(mean_temp))
for row in sensor.pixels:
ls = ['{0:.1f}'.format(temp) for temp in row]
print(' '.join(ls))
print("\n")
return mean_temp, array_temps
def dual_detect(verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t1]:{0:.1f}\t[t2]:{1:.1f}'.format(m_tb, m_ta))
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
print(lb,'\t',la)
print ('[o1]:{0:d}\t\t[o2]:{1:d}'.format(nb, na))
print("\n")
return na, nb
def dual_detect(arg_name, verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor (mas los data_sens para log)
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t{2}]:{0:.1f}\t[t{3}]:{1:.1f}'.format(m_tb, m_ta, arg_name[1], arg_name[0]))
sens_a = ""
sens_b = ""
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
sens_a+=la+'\n'
sens_b+=lb+'\n'
if verbose:
print(lb,'\t',la)
if verbose:
print ('[o{2}]:{0:d}\t\t[o{3}]:{1:d}'.format(nb, na, arg_name[1], arg_name[0]))
print("\n")
return na, nb, [sens_a, sens_b, m_ta, m_tb]
# detection functions
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
def get_output(interpreter, top_k, score_threshold):
"""Returns no more than top_k categories with score >= score_threshold."""
scores = cvtf.output_tensor(interpreter, 0)
categories = [
Category(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(categories, key=operator.itemgetter(1), reverse=True)
def append_results_to_img(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
def parse_results(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
# define callbacks
def human_callback(witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
def label_callback(label, witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
#label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
# soundsys
def update_soundsystem(arg_recfile, arg_name, osc_c):
"""
envía mensajes a sc que disparan notas aleatorias en los rangos establecidos
registra las notas en el archivo de log
"""
# generate note and send osc message
note_val = random.randint(0,6)
synthnames = ['A','B', 'C', 'D', 'E', 'F', 'G']
ruta = '/scout/note/'+arg_name+'/' + synthnames[note_val]
ruta = ruta.encode()
osc_c.send_message(ruta, [1])
# log to record file
timetag = time.strftime("%Y%m%d_%H%M%S")
record_file = open(arg_recfile, 'a+')
record_file.write("\n[scout.note]: <{0}> {1}\n".format(timetag, ruta.decode()))
record_file.close()
return
# -main
def main():
# -parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path of capture folder', default="/media/pi/DATA/capture/video/")
parser.add_argument('--recfile', help='Path of capture folder', default="/media/pi/DATA/capture/record/")
parser.add_argument('--name', help='Name of the directions to scout [NE || SW]', default="NE")
parser.add_argument('--verbose', help='Show additional info for debugging', default=False)
parser.add_argument('--show', help='Show video', default=False)
parser.add_argument('--ip', help='OSC ip', default="192.168.1.207")
parser.add_argument('--port', help='OSC port', default="57120")
args = parser.parse_args()
# -init osc client
osc_addr = args.ip
osc_port = int(args.port)
osc_client = OSCClient(osc_addr, osc_port)
# -load model and labels for detection
default_model_dir = '/home/pi/Dev/animals/train'
default_model = 'animals_duo_model.tflite'
default_labels = 'animals_duo_model.txt'
args_model = os.path.join(default_model_dir, default_model)
args_labels = os.path.join(default_model_dir, default_labels)
args_top_k = 1
args_camera_idx = 0
args_threshold = 0.1
os.makedirs(args.path, exist_ok=True)
os.makedirs(args.recfile, exist_ok=True)
# -create the detection interpreter
print('Cargando {} con {} categorias de objetos.'.format(args_model, args_labels))
interpreter = cvtf.make_interpreter(args_model)
interpreter.allocate_tensors()
labels = load_labels(args_labels)
# -record file
timetag = time.strftime("%Y%m%d_%H%M%S")
arg_recfile = args.recfile + timetag + ".log"
record_file = open(arg_recfile, 'w+')
record_file.write("[scout.record.start]:\t----\t----\t-- <{0}>: \n".format(timetag));
record_file.close()
# -create a capture object and connect to cam
cam = None
witch = 0
empty = create_blank(640, 480, rgb_color=(0,0,0))
buffstream = ''
# -the loop (hole)
t0 = time.time() | # -check sensors,
if (time.time()-t0 > 1):
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
t0 = time.time()
# -then setup capture device
if (witch == 0):
if (nc_a > MIN_MASA_DETEC):
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
cam = cv2.VideoCapture(0)
witch = 2
else:
#continue
time.sleep(1)
pass
elif(witch == 1):
if (nc_a > MIN_MASA_DETEC):
#continue
pass
elif(nc_b > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(0)
witch = 2
else:
cam.release()
witch = 0
elif(witch == 2):
if (nc_a > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
#continue
pass
else:
cam.release()
witch = 0
# luego, cuando haya un dispositivo activo
if (witch > 0):
if (cam.isOpened()):
# read and convert
ret, frame = cam.read()
if not ret:
print("-.-* No Video Source")
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
# make the classification
cvtf.set_input(interpreter, pil_im)
interpreter.invoke()
results = get_output(interpreter, args_top_k, args_threshold)
# parse and print results, compare, count!
# cv2_im = append_results_to_img(cv2_im, results, labels)
label = labels[results[0][0]]
percent = int(100 * results[0].score)
tag = '{}% {}'.format(percent, label)
ch = ''
if (label=='Jaguar'): ch='J'
elif(label=='MexicanGrayWolf'): ch='w'
elif(label=='Human'): ch='H'
else: ch = ' '
# update the buffstream
buffstream += ch
if (len(buffstream) > 20):
buffstream = buffstream[1:]
if (args.verbose == True):
print(buffstream+'/n')
# count and trigger events, reset buff
c_J = buffstream.count('J')
c_W = buffstream.count('w')
c_H = len(list(filter(lambda x: x == 'H', buffstream)))
if (c_J>15):
lab = "JAGUAR"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_W>15):
lab = "MexGrayWOLF"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_H>15):
print("\n\n[->] .. t[._.]H\n")
human_callback(witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
# draw image
if (args.show==True): cv2.imshow('frame', cv2_im)
else:
if (args.show==True): cv2.imshow('frame', empty)
# pass
# actualiza la maquina de sonido
if (time.time() - t2 > 30):
update_soundsystem(arg_recfile, args.name, osc_client)
t2 = time.time()
# - detect break key
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
# ----
if __name__ == '__main__':
main() | t2 = time.time()
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
while True: | random_line_split |
scout.py | """
Scout
-----
.Genera tonos de llamado random
.Monitorea los sensores IR
.Si detecta una presencia:
.Inicia modo de percepción/clasificación
.Si se trata de especimenes de la especie de interés
.Estampa de tiempo con especie, sonido, video
.Inicia registro de video
emmanuel@interspecifics.cc
2020.01.29 // v.ii.x_:x
>> OTRAS NOTAS
https://www.raspberrypi.org/forums/viewtopic.php?t=240200
https://learn.adafruit.com/adafruit-amg8833-8x8-thermal-camera-sensor/raspberry-pi-thermal-camera
check devices
$ v4l2-ctl --list-devices
try recording
$ ffmpeg -i /dev/video7 -vcodec copy capture/cinco.mkv # 6.5Mbps sin reencodear
rangos de frecuencias
A- Hz(179-243)
B- Hz(158-174)
C- Hz(142-148)
D- Hz(128-139)
E- Hz(117-124)
F- Hz(106-115)
G- Hz(90-104)
"""
import busio, board, adafruit_amg88xx
import time, argparse, collections, random
import operator, re, os, subprocess
import cv2
import cvtf
import numpy as np
import tflite_runtime.interpreter as tflite
from PIL import Image
from oscpy.client import OSCClient
# minimal temperature difference
MIN_TEMP_DIFF = 2
MIN_MASA_DETEC = 3
# -create objects to communicate the sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
sensor_a = adafruit_amg88xx.AMG88XX(i2c_bus, 0x68)
sensor_b = adafruit_amg88xx.AMG88XX(i2c_bus, 0x69)
Category = collections.namedtuple('Category', ['id', 'score'])
# img utils
def create_blank(w, h, rgb_color=(0, 0, 0)):
""" create new image(numpy array) filled with certain color in rgb """
image = np.zeros((h, w), np.uint8)
color = tuple(reversed(rgb_color))
image[:] = 0
return image
# sensor functions
def read_sensor_pixels(sensor, verbose=False):
""" Lee los pixeles de temperatura de un sensor
Devuelve la temperatura media y una lista de temperaturas
La opción verbose muestra los valores
"""
mean_temp = 0
array_temps = []
for row in sensor.pixels:
array_temps.extend(row)
mean_temp = sum(array_temps)
mean_temp = mean_temp / len(array_temps)
if verbose:
print("\n")
print ('[Tm]: {0:.2f}'.format(mean_temp))
for row in sensor.pixels:
ls = ['{0:.1f}'.format(temp) for temp in row]
print(' '.join(ls))
print("\n")
return mean_temp, array_temps
def dual_detect(verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t1]:{0:.1f}\t[t2]:{1:.1f}'.format(m_tb, m_ta))
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
print(lb,'\t',la)
print ('[o1]:{0:d}\t\t[o2]:{1:d}'.format(nb, na))
print("\n")
return na, nb
def dual_detect(arg_name, verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor (mas los data_sens para log)
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t{2}]:{0:.1f}\t[t{3}]:{1:.1f}'.format(m_tb, m_ta, arg_name[1], arg_name[0]))
sens_a = ""
sens_b = ""
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
sens_a+=la+'\n'
sens_b+=lb+'\n'
if verbose:
print(lb,'\t',la)
if verbose:
print ('[o{2}]:{0:d}\t\t[o{3}]:{1:d}'.format(nb, na, arg_name[1], arg_name[0]))
print("\n")
return na, nb, [sens_a, sens_b, m_ta, m_tb]
# detection functions
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
def get_output(interpreter, top_k, score_threshold):
"""Returns no more than top_k categories with score >= score_threshold."""
scores = cvtf.output_tensor(interpreter, 0)
categories = [
Category(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(categories, key=operator.itemgetter(1), reverse=True)
def append_results_to_img(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
def parse_results(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
# define callbacks
def human_ca | arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
def label_callback(label, witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
#label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
# soundsys
def update_soundsystem(arg_recfile, arg_name, osc_c):
"""
envía mensajes a sc que disparan notas aleatorias en los rangos establecidos
registra las notas en el archivo de log
"""
# generate note and send osc message
note_val = random.randint(0,6)
synthnames = ['A','B', 'C', 'D', 'E', 'F', 'G']
ruta = '/scout/note/'+arg_name+'/' + synthnames[note_val]
ruta = ruta.encode()
osc_c.send_message(ruta, [1])
# log to record file
timetag = time.strftime("%Y%m%d_%H%M%S")
record_file = open(arg_recfile, 'a+')
record_file.write("\n[scout.note]: <{0}> {1}\n".format(timetag, ruta.decode()))
record_file.close()
return
# -main
def main():
# -parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path of capture folder', default="/media/pi/DATA/capture/video/")
parser.add_argument('--recfile', help='Path of capture folder', default="/media/pi/DATA/capture/record/")
parser.add_argument('--name', help='Name of the directions to scout [NE || SW]', default="NE")
parser.add_argument('--verbose', help='Show additional info for debugging', default=False)
parser.add_argument('--show', help='Show video', default=False)
parser.add_argument('--ip', help='OSC ip', default="192.168.1.207")
parser.add_argument('--port', help='OSC port', default="57120")
args = parser.parse_args()
# -init osc client
osc_addr = args.ip
osc_port = int(args.port)
osc_client = OSCClient(osc_addr, osc_port)
# -load model and labels for detection
default_model_dir = '/home/pi/Dev/animals/train'
default_model = 'animals_duo_model.tflite'
default_labels = 'animals_duo_model.txt'
args_model = os.path.join(default_model_dir, default_model)
args_labels = os.path.join(default_model_dir, default_labels)
args_top_k = 1
args_camera_idx = 0
args_threshold = 0.1
os.makedirs(args.path, exist_ok=True)
os.makedirs(args.recfile, exist_ok=True)
# -create the detection interpreter
print('Cargando {} con {} categorias de objetos.'.format(args_model, args_labels))
interpreter = cvtf.make_interpreter(args_model)
interpreter.allocate_tensors()
labels = load_labels(args_labels)
# -record file
timetag = time.strftime("%Y%m%d_%H%M%S")
arg_recfile = args.recfile + timetag + ".log"
record_file = open(arg_recfile, 'w+')
record_file.write("[scout.record.start]:\t----\t----\t-- <{0}>: \n".format(timetag));
record_file.close()
# -create a capture object and connect to cam
cam = None
witch = 0
empty = create_blank(640, 480, rgb_color=(0,0,0))
buffstream = ''
# -the loop (hole)
t0 = time.time()
t2 = time.time()
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
while True:
# -check sensors,
if (time.time()-t0 > 1):
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
t0 = time.time()
# -then setup capture device
if (witch == 0):
if (nc_a > MIN_MASA_DETEC):
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
cam = cv2.VideoCapture(0)
witch = 2
else:
#continue
time.sleep(1)
pass
elif(witch == 1):
if (nc_a > MIN_MASA_DETEC):
#continue
pass
elif(nc_b > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(0)
witch = 2
else:
cam.release()
witch = 0
elif(witch == 2):
if (nc_a > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
#continue
pass
else:
cam.release()
witch = 0
# luego, cuando haya un dispositivo activo
if (witch > 0):
if (cam.isOpened()):
# read and convert
ret, frame = cam.read()
if not ret:
print("-.-* No Video Source")
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
# make the classification
cvtf.set_input(interpreter, pil_im)
interpreter.invoke()
results = get_output(interpreter, args_top_k, args_threshold)
# parse and print results, compare, count!
# cv2_im = append_results_to_img(cv2_im, results, labels)
label = labels[results[0][0]]
percent = int(100 * results[0].score)
tag = '{}% {}'.format(percent, label)
ch = ''
if (label=='Jaguar'): ch='J'
elif(label=='MexicanGrayWolf'): ch='w'
elif(label=='Human'): ch='H'
else: ch = ' '
# update the buffstream
buffstream += ch
if (len(buffstream) > 20):
buffstream = buffstream[1:]
if (args.verbose == True):
print(buffstream+'/n')
# count and trigger events, reset buff
c_J = buffstream.count('J')
c_W = buffstream.count('w')
c_H = len(list(filter(lambda x: x == 'H', buffstream)))
if (c_J>15):
lab = "JAGUAR"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_W>15):
lab = "MexGrayWOLF"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_H>15):
print("\n\n[->] .. t[._.]H\n")
human_callback(witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
# draw image
if (args.show==True): cv2.imshow('frame', cv2_im)
else:
if (args.show==True): cv2.imshow('frame', empty)
# pass
# actualiza la maquina de sonido
if (time.time() - t2 > 30):
update_soundsystem(arg_recfile, args.name, osc_client)
t2 = time.time()
# - detect break key
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
# ----
if __name__ == '__main__':
main() | llback(witch, | identifier_name |
scout.py | """
Scout
-----
.Genera tonos de llamado random
.Monitorea los sensores IR
.Si detecta una presencia:
.Inicia modo de percepción/clasificación
.Si se trata de especimenes de la especie de interés
.Estampa de tiempo con especie, sonido, video
.Inicia registro de video
emmanuel@interspecifics.cc
2020.01.29 // v.ii.x_:x
>> OTRAS NOTAS
https://www.raspberrypi.org/forums/viewtopic.php?t=240200
https://learn.adafruit.com/adafruit-amg8833-8x8-thermal-camera-sensor/raspberry-pi-thermal-camera
check devices
$ v4l2-ctl --list-devices
try recording
$ ffmpeg -i /dev/video7 -vcodec copy capture/cinco.mkv # 6.5Mbps sin reencodear
rangos de frecuencias
A- Hz(179-243)
B- Hz(158-174)
C- Hz(142-148)
D- Hz(128-139)
E- Hz(117-124)
F- Hz(106-115)
G- Hz(90-104)
"""
import busio, board, adafruit_amg88xx
import time, argparse, collections, random
import operator, re, os, subprocess
import cv2
import cvtf
import numpy as np
import tflite_runtime.interpreter as tflite
from PIL import Image
from oscpy.client import OSCClient
# minimal temperature difference
MIN_TEMP_DIFF = 2
MIN_MASA_DETEC = 3
# -create objects to communicate the sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
sensor_a = adafruit_amg88xx.AMG88XX(i2c_bus, 0x68)
sensor_b = adafruit_amg88xx.AMG88XX(i2c_bus, 0x69)
Category = collections.namedtuple('Category', ['id', 'score'])
# img utils
def create_blank(w, h, rgb_color=(0, 0, 0)):
""" create new image(numpy array) filled with certain color in rgb """
image = np.zeros((h, w), np.uint8)
color = tuple(reversed(rgb_color))
image[:] = 0
return image
# sensor functions
def read_sensor_pixels(sensor, verbose=False):
""" Lee los pixeles de temperatura de un sensor
Devuelve la temperatura media y una lista de temperaturas
La opción verbose muestra los valores
"""
mean_temp = 0
array_temps = []
for row in sensor.pixels:
array_temps.extend(row)
mean_temp = sum(array_temps)
mean_temp = mean_temp / len(array_temps)
if verbose:
print("\n")
print ('[Tm]: {0:.2f}'.format(mean_temp))
for row in sensor.pixels:
ls = ['{0:.1f}'.format(temp) for temp in row]
print(' '.join(ls))
print("\n")
return mean_temp, array_temps
def dual_detect(verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print( | eturn na, nb
def dual_detect(arg_name, verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor (mas los data_sens para log)
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t{2}]:{0:.1f}\t[t{3}]:{1:.1f}'.format(m_tb, m_ta, arg_name[1], arg_name[0]))
sens_a = ""
sens_b = ""
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
sens_a+=la+'\n'
sens_b+=lb+'\n'
if verbose:
print(lb,'\t',la)
if verbose:
print ('[o{2}]:{0:d}\t\t[o{3}]:{1:d}'.format(nb, na, arg_name[1], arg_name[0]))
print("\n")
return na, nb, [sens_a, sens_b, m_ta, m_tb]
# detection functions
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
def get_output(interpreter, top_k, score_threshold):
"""Returns no more than top_k categories with score >= score_threshold."""
scores = cvtf.output_tensor(interpreter, 0)
categories = [
Category(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(categories, key=operator.itemgetter(1), reverse=True)
def append_results_to_img(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
def parse_results(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
# define callbacks
def human_callback(witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
def label_callback(label, witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
#label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
# soundsys
def update_soundsystem(arg_recfile, arg_name, osc_c):
"""
envía mensajes a sc que disparan notas aleatorias en los rangos establecidos
registra las notas en el archivo de log
"""
# generate note and send osc message
note_val = random.randint(0,6)
synthnames = ['A','B', 'C', 'D', 'E', 'F', 'G']
ruta = '/scout/note/'+arg_name+'/' + synthnames[note_val]
ruta = ruta.encode()
osc_c.send_message(ruta, [1])
# log to record file
timetag = time.strftime("%Y%m%d_%H%M%S")
record_file = open(arg_recfile, 'a+')
record_file.write("\n[scout.note]: <{0}> {1}\n".format(timetag, ruta.decode()))
record_file.close()
return
# -main
def main():
# -parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path of capture folder', default="/media/pi/DATA/capture/video/")
parser.add_argument('--recfile', help='Path of capture folder', default="/media/pi/DATA/capture/record/")
parser.add_argument('--name', help='Name of the directions to scout [NE || SW]', default="NE")
parser.add_argument('--verbose', help='Show additional info for debugging', default=False)
parser.add_argument('--show', help='Show video', default=False)
parser.add_argument('--ip', help='OSC ip', default="192.168.1.207")
parser.add_argument('--port', help='OSC port', default="57120")
args = parser.parse_args()
# -init osc client
osc_addr = args.ip
osc_port = int(args.port)
osc_client = OSCClient(osc_addr, osc_port)
# -load model and labels for detection
default_model_dir = '/home/pi/Dev/animals/train'
default_model = 'animals_duo_model.tflite'
default_labels = 'animals_duo_model.txt'
args_model = os.path.join(default_model_dir, default_model)
args_labels = os.path.join(default_model_dir, default_labels)
args_top_k = 1
args_camera_idx = 0
args_threshold = 0.1
os.makedirs(args.path, exist_ok=True)
os.makedirs(args.recfile, exist_ok=True)
# -create the detection interpreter
print('Cargando {} con {} categorias de objetos.'.format(args_model, args_labels))
interpreter = cvtf.make_interpreter(args_model)
interpreter.allocate_tensors()
labels = load_labels(args_labels)
# -record file
timetag = time.strftime("%Y%m%d_%H%M%S")
arg_recfile = args.recfile + timetag + ".log"
record_file = open(arg_recfile, 'w+')
record_file.write("[scout.record.start]:\t----\t----\t-- <{0}>: \n".format(timetag));
record_file.close()
# -create a capture object and connect to cam
cam = None
witch = 0
empty = create_blank(640, 480, rgb_color=(0,0,0))
buffstream = ''
# -the loop (hole)
t0 = time.time()
t2 = time.time()
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
while True:
# -check sensors,
if (time.time()-t0 > 1):
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
t0 = time.time()
# -then setup capture device
if (witch == 0):
if (nc_a > MIN_MASA_DETEC):
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
cam = cv2.VideoCapture(0)
witch = 2
else:
#continue
time.sleep(1)
pass
elif(witch == 1):
if (nc_a > MIN_MASA_DETEC):
#continue
pass
elif(nc_b > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(0)
witch = 2
else:
cam.release()
witch = 0
elif(witch == 2):
if (nc_a > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
#continue
pass
else:
cam.release()
witch = 0
# luego, cuando haya un dispositivo activo
if (witch > 0):
if (cam.isOpened()):
# read and convert
ret, frame = cam.read()
if not ret:
print("-.-* No Video Source")
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
# make the classification
cvtf.set_input(interpreter, pil_im)
interpreter.invoke()
results = get_output(interpreter, args_top_k, args_threshold)
# parse and print results, compare, count!
# cv2_im = append_results_to_img(cv2_im, results, labels)
label = labels[results[0][0]]
percent = int(100 * results[0].score)
tag = '{}% {}'.format(percent, label)
ch = ''
if (label=='Jaguar'): ch='J'
elif(label=='MexicanGrayWolf'): ch='w'
elif(label=='Human'): ch='H'
else: ch = ' '
# update the buffstream
buffstream += ch
if (len(buffstream) > 20):
buffstream = buffstream[1:]
if (args.verbose == True):
print(buffstream+'/n')
# count and trigger events, reset buff
c_J = buffstream.count('J')
c_W = buffstream.count('w')
c_H = len(list(filter(lambda x: x == 'H', buffstream)))
if (c_J>15):
lab = "JAGUAR"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_W>15):
lab = "MexGrayWOLF"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_H>15):
print("\n\n[->] .. t[._.]H\n")
human_callback(witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
# draw image
if (args.show==True): cv2.imshow('frame', cv2_im)
else:
if (args.show==True): cv2.imshow('frame', empty)
# pass
# actualiza la maquina de sonido
if (time.time() - t2 > 30):
update_soundsystem(arg_recfile, args.name, osc_client)
t2 = time.time()
# - detect break key
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
# ----
if __name__ == '__main__':
main() | "\n")
print ('[t1]:{0:.1f}\t[t2]:{1:.1f}'.format(m_tb, m_ta))
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
print(lb,'\t',la)
print ('[o1]:{0:d}\t\t[o2]:{1:d}'.format(nb, na))
print("\n")
r | conditional_block |
scout.py | """
Scout
-----
.Genera tonos de llamado random
.Monitorea los sensores IR
.Si detecta una presencia:
.Inicia modo de percepción/clasificación
.Si se trata de especimenes de la especie de interés
.Estampa de tiempo con especie, sonido, video
.Inicia registro de video
emmanuel@interspecifics.cc
2020.01.29 // v.ii.x_:x
>> OTRAS NOTAS
https://www.raspberrypi.org/forums/viewtopic.php?t=240200
https://learn.adafruit.com/adafruit-amg8833-8x8-thermal-camera-sensor/raspberry-pi-thermal-camera
check devices
$ v4l2-ctl --list-devices
try recording
$ ffmpeg -i /dev/video7 -vcodec copy capture/cinco.mkv # 6.5Mbps sin reencodear
rangos de frecuencias
A- Hz(179-243)
B- Hz(158-174)
C- Hz(142-148)
D- Hz(128-139)
E- Hz(117-124)
F- Hz(106-115)
G- Hz(90-104)
"""
import busio, board, adafruit_amg88xx
import time, argparse, collections, random
import operator, re, os, subprocess
import cv2
import cvtf
import numpy as np
import tflite_runtime.interpreter as tflite
from PIL import Image
from oscpy.client import OSCClient
# minimal temperature difference
MIN_TEMP_DIFF = 2
MIN_MASA_DETEC = 3
# -create objects to communicate the sensor
i2c_bus = busio.I2C(board.SCL, board.SDA)
sensor_a = adafruit_amg88xx.AMG88XX(i2c_bus, 0x68)
sensor_b = adafruit_amg88xx.AMG88XX(i2c_bus, 0x69)
Category = collections.namedtuple('Category', ['id', 'score'])
# img utils
def create_blank(w, h, rgb_color=(0, 0, 0)):
""" create new image(numpy array) filled with certain color in rgb """
image = np.zeros((h, w), np.uint8)
color = tuple(reversed(rgb_color))
image[:] = 0
return image
# sensor functions
def read_sensor_pixels(sensor, verbose=False):
""" Lee los pixeles de temperatura de un sensor
Devuelve la temperatura media y una lista de temperaturas
La opción verbose muestra los valores
"""
mean_temp = 0
array_temps = []
for row in sensor.pixels:
array_temps.extend(row)
mean_temp = sum(array_temps)
mean_temp = mean_temp / len(array_temps)
if verbose:
print("\n")
print ('[Tm]: {0:.2f}'.format(mean_temp))
for row in sensor.pixels:
ls = ['{0:.1f}'.format(temp) for temp in row]
print(' '.join(ls))
print("\n")
return mean_temp, array_temps
def dual_detect(verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t1]:{0:.1f}\t[t2]:{1:.1f}'.format(m_tb, m_ta))
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
print(lb,'\t',la)
print ('[o1]:{0:d}\t\t[o2]:{1:d}'.format(nb, na))
print("\n")
return na, nb
def dual_detect(arg_name, verbose=False):
""" Llama a read_sensor_pixels una vez por cada sensor
Devuelve el número de celdas ocupadas en cada sensor (mas los data_sens para log)
Con verbose muestra paneles de detección
"""
m_ta, arr_ta = read_sensor_pixels(sensor_a)
m_tb, arr_tb = read_sensor_pixels(sensor_b)
na = len(list(filter(lambda x: (x - m_ta) >= MIN_TEMP_DIFF, arr_ta)))
nb = len(list(filter(lambda x: (x - m_tb) >= MIN_TEMP_DIFF, arr_tb)))
if verbose:
print("\n")
print ('[t{2}]:{0:.1f}\t[t{3}]:{1:.1f}'.format(m_tb, m_ta, arg_name[1], arg_name[0]))
sens_a = ""
sens_b = ""
for ix in range(8):
la = ''.join(['.' if (arr_ta[iy * 8 + ix] - m_ta) < MIN_TEMP_DIFF else '+' for iy in range(8)])
lb = ''.join(['.' if (arr_tb[iy * 8 + ix] - m_tb) < MIN_TEMP_DIFF else '+' for iy in range(8)])
sens_a+=la+'\n'
sens_b+=lb+'\n'
if verbose:
print(lb,'\t',la)
if verbose:
print ('[o{2}]:{0:d}\t\t[o{3}]:{1:d}'.format(nb, na, arg_name[1], arg_name[0]))
print("\n")
return na, nb, [sens_a, sens_b, m_ta, m_tb]
# detection functions
def load_labels(path):
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
def get_output(interpreter, top_k, score_threshold):
"""Retur | pend_results_to_img(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
def parse_results(cv2_im, results, labels):
height, width, channels = cv2_im.shape
for ii, res in enumerate(results):
percent = int(100 * res.score)
label = '{}% {}'.format(percent, labels[res[0]])
cv2_im = cv2.putText(cv2_im, label, (600, 20+ii*30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 1)
return cv2_im
# define callbacks
def human_callback(witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:{0}\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
def label_callback(label, witch, arg_path, arg_name, arg_recfile, data_sens):
# choosw from witch
#label = "HUMAN"
timetag = time.strftime("%Y%m%d_%H%M%S")
# log to record file
record_file = open(arg_recfile, 'a+')
if (witch==1): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[0]))
elif (witch==2): record_file.write("[scout.detection]: <{0}> <{1}>\n".format(timetag, arg_name[1]))
record_file.write(">> [label]: {}\n".format(label));
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[1]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[3]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[1])
record_file.write('>> [sensor.name]:[{0}]\n'.format(arg_name[0]))
record_file.write('>> [mean_temperature]: {0:.2f} C\n'.format(data_sens[2]))
record_file.write('>> [data]: \n')
record_file.write(data_sens[0])
# cmd exck
out_filename = ''
if (witch==1):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[0] +".mkv"
#cmd = "ffmpeg -i /dev/video6 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video6 -t 15 -vcodec copy " + out_filename
elif(witch==2):
out_filename = arg_path + timetag +"_"+label+"_"+ arg_name[1] +".mkv"
#cmd = "ffmpeg -i /dev/video2 -vcodec h264_omx -b:v 2M -t 15 " + out_filename
cmd = "ffmpeg -i /dev/video2 -t 15 -vcodec copy " + out_filename
else:
pass
list_cmd = cmd.split(' ')
# actualiza y cierra registro
record_file.write('>> [video.capture]:{0}\n\n\n'.format(out_filename))
record_file.close()
# ejecuta
cmd_out = subprocess.run(list_cmd, stdout=subprocess.PIPE)
# print(cmd_out.stdout.decode('utf-8'))
return cmd_out.stdout.decode('utf-8')
# soundsys
def update_soundsystem(arg_recfile, arg_name, osc_c):
"""
envía mensajes a sc que disparan notas aleatorias en los rangos establecidos
registra las notas en el archivo de log
"""
# generate note and send osc message
note_val = random.randint(0,6)
synthnames = ['A','B', 'C', 'D', 'E', 'F', 'G']
ruta = '/scout/note/'+arg_name+'/' + synthnames[note_val]
ruta = ruta.encode()
osc_c.send_message(ruta, [1])
# log to record file
timetag = time.strftime("%Y%m%d_%H%M%S")
record_file = open(arg_recfile, 'a+')
record_file.write("\n[scout.note]: <{0}> {1}\n".format(timetag, ruta.decode()))
record_file.close()
return
# -main
def main():
# -parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Path of capture folder', default="/media/pi/DATA/capture/video/")
parser.add_argument('--recfile', help='Path of capture folder', default="/media/pi/DATA/capture/record/")
parser.add_argument('--name', help='Name of the directions to scout [NE || SW]', default="NE")
parser.add_argument('--verbose', help='Show additional info for debugging', default=False)
parser.add_argument('--show', help='Show video', default=False)
parser.add_argument('--ip', help='OSC ip', default="192.168.1.207")
parser.add_argument('--port', help='OSC port', default="57120")
args = parser.parse_args()
# -init osc client
osc_addr = args.ip
osc_port = int(args.port)
osc_client = OSCClient(osc_addr, osc_port)
# -load model and labels for detection
default_model_dir = '/home/pi/Dev/animals/train'
default_model = 'animals_duo_model.tflite'
default_labels = 'animals_duo_model.txt'
args_model = os.path.join(default_model_dir, default_model)
args_labels = os.path.join(default_model_dir, default_labels)
args_top_k = 1
args_camera_idx = 0
args_threshold = 0.1
os.makedirs(args.path, exist_ok=True)
os.makedirs(args.recfile, exist_ok=True)
# -create the detection interpreter
print('Cargando {} con {} categorias de objetos.'.format(args_model, args_labels))
interpreter = cvtf.make_interpreter(args_model)
interpreter.allocate_tensors()
labels = load_labels(args_labels)
# -record file
timetag = time.strftime("%Y%m%d_%H%M%S")
arg_recfile = args.recfile + timetag + ".log"
record_file = open(arg_recfile, 'w+')
record_file.write("[scout.record.start]:\t----\t----\t-- <{0}>: \n".format(timetag));
record_file.close()
# -create a capture object and connect to cam
cam = None
witch = 0
empty = create_blank(640, 480, rgb_color=(0,0,0))
buffstream = ''
# -the loop (hole)
t0 = time.time()
t2 = time.time()
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
while True:
# -check sensors,
if (time.time()-t0 > 1):
nc_a, nc_b, data_sens = dual_detect(args.name, args.verbose)
t0 = time.time()
# -then setup capture device
if (witch == 0):
if (nc_a > MIN_MASA_DETEC):
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
cam = cv2.VideoCapture(0)
witch = 2
else:
#continue
time.sleep(1)
pass
elif(witch == 1):
if (nc_a > MIN_MASA_DETEC):
#continue
pass
elif(nc_b > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(0)
witch = 2
else:
cam.release()
witch = 0
elif(witch == 2):
if (nc_a > MIN_MASA_DETEC):
cam.release()
cam = cv2.VideoCapture(4)
witch = 1
elif(nc_b > MIN_MASA_DETEC):
#continue
pass
else:
cam.release()
witch = 0
# luego, cuando haya un dispositivo activo
if (witch > 0):
if (cam.isOpened()):
# read and convert
ret, frame = cam.read()
if not ret:
print("-.-* No Video Source")
break
cv2_im = frame
cv2_im_rgb = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im_rgb)
# make the classification
cvtf.set_input(interpreter, pil_im)
interpreter.invoke()
results = get_output(interpreter, args_top_k, args_threshold)
# parse and print results, compare, count!
# cv2_im = append_results_to_img(cv2_im, results, labels)
label = labels[results[0][0]]
percent = int(100 * results[0].score)
tag = '{}% {}'.format(percent, label)
ch = ''
if (label=='Jaguar'): ch='J'
elif(label=='MexicanGrayWolf'): ch='w'
elif(label=='Human'): ch='H'
else: ch = ' '
# update the buffstream
buffstream += ch
if (len(buffstream) > 20):
buffstream = buffstream[1:]
if (args.verbose == True):
print(buffstream+'/n')
# count and trigger events, reset buff
c_J = buffstream.count('J')
c_W = buffstream.count('w')
c_H = len(list(filter(lambda x: x == 'H', buffstream)))
if (c_J>15):
lab = "JAGUAR"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_W>15):
lab = "MexGrayWOLF"
print("\n\n[->] {0}\n".format(lab))
label_callback(lab, witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
if (c_H>15):
print("\n\n[->] .. t[._.]H\n")
human_callback(witch, args.path, args.name, arg_recfile, data_sens)
buffstream = ''
# draw image
if (args.show==True): cv2.imshow('frame', cv2_im)
else:
if (args.show==True): cv2.imshow('frame', empty)
# pass
# actualiza la maquina de sonido
if (time.time() - t2 > 30):
update_soundsystem(arg_recfile, args.name, osc_client)
t2 = time.time()
# - detect break key
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
# ----
if __name__ == '__main__':
main() | ns no more than top_k categories with score >= score_threshold."""
scores = cvtf.output_tensor(interpreter, 0)
categories = [
Category(i, scores[i])
for i in np.argpartition(scores, -top_k)[-top_k:]
if scores[i] >= score_threshold
]
return sorted(categories, key=operator.itemgetter(1), reverse=True)
def ap | identifier_body |
mla_1c_v0.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 19:10:13 2019
@author: chiara
"""
import os
import numpy as np # scientific calculation
import pandas as pd # data analysis
import matplotlib.pyplot as plt # data plot
#import matplotlib
#from datetime import datetime,date # date objects
import seaborn as sns # data plot
# import statsmodels.api as sm
#import networkx as nx
from sklearn.ensemble import RandomForestRegressor
# Set working paths
mainPath="/home/chiara/kaggle/1C_PYproject/scripts/"
os.chdir(mainPath)
#from my_functions_1c import my_prepareTrain
import my_functions_1c as ct
########################
filePath="/home/chiara/kaggle/1C_PYproject/data/competitive-data-science-predict-future-sales/"+"sales_train_v2.csv"
data=pd.read_csv(filePath, index_col=False)
data.head(5)
data.shape
data.tail(5)
######################## LOAD DATA TRAIN
filePath="working_data/"+"1C_small_training.csv"
data=pd.read_csv(filePath, index_col=False)
data=data.drop("Unnamed: 0",axis=1)
data.keys()
data.head()
dataTrain=ct.my_prepareTrain(data) #921400 rows x 9 columns
dataTrain.keys()
#["date_block_num","item_id","shop_id","item_freq","shop_freq",
# "category_freq", "month","item_price","month_cnt"]
dataTrain.reset_index()
dataTrain.iloc[10:20,0:5]
dataTrain.plot(subplots=True)
##############################################################################
##############################################################################
############# CHECKS/SUMMARIES
## is the item price fixed among shops? over months?
# price is not fixed among shops
# price is not fixed among months
dataPriceXShop=dataTrain[{"date_block_num","item_id","shop_id","item_price"}]
dataPriceXShop.head()
dataPriceXShop.shape
dataItemXShop_price=pd.pivot_table(dataPriceXShop,
index=["date_block_num","item_id"],
values="item_price",columns=["shop_id"])
dataItemXShop_price #[135451 rows x 55 columns]
dataItemXShop_price.keys()
dataItemXShop_price.index
dataItemXShop_price.loc[(33,33)]
# all shops priced item 33 199, but shop 49 priced it 159
dataItemXShop_price.loc[(12,33)]
# which items are consistent/present among shops? over months?
33-12+1 # 22 months
nan_indices=dataItemXShop_price.isnull()
#dataItemXShop_count=pd.pivot_table(nan_indices,
# index="item_id",columns=[""]
dataItemXShop_count=nan_indices.groupby("item_id").sum() #over months
dataItemXShop_count.max(axis=1).idxmax()
#item 30 occurs 22 times in at least 1 shop
dataItemXShop_count.max(axis=1).max()
dataItemXShop_count.max(axis=1).idxmin()
##item 0 occurs 1 times in at least 1 shop
dataItemXShop_count.max(axis=1).min()
itemPresence=dataItemXShop_count.sum(axis=1)/55
#stability of item presence on average
itemPresence.plot(kind="hist",bins=22,figsize=(10,5),
title="Number of item occurrences in 22 month period") #sort_values(ascending=False).
# most items appear only once
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 1)
sns.heatmap(dataItemXShop_count,ax=ax)
ax.set_title("Monthly appeareances of items in shops")
fig
######
dataItemXMonth_price=pd.pivot_table(dataTrain[{"date_block_num","item_id","item_price"}],
index=["item_id"],values="item_price",
columns=["date_block_num"],aggfunc={np.min,np.max})
dataItemXMonth_price.keys()
# item 22167
dataItemXMonth_price.loc[(22167)]
# item 22167 varys min price from 284 to 155
nan_indices2=dataItemXMonth_price.iloc[:,range(0,22)].isnull()
#sum(nan_indices2.values.tolist()==nan_indices.values.tolist())
nan_indices2.iloc[0:10,0:10] #itemXmonths
nan_indices.iloc[0:10,0:10] #itemXshops
####
# each month, in how many shops each item occurs?
dataItemXMonth_count=pd.pivot_table(dataTrain[{"date_block_num","item_id","shop_id"}],
index=["item_id"],values="shop_id",
columns=["date_block_num"],aggfunc=pd.value_counts)
dataItemXMonth_count.iloc[17000:17005,0:5]
dataItemXMonth_count=dataItemXMonth_count.applymap(lambda x: np.nansum(x))
dataItemXMonth_count.keys()
dataItemXMonth_count.iloc[0:40,].transpose().plot.line()
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 1)
sns.heatmap(dataItemXMonth_count,ax=ax)
ax.set_title("Item appearences in each month")
fig
# most items appear only a few times.
# none item has a regular high appearence
#
#dataItemXMonth_count=dataItemXMonth_count.reset_index()
#dataItemXMonth_count.columns=["item_id",12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,33]
#dataItemXMonth_count.iloc[0:5,].transpose().plot.line()
#dataItemXMonth_count.keys()
####
# how many items each shop sell each month?
dataShopXMonth_count=pd.pivot_table(dataTrain[{"date_block_num","item_id","shop_id"}],
index=["shop_id"],values="item_id",
columns=["date_block_num"],aggfunc="count")
dataShopXMonth_perc=dataShopXMonth_count.applymap(lambda x: (x/17054)*100)
#dataShopXMonth_count.max().max()
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 2)
sns.heatmap(dataShopXMonth_count,ax=ax[0])
sns.heatmap(dataShopXMonth_perc,ax=ax[1])
ax[0].set_title("Items sold in each shop for month")
ax[1].set_title("% of items sold in each shop for month")
fig
# shop 9,11,13,17,20,25,29,30,31..have more variety
# only 20% of items are sold in each shop,
# and none is continuosly sold
###############################################################################
###############################################################################
############################### CREATE DF for prediction
dataTrain.plot(subplots=True)
# *keys
# date_block_num *
# item_id *
# shop_id *
# category_freq <-
# item_price
# item_freq <-
# shop_freq <-
# month
# month_cnt !!!!TARGET
dataTrain.keys()
dataTrain.set_index(["date_block_num","shop_id","item_id"])
dataTrain.iloc[20:30,2:8]
#sum(dataTrain["item_freq"]==dataTrain["shop_freq"])
## Calculate correlation between variables
# all variables are highly correlated with "month_cnt" except the price
CC=dataTrain[["item_price","month_cnt","month"]].corr()#"item_freq",
CC
# item_freq category_id item_price month_cnt
#item_freq 1.000000 -0.073820 0.067416 0.521578
#category_id -0.073820 1.000000 -0.228345 -0.010741
#item_price 0.067416 -0.228345 1.000000 0.022186
#month_cnt 0.521578 -0.010741 0.022186 1.000000
# Transform it in a links data frame (3 columns only):
links = C.stack().reset_index()
links.columns =["var1","var2","corr_val"]
# remove self correlation (cor(A,A)=1)
links_filtered=links.loc[ (links['var1'] != links['var2']) ]
links_filtered
# Build your graph
#G = nx.Graph()
G = nx.path_graph(0)
graph = {"freq":["price","count"],"price":["freq","count"],
"count":["price","freq"]}
leng=1
#[('freq', 'price'), ('freq', 'count'), ('price', 'count')]
values=[0.067,0.522,0.022]
for vertex, edges in graph.items():
G.add_node("%s" % vertex)
# leng+=1
for edge in edges:
G.add_node("%s" % edge)
G.add_edge("%s" % vertex, "%s" % edge, weight = leng)
# print("'%s' connects with '%s'" % (vertex,edge))
# Create positions of all nodes and save them
#pos = nx.spring_layout(G)
pos={"price": [1.5,1.5],"freq": [0.5,1.5],"count": [1,1]}
labels ={('freq', 'price'): values[0], ('freq', 'count'): values[1],
('price', 'count'): values[2]}
# Draw the graph according to node positions
nx.draw(G, pos, with_labels=True,node_size=3000)
# Create edge labels
#labels = {edg: str(values[G.edges[edg]]) for edg in G.edges}
# Draw edge labels according to node positions
pos_lab={"price": [1.25,1.25],"freq": [0.75,1.25],"count": [1,1.5]}
nx.draw_networkx_edge_labels(G, pos,font_color='red',edge_labels=labels)
plt.axis('off')
plt.show()
################
#import statsmodels.formula.api as smf
# Instantiate a gamma family model with the default link function.
#poisson_model = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
#form="month_cnt ~ date_block_num + item_id + shop_id + item_freq + category_id + month + item_price"
#form="month_cnt ~ date_block_num + item_freq + month + item_price"
#poisson_model = smf.glm(formula=form, data=dataTrain, family=sm.families.Poisson())
#poisson_fit = poisson_model.fit()
#dir(poisson_fit.mle_settings)
#poisson_fit.use_t
#print(poisson_fit.summary())
#
# Generalized Linear Model Regression Results
#==============================================================================
#Dep. Variable: month_cnt No. Observations: 921400
#Model: GLM Df Residuals: 921392
#Model Family: Poisson Df Model: 7
#Link Function: log Scale: 1.0000
#Method: IRLS Log-Likelihood:* -inf
#Date: Fri, 15 Nov 2019 Deviance: * 8.7344e+05
#Time: 18:15:41 Pearson chi2: 3.83e+06
#No. Iterations: 7 *non-defined for Poisson family
#Covariance Type: nonrobust * non defined for scale=1
#==================================================================================
# coef std err z P>|z| [0.025 0.975]
#----------------------------------------------------------------------------------
#Intercept 0.5517 0.003 163.637 0.000 0.545 0.558
#date_block_num 0.0013 0.000 10.540 0.000 0.001 0.002
#item_id -9.174e-06 1.23e-07 -74.511 0.000 -9.41e-06 -8.93e-06
#shop_id -0.0012 4.26e-05 -27.026 0.000 -0.001 -0.001
#item_freq 0.1936 8.63e-05 2244.772 0.000 0.193 0.194
#category_id -0.0055 4.5e-05 -123.243 0.000 -0.006 -0.005
#month 0.0017 0.000 7.667 0.000 0.001 0.002
#item_price 1.289e-05 3.19e-07 40.347 0.000 1.23e-05 1.35e-05
#==================================================================================
# item_id, category_id have small weight
# Generalized Linear Model Regression Results
#==============================================================================
#Dep. Variable: month_cnt No. Observations: 921400
#Model: GLM Df Residuals: 921395
#Model Family: Poisson Df Model: 4
#Link Function: log Scale: 1.0000
#Method: IRLS Log-Likelihood: -inf
#Date: Fri, 15 Nov 2019 Deviance: 9.1019e+05
#Time: 18:40:30 Pearson chi2: 3.78e+06
#No. Iterations: 7
#Covariance Type: nonrobust
#==================================================================================
# coef std err z P>|z| [0.025 0.975]
#----------------------------------------------------------------------------------
#Intercept 0.2137 0.003 81.395 0.000 0.209 0.219
#date_block_num 0.0004 0.000 3.000 0.003 0.000 0.001
#item_freq 0.1881 8.02e-05 2346.055 0.000 0.188 0.188
#month 0.0024 0.000 11.216 0.000 0.002 0.003
#item_price 2.899e-05 2.82e-07 102.951 0.000 2.84e-05 2.95e-05
#==================================================================================
# item_freq is obviously the larger coeff
filePath="working_data/"+"1C_ctrl_training.csv"
data2=pd.read_csv(filePath, index_col=False)
data2=data2.drop(["Unnamed: 0",'Unnamed: 0.1', 'Unnamed: 0.1.1'],axis=1)
data2.keys()
data2.head()
dataCtrl=ct.my_prepareTrain(data2)
dataCtrl.keys()
dataCtrlHM=ct.my_summaryHistoricFunc(dataCtrl,f_mean=True,f_sum=False) #takes almost 10 minutes
#dataCtrl=pd.get_dummies(dataCtrl)
dataCtrl.reset_index()
dataCtrlHM.reset_index()
| #err=abs(target-predictions)
#err.plot()
#err.mean()
#err.max()
#err.min()
#
#rmse=my_rmse(target,predictions) #15.141159663472205
## not that bad...i should see the mean, std of the counts
#poisson_fit.params
#poisson_fit
dataTrainHM=ct.my_summaryHistoricFunc(dataTrain,f_mean=True,f_sum=False) #15:54-15:09
#dataTrainHM=ct.my_summaryHistoMean(dataTrain) #takes almost 10 minutes
#dataTrain=pd.get_dummies(dataTrain)
dataTrain.reset_index()
dataTrainHM.reset_index()
D=pd.merge(dataTrain,dataTrainHM,how="left",on=["date_block_num","item_id","shop_id"])
#D=D.drop("histo_f_cnt",axis=1)
CC=D.corr()
CC["month_cnt"]
sum(abs(CC.values)>0.4)
#models_param=[["month_cnt ~ date_block_num + item_freq + month + item_price","GLM","poisson"],
# ["month_cnt ~ date_block_num + item_id + shop_id + item_freq + category_id + month + item_price","GLM","poisson"]
# ]
models_param=[[D.keys(),"GLM","poisson"]]#,[D.keys(),"GLM","poisson"]
i=0
modelRes=pd.DataFrame(columns=["model","formula","family","aic",
"scale","log-likel","deviance","chi2",
"mean_err_perc","sign_pval_perc",
"rmse_in","rmse_out","acc_in","acc_out"])
for i in range(0,len(models_param)):
aux=ct.my_compareFitModels(D,models_param[i][0],models_param[i][1],models_param[i][2],C)
modelRes=modelRes.append(aux,sort=False).reset_index() #18:1018:13
modelRes.iloc[0:1,0:11]
[y,X]=ct.my_df2arry_endo_exog(D,"month_cnt")
model = sm.GLM(y,X, family=sm.families.Poisson())
fitModel=model.fit(method='nm', maxiter=100, maxfun=100)#18:15-18:16
predictions=fitModel.predict(exog=X, transform=True)
err=abs(y-predictions)
acc=100*(len([e for e in err if e<1])/len(err)) # <1:53,74% <2: 88,28%
acc
err.mean()
#rmse_in=ct.my_calculateAccuracy(dataTrain,"month_cnt",fitModel)
#rmse_out=ct.my_calculateAccuracy(dataTest,"month_cnt",fitModel)
import my_functions_1c as ct
fitModel.summary()
#Dep. Variable: y No. Observations: 921400
#Model: GLM Df Residuals: 921393
#Model Family: Poisson Df Model: 6
#Link Function: log Scale: 1.0000
#Method: nm Log-Likelihood: -inf
#Date: Sat, 30 Nov 2019 Deviance: 1.0246e+07
#Time: 18:24:07 Pearson chi2: 2.08e+07
#No. Iterations: 556
#Covariance Type: nonrobust
#==============================================================================
# coef std err z P>|z| [0.025 0.975]
#------------------------------------------------------------------------------
#x1 0.0211 8.18e-05 258.570 0.000 0.021 0.021
#x2 -1.245e-05 9.22e-08 -134.923 0.000 -1.26e-05 -1.23e-05
#x3 0.0075 3.68e-05 204.837 0.000 0.007 0.008
#x4 -0.0013 3.61e-05 -35.865 0.000 -0.001 -0.001
#x5 0.0181 0.000 97.250 0.000 0.018 0.018
#x6 4.68e-05 2.6e-07 180.244 0.000 4.63e-05 4.73e-05
#x7 0.0112 1.04e-06 1.07e+04 0.000 0.011 0.011
#==============================================================================
[y,X]=ct.my_df2arry_endo_exog(D,"month_cnt")
rfModel=RandomForestRegressor(n_estimators=500,max_depth=10,random_state=18)
rfFit=rfModel.fit(X,y) #17:09-17:26
pred=rfFit.predict(X) #17:26-17:27
err=abs(y-pred)
err2=y-pred
np.mean(err) #1.3330819427844776
np.max(err) #1166.5251575783172
np.min(err) #e-05
100*(len([e for e in err2 if e>0])/len(err2)) #25.951378337312786
100*(len([e for e in err2 if e<0])/len(err2)) #74.04862166268722
100*(len([e for e in err if e<1])/len(err)) #69.638, tuning: 72.93043195137834
100*(len([e for e in err if e<2])/len(err)) # n_estimators=500 :88.0572
np.mean([e for e in err2 if e<0]) #-0.89704447
dataCtrlHM=ct.my_summaryHistoricFunc(dataCtrl,f_mean=True,f_sum=True) #takes almost 10 minutes
#dataCtrl=pd.get_dummies(dataCtrl)
dataCtrl.reset_index()
dataCtrlHM.reset_index()
C=pd.merge(dataCtrl,dataCtrlHM,how="left",on=["date_block_num","item_id","shop_id"])
[y_c,X_c]=ct.my_df2arry_endo_exog(C,"month_cnt")
rfFit_c=rfModel.fit(X_c,y_c)
pred_c=rfFit_c.predict(X_c)
err_c=abs(y_c-pred_c)
err2_c=y_c-pred_c
np.mean(err_c) #1.3580242780446712
np.max(err_c) # 442.8908487407861
np.min(err_c) # e-05
100*(len([e for e in err2_c if e>0])/len(err2_c)) #24.33243568640908
100*(len([e for e in err2_c if e<0])/len(err2_c)) # 75.66756431359092
100*(len([e for e in err_c if e<1])/len(err_c)) # 68.2189366664534
np.mean([e for e in err2_c if e<0]) #-0.89704447
sns.set(rc={'figure.figsize':(12,6)})
fig, ax1 = plt.subplots(2, 3)
sns.regplot(x=y,y=pred,ax=ax1[0,0])
ax1[0,0].set_title("[train] x: actual | y: predicted")
sns.regplot(y,err,ax=ax1[0,1])
ax1[0,1].set_title("[train] x: actual | y: abs(act-pred)")
sns.regplot(y,err2,ax=ax1[0,2])
ax1[0,2].set_title("[train] x: actual | y: act-pred")
sns.regplot(y_c,pred_c,ax=ax1[1,0])
ax1[1,0].set_title("[control] x: actual | y: predicted")
sns.regplot(y_c,err_c,ax=ax1[1,1])
ax1[1,1].set_title("[control] x: actual | y: abs(act-pred)")
sns.regplot(y_c,err2_c,ax=ax1[1,2])
ax1[1,2].set_title("[control] x: actual | y: act-pred")
fig
######### plot tree
from sklearn.tree import export_graphviz
import pydot
featureNames=[col for col in dataTrain.columns if col != "month_cnt"]
tree=rfModel.estimators_[1]
export_graphviz(tree,out_file="tree.dot",rounded=True,precision=1,
feature_names=featureNames)
(graph,)=pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
featureNames=[col for col in D.columns if col != "month_cnt"]
featImp=list(rfModel.feature_importances_)
feat_imp=[(feat,round(imp,2)) for (feat,imp) in zip(featureNames,featImp)]
feat_imp
# linear regression actual vs err (non absolute)
from sklearn.linear_model import LinearRegression
model_pred = LinearRegression().fit(y.reshape(-1,1), pred)
model_pred.coef_ #0.67778392 with pred
model_err = LinearRegression().fit(y.reshape(-1,1), err2)
model_err.coef_ #0.32221608 with err
#model.intercept_ #0.47178088839165866 with err
r2p = model_pred.score(y.reshape(-1,1), pred) # 0.7592726478685866
r2e = model_err.score(y.reshape(-1,1), err2) #0.41616991455938823
#df=ct.my_summaryHistoMean(dataTrain,)
#
#aa=ct.my_historicMean(dataTrain,"month_cnt")
import my_functions_1c as ct
aaa=ct.my_historicMean(dataTrain,14,"month_cnt",replaceNaN=True)
aa=ct.my_historicMean(dataTrain,12,"month_cnt",replaceNaN=True)
DD=pd.DataFrame({"date_block_num":[1,1,1,1,2,2,2,2,3,3,3,3],
"item_id":[10,15,20,25,10,30,35,40,10,20,30,55],
"shop_id":[200,200,203,203,200,210,212,212,200,203,210,230],
"month_cnt":[1,2,3,4,2,4,6,8,3,6,9,12]})
bb=ct.my_summaryHistoMean(DD)
bb
bb2=ct.my_historicMean(DD,3,"month_cnt")
bb2=ct.my_historicSum(DD,3,"month_cnt")
bb2
#item_shop=DD[DD["date_block_num"]==1][["item_id","shop_id"]].values.tolist()
#DDg=DD.groupby(["item_id","shop_id"])
#DD2=pd.concat(DDg.get_group(tuple(g)) for g in item_shop)
#DD[DD[["item_id","shop_id"]]==item_shop[0]][["item_id","shop_id"]]
print(DD)
####################### eliminate item_freq
#dataTrain2=dataTrain.drop("item_freq",axis=1)
#dataTrain2.keys()
#rfModel2=RandomForestRegressor(n_estimators=1000,max_depth=10,random_state=18)
#[y2,X2]=ct.my_df2arry_endo_exog(dataTrain2,"month_cnt")
#
#
#rfFit2=rfModel2.fit(X2,y2)
#pred2=rfFit2.predict(X2)
#err2=abs(y2-pred2)
#err22=y2-pred2
#np.mean(err2) #1.3341
#np.max(err2) #1212.77
#np.min(err2) #0.000115
#100*(len([e for e in err22 if e>0])/len(err22)) #25.72747992185804
#100*(len([e for e in err22 if e<0])/len(err22)) #74.27252007814195
#100*(len([e for e in err2 if e<1])/len(err2)) #69.00672889081832
#
#
#dataCtrl2=dataCtrl.drop("item_freq",axis=1)
#[y2_c,X2_c]=ct.my_df2arry_endo_exog(dataCtrl2,"month_cnt")
#rfFit2_c=rfModel2.fit(X2_c,y2_c)
#pred2_c=rfFit2_c.predict(X2_c)
#err2_c=abs(y2_c-pred2_c)
#err22_c=y2_c-pred2_c
#np.mean(err2_c) #1.3543138339118084
#np.max(err2_c) #444.1032
#np.min(err2_c) #1.4240848584812227e-06
#100*(len([e for e in err22_c if e>0])/len(err22_c)) #24.342905002588246
#100*(len([e for e in err22_c if e<0])/len(err22_c)) #75.65709499741176
#100*(len([e for e in err2_c if e<1])/len(err2_c)) # 68.27390057639403
#np.mean([e for e in err2_c if e<0])
#
#sns.set(rc={'figure.figsize':(12,6)})
#fig, ax1 = plt.subplots(2, 3)
#sns.regplot(x=y2,y=pred2,ax=ax1[0,0])
#ax1[0,0].set_title("[train] x: actual | y: predicted")
#sns.regplot(y2,err2,ax=ax1[0,1])
#ax1[0,1].set_title("[train] x: actual | y: abs(act-pred)")
#sns.regplot(y2,err22,ax=ax1[0,2])
#ax1[0,2].set_title("[train] x: actual | y: act-pred")
#
#sns.regplot(y2_c,pred2_c,ax=ax1[1,0])
#ax1[1,0].set_title("[control] x: actual | y: predicted")
#sns.regplot(y2_c,err2_c,ax=ax1[1,1])
#ax1[1,1].set_title("[control] x: actual | y: abs(act-pred)")
#sns.regplot(y2_c,err22_c,ax=ax1[1,2])
#ax1[1,2].set_title("[control] x: actual | y: act-pred")
#
#fig
#
#from sklearn.linear_model import LinearRegression
#model = LinearRegression().fit(y2.reshape(-1,1), err2)
#model.coef_ #0.63934656 with pred
#model.coef_ #0.381 with err
#model.intercept_ #0.47178088839165866 with err
#r2 = model.score(y2.reshape(-1,1), pred2) # 0.7181413899627395
#r2 = model.score(y2.reshape(-1,1), err2) #0.5377096163365533
########## plot tree
#from sklearn.tree import export_graphviz
#import pydot
#featureNames2=[col for col in dataTrain2.columns if col != "month_cnt"]
tree=rfModel.estimators_[1]
export_graphviz(tree,out_file="tree.dot",rounded=True,precision=1,
feature_names=featureNames)
(graph,)=pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
featImp2=list(rfModel2.feature_importances_)
feat_imp2=[(feat,round(imp,2)) for (feat,imp) in zip(featureNames2,featImp2)]
df=pd.get_dummies(dataTrain) | C=pd.merge(dataCtrl,dataCtrlHM,how="left",on=["date_block_num","item_id","shop_id"])
#target=dataCtrl["month_cnt"]
#dataCtrl=dataCtrl.drop("month_cnt",axis=1)
#predictions=poisson_fit.predict(exog=dataCtrl, transform=True) | random_line_split |
mla_1c_v0.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 19:10:13 2019
@author: chiara
"""
import os
import numpy as np # scientific calculation
import pandas as pd # data analysis
import matplotlib.pyplot as plt # data plot
#import matplotlib
#from datetime import datetime,date # date objects
import seaborn as sns # data plot
# import statsmodels.api as sm
#import networkx as nx
from sklearn.ensemble import RandomForestRegressor
# Set working paths
mainPath="/home/chiara/kaggle/1C_PYproject/scripts/"
os.chdir(mainPath)
#from my_functions_1c import my_prepareTrain
import my_functions_1c as ct
########################
filePath="/home/chiara/kaggle/1C_PYproject/data/competitive-data-science-predict-future-sales/"+"sales_train_v2.csv"
data=pd.read_csv(filePath, index_col=False)
data.head(5)
data.shape
data.tail(5)
######################## LOAD DATA TRAIN
filePath="working_data/"+"1C_small_training.csv"
data=pd.read_csv(filePath, index_col=False)
data=data.drop("Unnamed: 0",axis=1)
data.keys()
data.head()
dataTrain=ct.my_prepareTrain(data) #921400 rows x 9 columns
dataTrain.keys()
#["date_block_num","item_id","shop_id","item_freq","shop_freq",
# "category_freq", "month","item_price","month_cnt"]
dataTrain.reset_index()
dataTrain.iloc[10:20,0:5]
dataTrain.plot(subplots=True)
##############################################################################
##############################################################################
############# CHECKS/SUMMARIES
## is the item price fixed among shops? over months?
# price is not fixed among shops
# price is not fixed among months
dataPriceXShop=dataTrain[{"date_block_num","item_id","shop_id","item_price"}]
dataPriceXShop.head()
dataPriceXShop.shape
dataItemXShop_price=pd.pivot_table(dataPriceXShop,
index=["date_block_num","item_id"],
values="item_price",columns=["shop_id"])
dataItemXShop_price #[135451 rows x 55 columns]
dataItemXShop_price.keys()
dataItemXShop_price.index
dataItemXShop_price.loc[(33,33)]
# all shops priced item 33 199, but shop 49 priced it 159
dataItemXShop_price.loc[(12,33)]
# which items are consistent/present among shops? over months?
33-12+1 # 22 months
nan_indices=dataItemXShop_price.isnull()
#dataItemXShop_count=pd.pivot_table(nan_indices,
# index="item_id",columns=[""]
dataItemXShop_count=nan_indices.groupby("item_id").sum() #over months
dataItemXShop_count.max(axis=1).idxmax()
#item 30 occurs 22 times in at least 1 shop
dataItemXShop_count.max(axis=1).max()
dataItemXShop_count.max(axis=1).idxmin()
##item 0 occurs 1 times in at least 1 shop
dataItemXShop_count.max(axis=1).min()
itemPresence=dataItemXShop_count.sum(axis=1)/55
#stability of item presence on average
itemPresence.plot(kind="hist",bins=22,figsize=(10,5),
title="Number of item occurrences in 22 month period") #sort_values(ascending=False).
# most items appear only once
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 1)
sns.heatmap(dataItemXShop_count,ax=ax)
ax.set_title("Monthly appeareances of items in shops")
fig
######
dataItemXMonth_price=pd.pivot_table(dataTrain[{"date_block_num","item_id","item_price"}],
index=["item_id"],values="item_price",
columns=["date_block_num"],aggfunc={np.min,np.max})
dataItemXMonth_price.keys()
# item 22167
dataItemXMonth_price.loc[(22167)]
# item 22167 varys min price from 284 to 155
nan_indices2=dataItemXMonth_price.iloc[:,range(0,22)].isnull()
#sum(nan_indices2.values.tolist()==nan_indices.values.tolist())
nan_indices2.iloc[0:10,0:10] #itemXmonths
nan_indices.iloc[0:10,0:10] #itemXshops
####
# each month, in how many shops each item occurs?
dataItemXMonth_count=pd.pivot_table(dataTrain[{"date_block_num","item_id","shop_id"}],
index=["item_id"],values="shop_id",
columns=["date_block_num"],aggfunc=pd.value_counts)
dataItemXMonth_count.iloc[17000:17005,0:5]
dataItemXMonth_count=dataItemXMonth_count.applymap(lambda x: np.nansum(x))
dataItemXMonth_count.keys()
dataItemXMonth_count.iloc[0:40,].transpose().plot.line()
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 1)
sns.heatmap(dataItemXMonth_count,ax=ax)
ax.set_title("Item appearences in each month")
fig
# most items appear only a few times.
# none item has a regular high appearence
#
#dataItemXMonth_count=dataItemXMonth_count.reset_index()
#dataItemXMonth_count.columns=["item_id",12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,33]
#dataItemXMonth_count.iloc[0:5,].transpose().plot.line()
#dataItemXMonth_count.keys()
####
# how many items each shop sell each month?
dataShopXMonth_count=pd.pivot_table(dataTrain[{"date_block_num","item_id","shop_id"}],
index=["shop_id"],values="item_id",
columns=["date_block_num"],aggfunc="count")
dataShopXMonth_perc=dataShopXMonth_count.applymap(lambda x: (x/17054)*100)
#dataShopXMonth_count.max().max()
sns.set(rc={'figure.figsize':(10,12)})
fig, ax = plt.subplots(1, 2)
sns.heatmap(dataShopXMonth_count,ax=ax[0])
sns.heatmap(dataShopXMonth_perc,ax=ax[1])
ax[0].set_title("Items sold in each shop for month")
ax[1].set_title("% of items sold in each shop for month")
fig
# shop 9,11,13,17,20,25,29,30,31..have more variety
# only 20% of items are sold in each shop,
# and none is continuosly sold
###############################################################################
###############################################################################
############################### CREATE DF for prediction
dataTrain.plot(subplots=True)
# *keys
# date_block_num *
# item_id *
# shop_id *
# category_freq <-
# item_price
# item_freq <-
# shop_freq <-
# month
# month_cnt !!!!TARGET
dataTrain.keys()
dataTrain.set_index(["date_block_num","shop_id","item_id"])
dataTrain.iloc[20:30,2:8]
#sum(dataTrain["item_freq"]==dataTrain["shop_freq"])
## Calculate correlation between variables
# all variables are highly correlated with "month_cnt" except the price
CC=dataTrain[["item_price","month_cnt","month"]].corr()#"item_freq",
CC
# item_freq category_id item_price month_cnt
#item_freq 1.000000 -0.073820 0.067416 0.521578
#category_id -0.073820 1.000000 -0.228345 -0.010741
#item_price 0.067416 -0.228345 1.000000 0.022186
#month_cnt 0.521578 -0.010741 0.022186 1.000000
# Transform it in a links data frame (3 columns only):
links = C.stack().reset_index()
links.columns =["var1","var2","corr_val"]
# remove self correlation (cor(A,A)=1)
links_filtered=links.loc[ (links['var1'] != links['var2']) ]
links_filtered
# Build your graph
#G = nx.Graph()
G = nx.path_graph(0)
graph = {"freq":["price","count"],"price":["freq","count"],
"count":["price","freq"]}
leng=1
#[('freq', 'price'), ('freq', 'count'), ('price', 'count')]
values=[0.067,0.522,0.022]
for vertex, edges in graph.items():
|
# print("'%s' connects with '%s'" % (vertex,edge))
# Create positions of all nodes and save them
#pos = nx.spring_layout(G)
pos={"price": [1.5,1.5],"freq": [0.5,1.5],"count": [1,1]}
labels ={('freq', 'price'): values[0], ('freq', 'count'): values[1],
('price', 'count'): values[2]}
# Draw the graph according to node positions
nx.draw(G, pos, with_labels=True,node_size=3000)
# Create edge labels
#labels = {edg: str(values[G.edges[edg]]) for edg in G.edges}
# Draw edge labels according to node positions
pos_lab={"price": [1.25,1.25],"freq": [0.75,1.25],"count": [1,1.5]}
nx.draw_networkx_edge_labels(G, pos,font_color='red',edge_labels=labels)
plt.axis('off')
plt.show()
################
#import statsmodels.formula.api as smf
# Instantiate a gamma family model with the default link function.
#poisson_model = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
#form="month_cnt ~ date_block_num + item_id + shop_id + item_freq + category_id + month + item_price"
#form="month_cnt ~ date_block_num + item_freq + month + item_price"
#poisson_model = smf.glm(formula=form, data=dataTrain, family=sm.families.Poisson())
#poisson_fit = poisson_model.fit()
#dir(poisson_fit.mle_settings)
#poisson_fit.use_t
#print(poisson_fit.summary())
#
# Generalized Linear Model Regression Results
#==============================================================================
#Dep. Variable: month_cnt No. Observations: 921400
#Model: GLM Df Residuals: 921392
#Model Family: Poisson Df Model: 7
#Link Function: log Scale: 1.0000
#Method: IRLS Log-Likelihood:* -inf
#Date: Fri, 15 Nov 2019 Deviance: * 8.7344e+05
#Time: 18:15:41 Pearson chi2: 3.83e+06
#No. Iterations: 7 *non-defined for Poisson family
#Covariance Type: nonrobust * non defined for scale=1
#==================================================================================
# coef std err z P>|z| [0.025 0.975]
#----------------------------------------------------------------------------------
#Intercept 0.5517 0.003 163.637 0.000 0.545 0.558
#date_block_num 0.0013 0.000 10.540 0.000 0.001 0.002
#item_id -9.174e-06 1.23e-07 -74.511 0.000 -9.41e-06 -8.93e-06
#shop_id -0.0012 4.26e-05 -27.026 0.000 -0.001 -0.001
#item_freq 0.1936 8.63e-05 2244.772 0.000 0.193 0.194
#category_id -0.0055 4.5e-05 -123.243 0.000 -0.006 -0.005
#month 0.0017 0.000 7.667 0.000 0.001 0.002
#item_price 1.289e-05 3.19e-07 40.347 0.000 1.23e-05 1.35e-05
#==================================================================================
# item_id, category_id have small weight
# Generalized Linear Model Regression Results
#==============================================================================
#Dep. Variable: month_cnt No. Observations: 921400
#Model: GLM Df Residuals: 921395
#Model Family: Poisson Df Model: 4
#Link Function: log Scale: 1.0000
#Method: IRLS Log-Likelihood: -inf
#Date: Fri, 15 Nov 2019 Deviance: 9.1019e+05
#Time: 18:40:30 Pearson chi2: 3.78e+06
#No. Iterations: 7
#Covariance Type: nonrobust
#==================================================================================
# coef std err z P>|z| [0.025 0.975]
#----------------------------------------------------------------------------------
#Intercept 0.2137 0.003 81.395 0.000 0.209 0.219
#date_block_num 0.0004 0.000 3.000 0.003 0.000 0.001
#item_freq 0.1881 8.02e-05 2346.055 0.000 0.188 0.188
#month 0.0024 0.000 11.216 0.000 0.002 0.003
#item_price 2.899e-05 2.82e-07 102.951 0.000 2.84e-05 2.95e-05
#==================================================================================
# item_freq is obviously the larger coeff
filePath="working_data/"+"1C_ctrl_training.csv"
data2=pd.read_csv(filePath, index_col=False)
data2=data2.drop(["Unnamed: 0",'Unnamed: 0.1', 'Unnamed: 0.1.1'],axis=1)
data2.keys()
data2.head()
dataCtrl=ct.my_prepareTrain(data2)
dataCtrl.keys()
dataCtrlHM=ct.my_summaryHistoricFunc(dataCtrl,f_mean=True,f_sum=False) #takes almost 10 minutes
#dataCtrl=pd.get_dummies(dataCtrl)
dataCtrl.reset_index()
dataCtrlHM.reset_index()
C=pd.merge(dataCtrl,dataCtrlHM,how="left",on=["date_block_num","item_id","shop_id"])
#target=dataCtrl["month_cnt"]
#dataCtrl=dataCtrl.drop("month_cnt",axis=1)
#predictions=poisson_fit.predict(exog=dataCtrl, transform=True)
#err=abs(target-predictions)
#err.plot()
#err.mean()
#err.max()
#err.min()
#
#rmse=my_rmse(target,predictions) #15.141159663472205
## not that bad...i should see the mean, std of the counts
#poisson_fit.params
#poisson_fit
dataTrainHM=ct.my_summaryHistoricFunc(dataTrain,f_mean=True,f_sum=False) #15:54-15:09
#dataTrainHM=ct.my_summaryHistoMean(dataTrain) #takes almost 10 minutes
#dataTrain=pd.get_dummies(dataTrain)
dataTrain.reset_index()
dataTrainHM.reset_index()
D=pd.merge(dataTrain,dataTrainHM,how="left",on=["date_block_num","item_id","shop_id"])
#D=D.drop("histo_f_cnt",axis=1)
CC=D.corr()
CC["month_cnt"]
sum(abs(CC.values)>0.4)
#models_param=[["month_cnt ~ date_block_num + item_freq + month + item_price","GLM","poisson"],
# ["month_cnt ~ date_block_num + item_id + shop_id + item_freq + category_id + month + item_price","GLM","poisson"]
# ]
models_param=[[D.keys(),"GLM","poisson"]]#,[D.keys(),"GLM","poisson"]
i=0
modelRes=pd.DataFrame(columns=["model","formula","family","aic",
"scale","log-likel","deviance","chi2",
"mean_err_perc","sign_pval_perc",
"rmse_in","rmse_out","acc_in","acc_out"])
for i in range(0,len(models_param)):
aux=ct.my_compareFitModels(D,models_param[i][0],models_param[i][1],models_param[i][2],C)
modelRes=modelRes.append(aux,sort=False).reset_index() #18:1018:13
modelRes.iloc[0:1,0:11]
[y,X]=ct.my_df2arry_endo_exog(D,"month_cnt")
model = sm.GLM(y,X, family=sm.families.Poisson())
fitModel=model.fit(method='nm', maxiter=100, maxfun=100)#18:15-18:16
predictions=fitModel.predict(exog=X, transform=True)
err=abs(y-predictions)
acc=100*(len([e for e in err if e<1])/len(err)) # <1:53,74% <2: 88,28%
acc
err.mean()
#rmse_in=ct.my_calculateAccuracy(dataTrain,"month_cnt",fitModel)
#rmse_out=ct.my_calculateAccuracy(dataTest,"month_cnt",fitModel)
import my_functions_1c as ct
fitModel.summary()
#Dep. Variable: y No. Observations: 921400
#Model: GLM Df Residuals: 921393
#Model Family: Poisson Df Model: 6
#Link Function: log Scale: 1.0000
#Method: nm Log-Likelihood: -inf
#Date: Sat, 30 Nov 2019 Deviance: 1.0246e+07
#Time: 18:24:07 Pearson chi2: 2.08e+07
#No. Iterations: 556
#Covariance Type: nonrobust
#==============================================================================
# coef std err z P>|z| [0.025 0.975]
#------------------------------------------------------------------------------
#x1 0.0211 8.18e-05 258.570 0.000 0.021 0.021
#x2 -1.245e-05 9.22e-08 -134.923 0.000 -1.26e-05 -1.23e-05
#x3 0.0075 3.68e-05 204.837 0.000 0.007 0.008
#x4 -0.0013 3.61e-05 -35.865 0.000 -0.001 -0.001
#x5 0.0181 0.000 97.250 0.000 0.018 0.018
#x6 4.68e-05 2.6e-07 180.244 0.000 4.63e-05 4.73e-05
#x7 0.0112 1.04e-06 1.07e+04 0.000 0.011 0.011
#==============================================================================
[y,X]=ct.my_df2arry_endo_exog(D,"month_cnt")
rfModel=RandomForestRegressor(n_estimators=500,max_depth=10,random_state=18)
rfFit=rfModel.fit(X,y) #17:09-17:26
pred=rfFit.predict(X) #17:26-17:27
err=abs(y-pred)
err2=y-pred
np.mean(err) #1.3330819427844776
np.max(err) #1166.5251575783172
np.min(err) #e-05
100*(len([e for e in err2 if e>0])/len(err2)) #25.951378337312786
100*(len([e for e in err2 if e<0])/len(err2)) #74.04862166268722
100*(len([e for e in err if e<1])/len(err)) #69.638, tuning: 72.93043195137834
100*(len([e for e in err if e<2])/len(err)) # n_estimators=500 :88.0572
np.mean([e for e in err2 if e<0]) #-0.89704447
dataCtrlHM=ct.my_summaryHistoricFunc(dataCtrl,f_mean=True,f_sum=True) #takes almost 10 minutes
#dataCtrl=pd.get_dummies(dataCtrl)
dataCtrl.reset_index()
dataCtrlHM.reset_index()
C=pd.merge(dataCtrl,dataCtrlHM,how="left",on=["date_block_num","item_id","shop_id"])
[y_c,X_c]=ct.my_df2arry_endo_exog(C,"month_cnt")
rfFit_c=rfModel.fit(X_c,y_c)
pred_c=rfFit_c.predict(X_c)
err_c=abs(y_c-pred_c)
err2_c=y_c-pred_c
np.mean(err_c) #1.3580242780446712
np.max(err_c) # 442.8908487407861
np.min(err_c) # e-05
100*(len([e for e in err2_c if e>0])/len(err2_c)) #24.33243568640908
100*(len([e for e in err2_c if e<0])/len(err2_c)) # 75.66756431359092
100*(len([e for e in err_c if e<1])/len(err_c)) # 68.2189366664534
np.mean([e for e in err2_c if e<0]) #-0.89704447
sns.set(rc={'figure.figsize':(12,6)})
fig, ax1 = plt.subplots(2, 3)
sns.regplot(x=y,y=pred,ax=ax1[0,0])
ax1[0,0].set_title("[train] x: actual | y: predicted")
sns.regplot(y,err,ax=ax1[0,1])
ax1[0,1].set_title("[train] x: actual | y: abs(act-pred)")
sns.regplot(y,err2,ax=ax1[0,2])
ax1[0,2].set_title("[train] x: actual | y: act-pred")
sns.regplot(y_c,pred_c,ax=ax1[1,0])
ax1[1,0].set_title("[control] x: actual | y: predicted")
sns.regplot(y_c,err_c,ax=ax1[1,1])
ax1[1,1].set_title("[control] x: actual | y: abs(act-pred)")
sns.regplot(y_c,err2_c,ax=ax1[1,2])
ax1[1,2].set_title("[control] x: actual | y: act-pred")
fig
######### plot tree
from sklearn.tree import export_graphviz
import pydot
featureNames=[col for col in dataTrain.columns if col != "month_cnt"]
tree=rfModel.estimators_[1]
export_graphviz(tree,out_file="tree.dot",rounded=True,precision=1,
feature_names=featureNames)
(graph,)=pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
featureNames=[col for col in D.columns if col != "month_cnt"]
featImp=list(rfModel.feature_importances_)
feat_imp=[(feat,round(imp,2)) for (feat,imp) in zip(featureNames,featImp)]
feat_imp
# linear regression actual vs err (non absolute)
from sklearn.linear_model import LinearRegression
model_pred = LinearRegression().fit(y.reshape(-1,1), pred)
model_pred.coef_ #0.67778392 with pred
model_err = LinearRegression().fit(y.reshape(-1,1), err2)
model_err.coef_ #0.32221608 with err
#model.intercept_ #0.47178088839165866 with err
r2p = model_pred.score(y.reshape(-1,1), pred) # 0.7592726478685866
r2e = model_err.score(y.reshape(-1,1), err2) #0.41616991455938823
#df=ct.my_summaryHistoMean(dataTrain,)
#
#aa=ct.my_historicMean(dataTrain,"month_cnt")
import my_functions_1c as ct
aaa=ct.my_historicMean(dataTrain,14,"month_cnt",replaceNaN=True)
aa=ct.my_historicMean(dataTrain,12,"month_cnt",replaceNaN=True)
DD=pd.DataFrame({"date_block_num":[1,1,1,1,2,2,2,2,3,3,3,3],
"item_id":[10,15,20,25,10,30,35,40,10,20,30,55],
"shop_id":[200,200,203,203,200,210,212,212,200,203,210,230],
"month_cnt":[1,2,3,4,2,4,6,8,3,6,9,12]})
bb=ct.my_summaryHistoMean(DD)
bb
bb2=ct.my_historicMean(DD,3,"month_cnt")
bb2=ct.my_historicSum(DD,3,"month_cnt")
bb2
#item_shop=DD[DD["date_block_num"]==1][["item_id","shop_id"]].values.tolist()
#DDg=DD.groupby(["item_id","shop_id"])
#DD2=pd.concat(DDg.get_group(tuple(g)) for g in item_shop)
#DD[DD[["item_id","shop_id"]]==item_shop[0]][["item_id","shop_id"]]
print(DD)
####################### eliminate item_freq
#dataTrain2=dataTrain.drop("item_freq",axis=1)
#dataTrain2.keys()
#rfModel2=RandomForestRegressor(n_estimators=1000,max_depth=10,random_state=18)
#[y2,X2]=ct.my_df2arry_endo_exog(dataTrain2,"month_cnt")
#
#
#rfFit2=rfModel2.fit(X2,y2)
#pred2=rfFit2.predict(X2)
#err2=abs(y2-pred2)
#err22=y2-pred2
#np.mean(err2) #1.3341
#np.max(err2) #1212.77
#np.min(err2) #0.000115
#100*(len([e for e in err22 if e>0])/len(err22)) #25.72747992185804
#100*(len([e for e in err22 if e<0])/len(err22)) #74.27252007814195
#100*(len([e for e in err2 if e<1])/len(err2)) #69.00672889081832
#
#
#dataCtrl2=dataCtrl.drop("item_freq",axis=1)
#[y2_c,X2_c]=ct.my_df2arry_endo_exog(dataCtrl2,"month_cnt")
#rfFit2_c=rfModel2.fit(X2_c,y2_c)
#pred2_c=rfFit2_c.predict(X2_c)
#err2_c=abs(y2_c-pred2_c)
#err22_c=y2_c-pred2_c
#np.mean(err2_c) #1.3543138339118084
#np.max(err2_c) #444.1032
#np.min(err2_c) #1.4240848584812227e-06
#100*(len([e for e in err22_c if e>0])/len(err22_c)) #24.342905002588246
#100*(len([e for e in err22_c if e<0])/len(err22_c)) #75.65709499741176
#100*(len([e for e in err2_c if e<1])/len(err2_c)) # 68.27390057639403
#np.mean([e for e in err2_c if e<0])
#
#sns.set(rc={'figure.figsize':(12,6)})
#fig, ax1 = plt.subplots(2, 3)
#sns.regplot(x=y2,y=pred2,ax=ax1[0,0])
#ax1[0,0].set_title("[train] x: actual | y: predicted")
#sns.regplot(y2,err2,ax=ax1[0,1])
#ax1[0,1].set_title("[train] x: actual | y: abs(act-pred)")
#sns.regplot(y2,err22,ax=ax1[0,2])
#ax1[0,2].set_title("[train] x: actual | y: act-pred")
#
#sns.regplot(y2_c,pred2_c,ax=ax1[1,0])
#ax1[1,0].set_title("[control] x: actual | y: predicted")
#sns.regplot(y2_c,err2_c,ax=ax1[1,1])
#ax1[1,1].set_title("[control] x: actual | y: abs(act-pred)")
#sns.regplot(y2_c,err22_c,ax=ax1[1,2])
#ax1[1,2].set_title("[control] x: actual | y: act-pred")
#
#fig
#
#from sklearn.linear_model import LinearRegression
#model = LinearRegression().fit(y2.reshape(-1,1), err2)
#model.coef_ #0.63934656 with pred
#model.coef_ #0.381 with err
#model.intercept_ #0.47178088839165866 with err
#r2 = model.score(y2.reshape(-1,1), pred2) # 0.7181413899627395
#r2 = model.score(y2.reshape(-1,1), err2) #0.5377096163365533
########## plot tree
#from sklearn.tree import export_graphviz
#import pydot
#featureNames2=[col for col in dataTrain2.columns if col != "month_cnt"]
tree=rfModel.estimators_[1]
export_graphviz(tree,out_file="tree.dot",rounded=True,precision=1,
feature_names=featureNames)
(graph,)=pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
featImp2=list(rfModel2.feature_importances_)
feat_imp2=[(feat,round(imp,2)) for (feat,imp) in zip(featureNames2,featImp2)]
df=pd.get_dummies(dataTrain)
| G.add_node("%s" % vertex)
# leng+=1
for edge in edges:
G.add_node("%s" % edge)
G.add_edge("%s" % vertex, "%s" % edge, weight = leng) | conditional_block |
lib.rs | use http::{
Method,
StatusCode,
Uri,
Version,
};
use http::header::{
HeaderMap,
HeaderName,
HeaderValue,
InvalidHeaderName,
InvalidHeaderValue,
};
use http::method::InvalidMethod;
use http::uri::InvalidUriBytes;
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::io;
use std::io::{BufRead, BufWriter, Read, Write};
pub mod media_type;
static QUOTED_STRING_1G: &str =
r#""([\t !#-\[\]-~\x80-\xFF]|\\[\t !-~\x80-\xFF])*""#;
static TOKEN: &str = r"[!#$%&'*+.^_`|~0-9A-Za-z-]+";
#[derive(Debug)] | pub struct RequestHeader {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub struct ResponseHeader {
pub status_code: StatusCode,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub enum InvalidRequestHeader {
Format,
RequestLine(InvalidRequestLine),
HeaderField(InvalidHeaderField),
Io(io::Error),
}
impl From<InvalidRequestLine> for InvalidRequestHeader {
fn from(e: InvalidRequestLine) -> Self {
InvalidRequestHeader::RequestLine(e)
}
}
impl From<InvalidHeaderField> for InvalidRequestHeader {
fn from(e: InvalidHeaderField) -> Self {
InvalidRequestHeader::HeaderField(e)
}
}
impl From<io::Error> for InvalidRequestHeader {
fn from(e: io::Error) -> Self {
InvalidRequestHeader::Io(e)
}
}
const LINE_CAP: usize = 16384;
pub fn parse_request_header<B: BufRead>(mut stream: B)
-> Result<RequestHeader, InvalidRequestHeader>
{
// TODO: Why does removing the type from `line` here cause errors?
let next_line = |stream: &mut B, line: &mut Vec<u8>| {
line.clear();
let count = stream
.take(LINE_CAP as u64)
.read_until('\n' as u8, line)?;
match count {
0 => Err(InvalidRequestHeader::Format), // FIXME?
LINE_CAP => Err(InvalidRequestHeader::Format), // FIXME
_ => Ok(()),
}
};
let mut line = Vec::with_capacity(LINE_CAP);
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
let (method, uri, version) = parse_request_line(&line[..])?;
let mut fields = HeaderMap::new();
loop {
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
if line == b"" {
return Ok(RequestHeader { method, uri, version, fields });
}
let (name, value) = parse_header_field(&line)?;
// TODO: append is okay, right? No syntax issues because we haven't
// seralized anything yet.
fields.append(name, value); // TODO: we should care about result, right?
}
}
// Serializes `header` to `stream` as an HTTP/1.1 status line plus header
// fields, terminated by a blank CRLF line. The message body is NOT written
// here; the version is hard-coded to HTTP/1.1.
pub fn write_response_header<W: Write>(header: &ResponseHeader, stream: W)
    -> io::Result<()>
{
    let mut stream = BufWriter::new(stream);
    // TODO: Is this the way you're supposed to format bytes?
    // Status line: HTTP-version SP status-code SP reason-phrase CRLF.
    stream.write_all(b"HTTP/1.1")?;
    stream.write_all(b" ")?;
    stream.write_all(header.status_code.as_str().as_bytes())?;
    stream.write_all(b" ")?;
    stream.write_all(
        header
            .status_code
            .canonical_reason()
            .unwrap_or("Unknown Reason")
            .as_bytes()
    )?;
    stream.write_all(b"\r\n")?;
    // keys() yields each distinct name once; get_all() gives all its values.
    for key in header.fields.keys() {
        let mut values = header.fields.get_all(key).into_iter().peekable();
        stream.write_all(key.as_str().as_bytes())?;
        stream.write_all(b": ")?;
        match values.next() {
            Some(v) => stream.write_all(v.as_bytes())?,
            // keys() only yields names that have at least one value.
            None => panic!("what?"),
        }
        if values.peek().is_some() {
            // Multiple Set-Cookie values get one field line each; every
            // other multi-valued field is folded with commas.
            let separate_fields = key == "set-cookie";
            for v in values {
                if separate_fields {
                    stream.write_all(b"\r\n")?;
                    stream.write_all(key.as_str().as_bytes())?;
                    stream.write_all(b": ")?;
                } else {
                    stream.write_all(b",")?;
                }
                stream.write_all(v.as_bytes())?;
            }
        }
        stream.write_all(b"\r\n")?;
    }
    // Blank line terminates the header section.
    stream.write_all(b"\r\n")?;
    Ok(())
}
// Errors from parsing the request line.
#[derive(Debug)]
pub enum InvalidRequestLine {
    // The line does not match `method SP target SP version`.
    Format,
    Method(InvalidMethod),
    Uri(InvalidUriBytes),
    // Recognized shape but unsupported/unknown HTTP version.
    Version,
}
// `From` impls so `?` can lift method/URI parse errors.
impl From<InvalidMethod> for InvalidRequestLine {
    fn from(e: InvalidMethod) -> Self {
        InvalidRequestLine::Method(e)
    }
}
impl From<InvalidUriBytes> for InvalidRequestLine {
    fn from(e: InvalidUriBytes) -> Self {
        InvalidRequestLine::Uri(e)
    }
}
// Parses a request line (`method SP request-target SP HTTP-version`) from
// the raw bytes `s`; the caller has already stripped the trailing CRLF.
// Only HTTP/1.0 and HTTP/1.1 versions are accepted.
pub fn parse_request_line(s: &[u8])
    -> Result<(Method, Uri, Version), InvalidRequestLine>
{
    lazy_static! {
        // (?-u) disables unicode mode so the classes match raw bytes.
        static ref R: Regex = Regex::new(
            // method SP request-target SP HTTP-version
            r"(?-u)^(\S+) (\S+) (\S+)$"
        ).unwrap();
    }
    let cap = R.captures(s).ok_or(InvalidRequestLine::Format)?;
    Ok((
        Method::from_bytes(&cap[1])?,
        Uri::from_shared(cap[2].into())?,
        match &cap[3] {
            // rfc 7230 section A: "Any server that implements name-based
            // virtual hosts ought to disable support for HTTP/0.9."
            b"HTTP/1.0" => Version::HTTP_10,
            b"HTTP/1.1" => Version::HTTP_11,
            // We don't support HTTP 0.9 or 2.0. 2.0 support may be added later.
            // FIXME: Can we respond to an invalid version with 505 HTTP
            // Version Not Supported? If not, unsupported major versions need a
            // different error than invalid versions.
            // FIXME: We should probably accept requests with version 1.2 and
            // higher. Check the spec.
            _ => return Err(InvalidRequestLine::Version),
        },
    ))
}
// Errors from parsing a single header field line.
#[derive(Debug)]
pub enum InvalidHeaderField {
    // The line does not match `token ":" OWS field-value OWS`.
    Format,
    Name(InvalidHeaderName),
    Value(InvalidHeaderValue),
}
// `From` impls so `?` can lift name/value construction errors.
impl From<InvalidHeaderName> for InvalidHeaderField {
    fn from(e: InvalidHeaderName) -> Self {
        InvalidHeaderField::Name(e)
    }
}
impl From<InvalidHeaderValue> for InvalidHeaderField {
    fn from(e: InvalidHeaderValue) -> Self {
        InvalidHeaderField::Value(e)
    }
}
// Parses one header field line (`name: value`, CRLF already stripped) into
// a typed (HeaderName, HeaderValue) pair. Optional surrounding whitespace
// around the value is trimmed by the regex; obs-fold is not supported.
pub fn parse_header_field(s: &[u8])
    -> Result<(HeaderName, HeaderValue), InvalidHeaderField>
{
    // TODO: support obs-fold e.g. within message/http
    // (see rfc7230 section 3.2.4)
    // rfc7230 section 3.2.4: Server MUST return 400 if there's whitespace
    // between field name and colon.
    // rfc7230 section 3.2.4: If obs-fold is used outside a message/http body,
    // server MUST either return 400 or replace each such obs-fold with one or
    // more SP chars.
    lazy_static! {
        static ref R: Regex = Regex::new(&(String::new()
            // token ":" OWS *field-content OWS
            + r"(?-u)^(" + TOKEN + "):"
            + r"[\t ]*"
            // field-content: visible ASCII or high bytes, with interior
            // tabs/spaces allowed but not at either end.
            + r"([!-~\x80-\xFF]([\t !-~\x80-\xFF]*[!-~\x80-\xFF])?)"
            + r"[\t ]*$"
        )).unwrap();
    }
    let cap = R.captures(s).ok_or(InvalidHeaderField::Format)?;
    Ok((
        HeaderName::from_bytes(&cap[1])?,
        // TODO: HeaderValue might not fully validate input.
        HeaderValue::from_bytes(&cap[2])?,
    ))
}
#[cfg(test)]
mod test {
use crate::{
parse_request_header,
parse_request_line,
parse_header_field,
ResponseHeader,
write_response_header,
};
use http::header::{
HeaderMap,
HeaderValue,
};
use http::{
Method,
StatusCode,
Version,
};
#[test]
fn test_parse_request_header() {
let mut s = Vec::new();
// TODO: There's a better way to do this, right?
s.extend(
&b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.1\r\n"[..]
);
s.extend(&b"Host: foo.example.com\r\n"[..]);
s.extend(&b"Content-Type: application/json\r\n"[..]);
s.extend(&b"\r\n"[..]);
let h = parse_request_header(&s[..]).unwrap();
assert_eq!(h.method, Method::POST);
assert_eq!(h.uri.scheme_str().unwrap(), "http");
assert_eq!(h.uri.host().unwrap(), "foo.example.com");
assert_eq!(h.uri.port_part(), None);
assert_eq!(h.uri.path(), "/bar");
assert_eq!(h.uri.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(h.version, Version::HTTP_11);
assert_eq!(h.fields["host"], "foo.example.com");
assert_eq!(h.fields["content-type"], "application/json");
}
#[test]
fn test_write_response_header() {
let mut s = Vec::new();
let mut h = ResponseHeader {
status_code: StatusCode::from_u16(404).unwrap(),
fields: HeaderMap::new(),
};
write_response_header(&h, &mut s).unwrap();
assert_eq!(s, b"HTTP/1.1 404 Not Found\r\n\r\n");
h.fields.append("set-cookie", HeaderValue::from_static(
"FOO=\"some text\""
));
h.fields.append("Set-cookie", HeaderValue::from_static(
"BAR=\"some other text\""
));
h.fields.append("LOCATION", HeaderValue::from_static(
"http://example.com:3180/foo&bar"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"en"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"de"
));
s.clear();
write_response_header(&h, &mut s).unwrap();
assert!(s.starts_with(b"HTTP/1.1 404 Not Found\r\n"));
}
#[test]
fn test_parse_request_line() {
let s = b"OPTIONS * HTTP/1.1";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::OPTIONS);
assert_eq!(u.path(), "*");
assert_eq!(v, Version::HTTP_11);
let s = b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.0";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::POST);
assert_eq!(u.scheme_str().unwrap(), "http");
assert_eq!(u.host().unwrap(), "foo.example.com");
assert_eq!(u.port_part(), None);
assert_eq!(u.path(), "/bar");
assert_eq!(u.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(v, Version::HTTP_10);
}
#[test]
fn test_parse_header_field() {
let s = b"Content-Type: application/json; charset=\"\xAA\xBB\xCC\"";
let (h, v) = parse_header_field(s).unwrap();
assert_eq!(
h,
http::header::CONTENT_TYPE,
);
assert_eq!(
v,
HeaderValue::from_bytes(
&b"application/json; charset=\"\xAA\xBB\xCC\""[..]
).unwrap(),
);
}
} | random_line_split | |
lib.rs | use http::{
Method,
StatusCode,
Uri,
Version,
};
use http::header::{
HeaderMap,
HeaderName,
HeaderValue,
InvalidHeaderName,
InvalidHeaderValue,
};
use http::method::InvalidMethod;
use http::uri::InvalidUriBytes;
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::io;
use std::io::{BufRead, BufWriter, Read, Write};
pub mod media_type;
static QUOTED_STRING_1G: &str =
r#""([\t !#-\[\]-~\x80-\xFF]|\\[\t !-~\x80-\xFF])*""#;
static TOKEN: &str = r"[!#$%&'*+.^_`|~0-9A-Za-z-]+";
#[derive(Debug)]
pub struct RequestHeader {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub struct ResponseHeader {
pub status_code: StatusCode,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub enum InvalidRequestHeader {
Format,
RequestLine(InvalidRequestLine),
HeaderField(InvalidHeaderField),
Io(io::Error),
}
impl From<InvalidRequestLine> for InvalidRequestHeader {
fn from(e: InvalidRequestLine) -> Self {
InvalidRequestHeader::RequestLine(e)
}
}
impl From<InvalidHeaderField> for InvalidRequestHeader {
fn from(e: InvalidHeaderField) -> Self {
InvalidRequestHeader::HeaderField(e)
}
}
impl From<io::Error> for InvalidRequestHeader {
fn from(e: io::Error) -> Self {
InvalidRequestHeader::Io(e)
}
}
const LINE_CAP: usize = 16384;
pub fn parse_request_header<B: BufRead>(mut stream: B)
-> Result<RequestHeader, InvalidRequestHeader>
{
// TODO: Why does removing the type from `line` here cause errors?
let next_line = |stream: &mut B, line: &mut Vec<u8>| {
line.clear();
let count = stream
.take(LINE_CAP as u64)
.read_until('\n' as u8, line)?;
match count {
0 => Err(InvalidRequestHeader::Format), // FIXME?
LINE_CAP => Err(InvalidRequestHeader::Format), // FIXME
_ => Ok(()),
}
};
let mut line = Vec::with_capacity(LINE_CAP);
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
let (method, uri, version) = parse_request_line(&line[..])?;
let mut fields = HeaderMap::new();
loop {
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
if line == b"" {
return Ok(RequestHeader { method, uri, version, fields });
}
let (name, value) = parse_header_field(&line)?;
// TODO: append is okay, right? No syntax issues because we haven't
// seralized anything yet.
fields.append(name, value); // TODO: we should care about result, right?
}
}
pub fn write_response_header<W: Write>(header: &ResponseHeader, stream: W)
-> io::Result<()>
{
let mut stream = BufWriter::new(stream);
// TODO: Is this the way you're supposed to format bytes?
stream.write_all(b"HTTP/1.1")?;
stream.write_all(b" ")?;
stream.write_all(header.status_code.as_str().as_bytes())?;
stream.write_all(b" ")?;
stream.write_all(
header
.status_code
.canonical_reason()
.unwrap_or("Unknown Reason")
.as_bytes()
)?;
stream.write_all(b"\r\n")?;
for key in header.fields.keys() {
let mut values = header.fields.get_all(key).into_iter().peekable();
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
match values.next() {
Some(v) => stream.write_all(v.as_bytes())?,
None => panic!("what?"),
}
if values.peek().is_some() {
let separate_fields = key == "set-cookie";
for v in values {
if separate_fields {
stream.write_all(b"\r\n")?;
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
} else {
stream.write_all(b",")?;
}
stream.write_all(v.as_bytes())?;
}
}
stream.write_all(b"\r\n")?;
}
stream.write_all(b"\r\n")?;
Ok(())
}
#[derive(Debug)]
pub enum InvalidRequestLine {
Format,
Method(InvalidMethod),
Uri(InvalidUriBytes),
Version,
}
impl From<InvalidMethod> for InvalidRequestLine {
fn from(e: InvalidMethod) -> Self {
InvalidRequestLine::Method(e)
}
}
impl From<InvalidUriBytes> for InvalidRequestLine {
fn from(e: InvalidUriBytes) -> Self {
InvalidRequestLine::Uri(e)
}
}
pub fn parse_request_line(s: &[u8])
-> Result<(Method, Uri, Version), InvalidRequestLine>
{
lazy_static! {
static ref R: Regex = Regex::new(
// method SP request-target SP HTTP-version
r"(?-u)^(\S+) (\S+) (\S+)$"
).unwrap();
}
let cap = R.captures(s).ok_or(InvalidRequestLine::Format)?;
Ok((
Method::from_bytes(&cap[1])?,
Uri::from_shared(cap[2].into())?,
match &cap[3] {
// rfc 7230 section A: "Any server that implements name-based
// virtual hosts ought to disable support for HTTP/0.9."
b"HTTP/1.0" => Version::HTTP_10,
b"HTTP/1.1" => Version::HTTP_11,
// We don't support HTTP 0.9 or 2.0. 2.0 support may be added later.
// FIXME: Can we respond to an invalid version with 505 HTTP
// Version Not Supported? If not, unsupported major versions need a
// different error than invalid versions.
// FIXME: We should probably accept requests with version 1.2 and
// higher. Check the spec.
_ => return Err(InvalidRequestLine::Version),
},
))
}
#[derive(Debug)]
pub enum InvalidHeaderField {
Format,
Name(InvalidHeaderName),
Value(InvalidHeaderValue),
}
impl From<InvalidHeaderName> for InvalidHeaderField {
fn from(e: InvalidHeaderName) -> Self {
InvalidHeaderField::Name(e)
}
}
impl From<InvalidHeaderValue> for InvalidHeaderField {
fn from(e: InvalidHeaderValue) -> Self {
InvalidHeaderField::Value(e)
}
}
pub fn parse_header_field(s: &[u8])
-> Result<(HeaderName, HeaderValue), InvalidHeaderField>
{
// TODO: support obs-fold e.g. within message/http
// (see rfc7230 section 3.2.4)
// rfc7230 section 3.2.4: Server MUST return 400 if there's whitespace
// between field name and colon.
// rfc7230 section 3.2.4: If obs-fold is used outside a message/http body,
// server MUST either return 400 or replace each such obs-fold with one or
// more SP chars.
lazy_static! {
static ref R: Regex = Regex::new(&(String::new()
// token ":" OWS *field-content OWS
+ r"(?-u)^(" + TOKEN + "):"
+ r"[\t ]*"
+ r"([!-~\x80-\xFF]([\t !-~\x80-\xFF]*[!-~\x80-\xFF])?)"
+ r"[\t ]*$"
)).unwrap();
}
let cap = R.captures(s).ok_or(InvalidHeaderField::Format)?;
Ok((
HeaderName::from_bytes(&cap[1])?,
// TODO: HeaderValue might not fully validate input.
HeaderValue::from_bytes(&cap[2])?,
))
}
#[cfg(test)]
mod test {
use crate::{
parse_request_header,
parse_request_line,
parse_header_field,
ResponseHeader,
write_response_header,
};
use http::header::{
HeaderMap,
HeaderValue,
};
use http::{
Method,
StatusCode,
Version,
};
#[test]
fn test_parse_request_header() {
let mut s = Vec::new();
// TODO: There's a better way to do this, right?
s.extend(
&b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.1\r\n"[..]
);
s.extend(&b"Host: foo.example.com\r\n"[..]);
s.extend(&b"Content-Type: application/json\r\n"[..]);
s.extend(&b"\r\n"[..]);
let h = parse_request_header(&s[..]).unwrap();
assert_eq!(h.method, Method::POST);
assert_eq!(h.uri.scheme_str().unwrap(), "http");
assert_eq!(h.uri.host().unwrap(), "foo.example.com");
assert_eq!(h.uri.port_part(), None);
assert_eq!(h.uri.path(), "/bar");
assert_eq!(h.uri.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(h.version, Version::HTTP_11);
assert_eq!(h.fields["host"], "foo.example.com");
assert_eq!(h.fields["content-type"], "application/json");
}
#[test]
fn test_write_response_header() {
let mut s = Vec::new();
let mut h = ResponseHeader {
status_code: StatusCode::from_u16(404).unwrap(),
fields: HeaderMap::new(),
};
write_response_header(&h, &mut s).unwrap();
assert_eq!(s, b"HTTP/1.1 404 Not Found\r\n\r\n");
h.fields.append("set-cookie", HeaderValue::from_static(
"FOO=\"some text\""
));
h.fields.append("Set-cookie", HeaderValue::from_static(
"BAR=\"some other text\""
));
h.fields.append("LOCATION", HeaderValue::from_static(
"http://example.com:3180/foo&bar"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"en"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"de"
));
s.clear();
write_response_header(&h, &mut s).unwrap();
assert!(s.starts_with(b"HTTP/1.1 404 Not Found\r\n"));
}
#[test]
fn test_parse_request_line() |
#[test]
fn test_parse_header_field() {
let s = b"Content-Type: application/json; charset=\"\xAA\xBB\xCC\"";
let (h, v) = parse_header_field(s).unwrap();
assert_eq!(
h,
http::header::CONTENT_TYPE,
);
assert_eq!(
v,
HeaderValue::from_bytes(
&b"application/json; charset=\"\xAA\xBB\xCC\""[..]
).unwrap(),
);
}
}
| {
let s = b"OPTIONS * HTTP/1.1";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::OPTIONS);
assert_eq!(u.path(), "*");
assert_eq!(v, Version::HTTP_11);
let s = b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.0";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::POST);
assert_eq!(u.scheme_str().unwrap(), "http");
assert_eq!(u.host().unwrap(), "foo.example.com");
assert_eq!(u.port_part(), None);
assert_eq!(u.path(), "/bar");
assert_eq!(u.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(v, Version::HTTP_10);
} | identifier_body |
lib.rs | use http::{
Method,
StatusCode,
Uri,
Version,
};
use http::header::{
HeaderMap,
HeaderName,
HeaderValue,
InvalidHeaderName,
InvalidHeaderValue,
};
use http::method::InvalidMethod;
use http::uri::InvalidUriBytes;
use lazy_static::lazy_static;
use regex::bytes::Regex;
use std::io;
use std::io::{BufRead, BufWriter, Read, Write};
pub mod media_type;
static QUOTED_STRING_1G: &str =
r#""([\t !#-\[\]-~\x80-\xFF]|\\[\t !-~\x80-\xFF])*""#;
static TOKEN: &str = r"[!#$%&'*+.^_`|~0-9A-Za-z-]+";
#[derive(Debug)]
pub struct RequestHeader {
pub method: Method,
pub uri: Uri,
pub version: Version,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub struct ResponseHeader {
pub status_code: StatusCode,
pub fields: HeaderMap,
}
#[derive(Debug)]
pub enum InvalidRequestHeader {
Format,
RequestLine(InvalidRequestLine),
HeaderField(InvalidHeaderField),
Io(io::Error),
}
impl From<InvalidRequestLine> for InvalidRequestHeader {
fn from(e: InvalidRequestLine) -> Self {
InvalidRequestHeader::RequestLine(e)
}
}
impl From<InvalidHeaderField> for InvalidRequestHeader {
fn | (e: InvalidHeaderField) -> Self {
InvalidRequestHeader::HeaderField(e)
}
}
impl From<io::Error> for InvalidRequestHeader {
fn from(e: io::Error) -> Self {
InvalidRequestHeader::Io(e)
}
}
const LINE_CAP: usize = 16384;
pub fn parse_request_header<B: BufRead>(mut stream: B)
-> Result<RequestHeader, InvalidRequestHeader>
{
// TODO: Why does removing the type from `line` here cause errors?
let next_line = |stream: &mut B, line: &mut Vec<u8>| {
line.clear();
let count = stream
.take(LINE_CAP as u64)
.read_until('\n' as u8, line)?;
match count {
0 => Err(InvalidRequestHeader::Format), // FIXME?
LINE_CAP => Err(InvalidRequestHeader::Format), // FIXME
_ => Ok(()),
}
};
let mut line = Vec::with_capacity(LINE_CAP);
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
let (method, uri, version) = parse_request_line(&line[..])?;
let mut fields = HeaderMap::new();
loop {
next_line(&mut stream, &mut line)?;
if !line.ends_with(b"\r\n") {
return Err(InvalidRequestHeader::Format);
}
line.truncate(line.len() - 2);
if line == b"" {
return Ok(RequestHeader { method, uri, version, fields });
}
let (name, value) = parse_header_field(&line)?;
// TODO: append is okay, right? No syntax issues because we haven't
// seralized anything yet.
fields.append(name, value); // TODO: we should care about result, right?
}
}
pub fn write_response_header<W: Write>(header: &ResponseHeader, stream: W)
-> io::Result<()>
{
let mut stream = BufWriter::new(stream);
// TODO: Is this the way you're supposed to format bytes?
stream.write_all(b"HTTP/1.1")?;
stream.write_all(b" ")?;
stream.write_all(header.status_code.as_str().as_bytes())?;
stream.write_all(b" ")?;
stream.write_all(
header
.status_code
.canonical_reason()
.unwrap_or("Unknown Reason")
.as_bytes()
)?;
stream.write_all(b"\r\n")?;
for key in header.fields.keys() {
let mut values = header.fields.get_all(key).into_iter().peekable();
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
match values.next() {
Some(v) => stream.write_all(v.as_bytes())?,
None => panic!("what?"),
}
if values.peek().is_some() {
let separate_fields = key == "set-cookie";
for v in values {
if separate_fields {
stream.write_all(b"\r\n")?;
stream.write_all(key.as_str().as_bytes())?;
stream.write_all(b": ")?;
} else {
stream.write_all(b",")?;
}
stream.write_all(v.as_bytes())?;
}
}
stream.write_all(b"\r\n")?;
}
stream.write_all(b"\r\n")?;
Ok(())
}
#[derive(Debug)]
pub enum InvalidRequestLine {
Format,
Method(InvalidMethod),
Uri(InvalidUriBytes),
Version,
}
impl From<InvalidMethod> for InvalidRequestLine {
fn from(e: InvalidMethod) -> Self {
InvalidRequestLine::Method(e)
}
}
impl From<InvalidUriBytes> for InvalidRequestLine {
fn from(e: InvalidUriBytes) -> Self {
InvalidRequestLine::Uri(e)
}
}
pub fn parse_request_line(s: &[u8])
-> Result<(Method, Uri, Version), InvalidRequestLine>
{
lazy_static! {
static ref R: Regex = Regex::new(
// method SP request-target SP HTTP-version
r"(?-u)^(\S+) (\S+) (\S+)$"
).unwrap();
}
let cap = R.captures(s).ok_or(InvalidRequestLine::Format)?;
Ok((
Method::from_bytes(&cap[1])?,
Uri::from_shared(cap[2].into())?,
match &cap[3] {
// rfc 7230 section A: "Any server that implements name-based
// virtual hosts ought to disable support for HTTP/0.9."
b"HTTP/1.0" => Version::HTTP_10,
b"HTTP/1.1" => Version::HTTP_11,
// We don't support HTTP 0.9 or 2.0. 2.0 support may be added later.
// FIXME: Can we respond to an invalid version with 505 HTTP
// Version Not Supported? If not, unsupported major versions need a
// different error than invalid versions.
// FIXME: We should probably accept requests with version 1.2 and
// higher. Check the spec.
_ => return Err(InvalidRequestLine::Version),
},
))
}
#[derive(Debug)]
pub enum InvalidHeaderField {
Format,
Name(InvalidHeaderName),
Value(InvalidHeaderValue),
}
impl From<InvalidHeaderName> for InvalidHeaderField {
fn from(e: InvalidHeaderName) -> Self {
InvalidHeaderField::Name(e)
}
}
impl From<InvalidHeaderValue> for InvalidHeaderField {
fn from(e: InvalidHeaderValue) -> Self {
InvalidHeaderField::Value(e)
}
}
pub fn parse_header_field(s: &[u8])
-> Result<(HeaderName, HeaderValue), InvalidHeaderField>
{
// TODO: support obs-fold e.g. within message/http
// (see rfc7230 section 3.2.4)
// rfc7230 section 3.2.4: Server MUST return 400 if there's whitespace
// between field name and colon.
// rfc7230 section 3.2.4: If obs-fold is used outside a message/http body,
// server MUST either return 400 or replace each such obs-fold with one or
// more SP chars.
lazy_static! {
static ref R: Regex = Regex::new(&(String::new()
// token ":" OWS *field-content OWS
+ r"(?-u)^(" + TOKEN + "):"
+ r"[\t ]*"
+ r"([!-~\x80-\xFF]([\t !-~\x80-\xFF]*[!-~\x80-\xFF])?)"
+ r"[\t ]*$"
)).unwrap();
}
let cap = R.captures(s).ok_or(InvalidHeaderField::Format)?;
Ok((
HeaderName::from_bytes(&cap[1])?,
// TODO: HeaderValue might not fully validate input.
HeaderValue::from_bytes(&cap[2])?,
))
}
#[cfg(test)]
mod test {
use crate::{
parse_request_header,
parse_request_line,
parse_header_field,
ResponseHeader,
write_response_header,
};
use http::header::{
HeaderMap,
HeaderValue,
};
use http::{
Method,
StatusCode,
Version,
};
#[test]
fn test_parse_request_header() {
let mut s = Vec::new();
// TODO: There's a better way to do this, right?
s.extend(
&b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.1\r\n"[..]
);
s.extend(&b"Host: foo.example.com\r\n"[..]);
s.extend(&b"Content-Type: application/json\r\n"[..]);
s.extend(&b"\r\n"[..]);
let h = parse_request_header(&s[..]).unwrap();
assert_eq!(h.method, Method::POST);
assert_eq!(h.uri.scheme_str().unwrap(), "http");
assert_eq!(h.uri.host().unwrap(), "foo.example.com");
assert_eq!(h.uri.port_part(), None);
assert_eq!(h.uri.path(), "/bar");
assert_eq!(h.uri.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(h.version, Version::HTTP_11);
assert_eq!(h.fields["host"], "foo.example.com");
assert_eq!(h.fields["content-type"], "application/json");
}
#[test]
fn test_write_response_header() {
let mut s = Vec::new();
let mut h = ResponseHeader {
status_code: StatusCode::from_u16(404).unwrap(),
fields: HeaderMap::new(),
};
write_response_header(&h, &mut s).unwrap();
assert_eq!(s, b"HTTP/1.1 404 Not Found\r\n\r\n");
h.fields.append("set-cookie", HeaderValue::from_static(
"FOO=\"some text\""
));
h.fields.append("Set-cookie", HeaderValue::from_static(
"BAR=\"some other text\""
));
h.fields.append("LOCATION", HeaderValue::from_static(
"http://example.com:3180/foo&bar"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"en"
));
h.fields.append("Content-Language", HeaderValue::from_static(
"de"
));
s.clear();
write_response_header(&h, &mut s).unwrap();
assert!(s.starts_with(b"HTTP/1.1 404 Not Found\r\n"));
}
#[test]
fn test_parse_request_line() {
let s = b"OPTIONS * HTTP/1.1";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::OPTIONS);
assert_eq!(u.path(), "*");
assert_eq!(v, Version::HTTP_11);
let s = b"POST http://foo.example.com/bar?qux=19&qux=xyz HTTP/1.0";
let (m, u, v) = parse_request_line(s).unwrap();
assert_eq!(m, Method::POST);
assert_eq!(u.scheme_str().unwrap(), "http");
assert_eq!(u.host().unwrap(), "foo.example.com");
assert_eq!(u.port_part(), None);
assert_eq!(u.path(), "/bar");
assert_eq!(u.query().unwrap(), "qux=19&qux=xyz");
assert_eq!(v, Version::HTTP_10);
}
#[test]
fn test_parse_header_field() {
let s = b"Content-Type: application/json; charset=\"\xAA\xBB\xCC\"";
let (h, v) = parse_header_field(s).unwrap();
assert_eq!(
h,
http::header::CONTENT_TYPE,
);
assert_eq!(
v,
HeaderValue::from_bytes(
&b"application/json; charset=\"\xAA\xBB\xCC\""[..]
).unwrap(),
);
}
}
| from | identifier_name |
api_export_tools.py | # -*- coding: utf-8 -*-
"""
API export utility functions.
"""
import json
import os
import sys
from datetime import datetime
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials # noqa
from django.conf import settings
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
import six
from celery.backends.rpc import BacklogLimitExceeded
from celery.result import AsyncResult
from kombu.exceptions import OperationalError
from rest_framework import exceptions, status
from rest_framework.response import Response
from rest_framework.reverse import reverse
try:
from savReaderWriter import SPSSIOError
except ImportError:
SPSSIOError = Exception
from onadata.apps.main.models import TokenStorageModel
from onadata.apps.viewer import tasks as viewer_task
from onadata.apps.viewer.models.export import Export, ExportConnectionError
from onadata.libs.exceptions import (
J2XException,
NoRecordsFoundError,
NoRecordsPermission,
ServiceUnavailable,
)
from onadata.libs.permissions import filter_queryset_xform_meta_perms_sql
from onadata.libs.utils import log
from onadata.libs.utils.async_status import (
FAILED,
PENDING,
SUCCESSFUL,
async_status,
celery_state_to_status,
)
from onadata.libs.utils.common_tags import (
DATAVIEW_EXPORT,
GROUPNAME_REMOVED_FLAG,
OSM,
SUBMISSION_TIME,
)
from onadata.libs.utils.common_tools import report_exception
from onadata.libs.utils.export_tools import (
check_pending_export,
generate_attachments_zip_export,
generate_export,
generate_external_export,
generate_geojson_export,
generate_kml_export,
generate_osm_export,
newest_export_for,
parse_request_export_options,
should_create_new_export,
)
from onadata.libs.utils.google import create_flow
from onadata.libs.utils.logger_tools import response_with_mimetype_and_name
from onadata.libs.utils.model_tools import get_columns_with_hxl
from onadata.settings.common import XLS_EXTENSIONS
# Supported external exports
EXTERNAL_EXPORT_TYPES = ["xlsx"]

# Maps the export-format slug used in request URLs/query params to the
# corresponding ``Export`` type constant.
EXPORT_EXT = {
    "xlsx": Export.XLSX_EXPORT,
    "csv": Export.CSV_EXPORT,
    "csvzip": Export.CSV_ZIP_EXPORT,
    "savzip": Export.SAV_ZIP_EXPORT,
    "uuid": Export.EXTERNAL_EXPORT,
    "kml": Export.KML_EXPORT,
    "zip": Export.ZIP_EXPORT,
    OSM: Export.OSM_EXPORT,
    "gsheets": Export.GOOGLE_SHEETS_EXPORT,
    "geojson": Export.GEOJSON_EXPORT,
}
def get_metadata_format(data_value):
    """Return the export format/extension for a metadata ``data_value``.

    GeoJSON-linked metadata (``data_value`` beginning with
    ``xform_geojson`` or ``dataview_geojson``) exports as ``"geojson"``;
    everything else defaults to ``"csv"``.

    :param data_value: metadata ``data_value`` string.
    :return: ``"geojson"`` or ``"csv"``.
    """
    # str.startswith accepts a tuple of prefixes: one call instead of two.
    if data_value.startswith(("xform_geojson", "dataview_geojson")):
        return "geojson"
    return "csv"
def include_hxl_row(dv_columns, hxl_columns):
    """
    Decide whether the HXL tag row should be included in a dataview export.

    Returns True only when at least one of the dataview's columns is also
    an HXL column on the dataview's xform; if none overlap there is no HXL
    row worth adding.

    :param dv_columns - dataview columns
    :param hxl_columns - hxl columns from the dataview's xform
    :return True or False
    """
    # Equivalent to bool(set(hxl_columns) & set(dv_columns)) without
    # materializing the intersection.
    return not set(hxl_columns).isdisjoint(dv_columns)
def _get_export_type(export_type):
    """Translate a request export-type slug into an ``Export`` constant.

    Raises ``exceptions.ParseError`` for unknown formats, and for Google
    Sheets exports when ``settings.GOOGLE_EXPORT`` is not enabled.
    """
    known = export_type in EXPORT_EXT
    sheets_disabled = export_type == Export.GOOGLE_SHEETS_EXPORT and not getattr(
        settings, "GOOGLE_EXPORT", False
    )
    if known and not sheets_disabled:
        return EXPORT_EXT[export_type]
    raise exceptions.ParseError(
        _(f"'{export_type}' format not known or not implemented!")
    )
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
def custom_response_handler( # noqa: C0901
request,
xform,
query,
export_type,
token=None,
meta=None,
dataview=False,
filename=None,
metadata=None,
):
"""
Returns a HTTP response with export file for download.
"""
export_type = _get_export_type(export_type)
if (
export_type in EXTERNAL_EXPORT_TYPES
and (token is not None)
or (meta is not None)
):
export_type = Export.EXTERNAL_EXPORT
options = parse_request_export_options(request.query_params)
dataview_pk = hasattr(dataview, "pk") and dataview.pk
options["dataview_pk"] = dataview_pk
options["host"] = request.get_host()
if dataview:
columns_with_hxl = get_columns_with_hxl(xform.survey.get("children"))
if columns_with_hxl:
options["include_hxl"] = include_hxl_row(
dataview.columns, list(columns_with_hxl)
)
try:
query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
except NoRecordsPermission:
return Response(
data=json.dumps({"details": _("You don't have permission")}),
status=status.HTTP_403_FORBIDDEN,
content_type="application/json",
)
if query:
options["query"] = query
remove_group_name = options.get("remove_group_name")
export_id = request.query_params.get("export_id")
if export_id:
export = get_object_or_404(Export, id=export_id, xform=xform)
else:
if export_type == Export.GOOGLE_SHEETS_EXPORT:
return Response(
data=json.dumps(
{"details": _("Sheets export only supported in async mode")}
),
status=status.HTTP_403_FORBIDDEN,
content_type="application/json",
)
# check if we need to re-generate,
# we always re-generate if a filter is specified
def _new_export():
return _generate_new_export(
request,
xform,
query,
export_type,
dataview_pk=dataview_pk,
metadata=metadata,
)
if should_create_new_export(xform, export_type, options, request=request):
export = _new_export()
else:
export = newest_export_for(xform, export_type, options)
if not export.filename and not export.error_message:
export = _new_export()
log_export(request, xform, export_type)
if export_type == Export.EXTERNAL_EXPORT:
return external_export_response(export)
if export.filename is None and export.error_message:
|
# get extension from file_path, exporter could modify to
# xlsx if it exceeds limits
_path, ext = os.path.splitext(export.filename)
ext = ext[1:]
show_date = True
if filename is None and export.status == Export.SUCCESSFUL:
filename = _generate_filename(
request, xform, remove_group_name, dataview_pk=dataview_pk
)
else:
show_date = False
response = response_with_mimetype_and_name(
Export.EXPORT_MIMES[ext],
filename,
extension=ext,
show_date=show_date,
file_path=export.filepath,
)
return response
def _generate_new_export(  # noqa: C0901
    request, xform, query, export_type, dataview_pk=False, metadata=None
):
    """Create and persist a new export of ``export_type`` for ``xform``.

    Dispatches to the dedicated generator for external, OSM, zip, KML and
    GeoJSON exports, and to the generic ``generate_export`` otherwise.
    Audit-logs the creation on success.

    :raises Http404: when there are no records to export.
    :raises exceptions.ParseError: on SPSS (SAV) generation errors.
    :return: the new export on success, or an async FAILED status when the
        external (j2x) export raises ``J2XException``.
    """
    query = _set_start_end_params(request, query)
    extension = _get_extension_from_export_type(export_type)
    options = {
        "extension": extension,
        "username": xform.user.username,
        "id_string": xform.id_string,
        "host": request.get_host(),
        "sort": request.query_params.get('sort')
    }
    if query:
        options["query"] = query
    options["dataview_pk"] = dataview_pk
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        # NOTE(review): assumes Google credentials can be obtained for this
        # request; confirm _get_google_credential's failure behavior.
        options["google_credentials"] = _get_google_credential(request).to_json()
    try:
        if export_type == Export.EXTERNAL_EXPORT:
            # External (XLSReports) exports carry their token/data_id/meta
            # through the query string.
            options["token"] = request.GET.get("token")
            options["data_id"] = request.GET.get("data_id")
            options["meta"] = request.GET.get("meta")
            export = generate_external_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.OSM_EXPORT:
            export = generate_osm_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.ZIP_EXPORT:
            export = generate_attachments_zip_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.KML_EXPORT:
            export = generate_kml_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.GEOJSON_EXPORT:
            # GeoJSON is the only branch that forwards ``metadata``.
            export = generate_geojson_export(
                export_type,
                xform.user.username,
                xform.id_string,
                metadata,
                None,
                options,
                xform=xform,
            )
        else:
            options.update(parse_request_export_options(request.query_params))
            export = generate_export(export_type, xform, None, options)
        audit = {"xform": xform.id_string, "export_type": export_type}
        log.audit_log(
            log.Actions.EXPORT_CREATED,
            request.user,
            xform.user,
            _("Created %(export_type)s export on '%(id_string)s'.")
            % {"id_string": xform.id_string, "export_type": export_type.upper()},
            audit,
            request,
        )
    except NoRecordsFoundError as e:
        raise Http404(_("No records found to export")) from e
    except J2XException as e:
        # j2x exception
        return async_status(FAILED, str(e))
    except SPSSIOError as e:
        raise exceptions.ParseError(str(e)) from e
    else:
        # try/else: only reached when generation raised no exception.
        return export
def log_export(request, xform, export_type):
    """Write an audit-log entry recording that an export was downloaded."""
    # log download as well
    audit_details = {"xform": xform.id_string, "export_type": export_type}
    message = _(f"Downloaded {export_type.upper()} export on '{xform.id_string}'.")
    log.audit_log(
        log.Actions.EXPORT_DOWNLOADED,
        request.user,
        xform.user,
        message,
        audit_details,
        request,
    )
def external_export_response(export):
    """
    Redirects to export_url of XLSReports successful export. In case of a
    failure, returns a 400 HTTP JSON response with the error message.
    """
    succeeded = (
        isinstance(export, Export) and export.internal_status == Export.SUCCESSFUL
    )
    if succeeded:
        return HttpResponseRedirect(export.export_url)
    # Failure: the error payload is whatever the external service returned.
    return Response(
        json.dumps(export),
        status.HTTP_400_BAD_REQUEST,
        content_type="application/json",
    )
def _generate_filename(request, xform, remove_group_name=False, dataview_pk=False):
    """Derive the export download filename, or ``None`` for raw downloads."""
    if request.GET.get("raw"):
        return None
    # Flag group-name removal / dataview exports in the filename,
    # otherwise just use the form id_string.
    if remove_group_name:
        return f"{xform.id_string}-{GROUPNAME_REMOVED_FLAG}"
    if dataview_pk:
        return f"{xform.id_string}-{DATAVIEW_EXPORT}"
    return xform.id_string
def _set_start_end_params(request, query):
    """Merge optional ``start``/``end`` date filters into *query*.

    When either query param is present the (possibly JSON-string) query is
    parsed, a SUBMISSION_TIME range is added, and the query is re-serialised
    to a JSON string.  Without the params *query* is returned untouched.

    :raises exceptions.ParseError: when a date param is malformed.
    """
    # check for start and end params
    if "start" in request.GET or "end" in request.GET:
        query = json.loads(query) if isinstance(query, six.string_types) else query
        query[SUBMISSION_TIME] = {}
        try:
            if request.GET.get("start"):
                query[SUBMISSION_TIME]["$gte"] = _format_date_for_mongo(
                    request.GET["start"]
                )
            if request.GET.get("end"):
                query[SUBMISSION_TIME]["$lte"] = _format_date_for_mongo(
                    request.GET["end"]
                )
        except ValueError as e:
            raise exceptions.ParseError(
                _("Dates must be in the format YY_MM_DD_hh_mm_ss")
            ) from e
        else:
            # Only re-serialise when both dates parsed successfully.
            query = json.dumps(query)
    return query
def _get_extension_from_export_type(export_type):
    """Map an export type to its on-disk file extension."""
    # Zip-packaged and xlsx exports override the default (the type itself).
    overrides = {
        Export.XLSX_EXPORT: "xlsx",
        Export.CSV_ZIP_EXPORT: "zip",
        Export.SAV_ZIP_EXPORT: "zip",
    }
    return overrides.get(export_type, export_type)
# pylint: disable=invalid-name
def _format_date_for_mongo(datetime_str):
return datetime.strptime(datetime_str, "%y_%m_%d_%H_%M_%S").strftime(
"%Y-%m-%dT%H:%M:%S"
)
def process_async_export(request, xform, export_type, options=None):
    """
    Check if should generate export or just return the latest export.

    Rules for regenerating an export are:
    1. Filter included on the exports.
    2. New submission done.
    3. Always regenerate external exports.
       (External exports use templates and the template might have changed.)

    :param request: HTTP request; user drives permission filtering.
    :param xform: the form whose submissions are being exported.
    :param export_type: export-type slug (key of ``EXPORT_EXT``).
    :param options: additional export params that may include
        query: export filter
        token: template url for xls external reports
        meta: metadataid that contains the external xls report template url
        remove_group_name: Flag to determine if group names should appear
    :return: response dictionary (job_uuid or export status), or an
        HttpResponseRedirect/Response for auth and permission failures.
    """
    # maintain the order of keys while processing the export
    export_type = _get_export_type(export_type)
    token = options.get("token")
    meta = options.get("meta")
    query = options.get("query")
    force_xlsx = options.get("force_xlsx")
    try:
        # Narrow the export query to the rows this user may actually see.
        query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
    except NoRecordsPermission:
        payload = {"details": _("You don't have permission")}
        return Response(
            data=json.dumps(payload),
            status=status.HTTP_403_FORBIDDEN,
            content_type="application/json",
        )
    else:
        if query:
            options["query"] = query
    # NOTE(review): `and` binds tighter than `or`, so a non-None `meta`
    # alone switches ANY export type to EXTERNAL_EXPORT — confirm this
    # precedence is intentional before adding parentheses.
    if (
        export_type in EXTERNAL_EXPORT_TYPES
        and (token is not None)
        or (meta is not None)
    ):
        export_type = Export.EXTERNAL_EXPORT
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        credential = _get_google_credential(request)
        if isinstance(credential, HttpResponseRedirect):
            # User must first complete the Google OAuth flow.
            return credential
        options["google_credentials"] = credential.to_json()
    if (
        should_create_new_export(xform, export_type, options, request=request)
        or export_type == Export.EXTERNAL_EXPORT
    ):
        resp = {
            "job_uuid": _create_export_async(
                xform, export_type, query, force_xlsx, options=options
            )
        }
    else:
        # Leftover debug print removed; reuse the most recent export.
        export = newest_export_for(xform, export_type, options)
        if not export.filename:
            # tends to happen when using newest_export_for.
            resp = {
                "job_uuid": _create_export_async(
                    xform, export_type, query, force_xlsx, options=options
                )
            }
        else:
            resp = export_async_export_response(request, export)
    return resp
def _create_export_async(
    xform, export_type, query=None, force_xlsx=False, options=None
):
    """
    Creates async exports.

    :param xform:
    :param export_type:
    :param query:
    :param force_xlsx:
    :param options:
    :return: job_uuid generated
    """
    pending = check_pending_export(xform, export_type, options)
    if pending:
        # A matching job is already queued — reuse it instead of duplicating.
        return pending.task_id
    try:
        _export, async_result = viewer_task.create_async_export(
            xform, export_type, query, force_xlsx, options=options
        )
    except ExportConnectionError as e:
        raise ServiceUnavailable from e
    return async_result.task_id
def export_async_export_response(request, export):
    """
    Checks the export status and generates the response.

    :param request:
    :param export:
    :return: response dict example {"job_status": "Success", "export_url": ...}
    """
    if export.status == Export.PENDING:
        return async_status(PENDING)
    if export.status != Export.SUCCESSFUL:
        return async_status(FAILED, export.error_message)
    # Successful export: hand back a URL the client can download from.
    hosted_externally = export.export_type in (
        Export.EXTERNAL_EXPORT,
        Export.GOOGLE_SHEETS_EXPORT,
    )
    if hosted_externally:
        export_url = export.export_url
    else:
        export_url = reverse(
            "export-detail", kwargs={"pk": export.pk}, request=request
        )
    resp = async_status(SUCCESSFUL)
    resp["export_url"] = export_url
    return resp
def get_async_response(job_uuid, request, xform, count=0):
    """
    Returns the status of an async task for the given job_uuid.

    Retries once on broker connection errors (count tracks the attempt);
    a second failure raises ServiceUnavailable.
    """
    def _get_response():
        export = get_object_or_404(Export, task_id=job_uuid)
        return export_async_export_response(request, export)
    try:
        job = AsyncResult(job_uuid)
        if job.state == "SUCCESS":
            resp = _get_response()
        else:
            resp = async_status(celery_state_to_status(job.state))
            # append task result to the response
            if job.result:
                result = job.result
                if isinstance(result, dict):
                    resp.update(result)
                else:
                    resp.update({"progress": str(result)})
    except (OperationalError, ConnectionError) as e:
        report_exception("Connection Error", e, sys.exc_info())
        if count > 0:
            # Second consecutive failure: surface the outage to the caller.
            raise ServiceUnavailable from e
        return get_async_response(job_uuid, request, xform, count + 1)
    except BacklogLimitExceeded:
        # most likely still processing
        resp = async_status(celery_state_to_status("PENDING"))
    return resp
# pylint: disable=redefined-builtin
def response_for_format(data, format=None):
    """
    Return appropriately formatted data in Response().
    """
    if format == "xml":
        payload = data.xml
    elif format in ("xls", "xlsx"):
        xls_file = data.xls
        # A missing or deleted spreadsheet file means there is nothing to serve.
        if not xls_file or not xls_file.storage.exists(xls_file.name):
            raise Http404()
        payload = xls_file
    else:
        raw = data.json
        payload = json.loads(raw) if isinstance(raw, str) else raw
    return Response(payload)
def get_existing_file_format(data, format):
    """
    Util function to extract the existing form extension
    """
    if format in XLS_EXTENSIONS:
        # Spreadsheet request: trust the stored file's real extension.
        return data.name.rsplit(".", 1)[-1]
    return format
def generate_google_web_flow(request):
    """
    Returns a OAuth2WebServerFlow object from the request redirect_uri.
    """
    # First request container carrying a redirect_uri wins; fall back to
    # the configured default only when none of them has it.
    for source in (request.GET, request.POST, request.query_params, request.data):
        if "redirect_uri" in source:
            redirect_uri = source.get("redirect_uri")
            break
    else:
        redirect_uri = settings.GOOGLE_STEP2_URI
    return create_flow(redirect_uri)
def _get_google_credential(request):
    """Return Google OAuth2 credentials for the requesting user.

    Falls back to a session access token for anonymous sessions.  When no
    valid credential can be produced, returns an HttpResponseRedirect to
    Google's authorization URL instead — callers must check the type.
    """
    credential = None
    storage = None
    if request.user.is_authenticated:
        try:
            storage = TokenStorageModel.objects.get(id=request.user)
            credential = storage.credential
        except TokenStorageModel.DoesNotExist:
            pass
    elif request.session.get("access_token"):
        credential = Credentials(token=request.session["access_token"])
    if credential and not credential.valid:
        # NOTE(review): on the session-token path `storage` is still None,
        # so both the save below and the delete in the except branch would
        # raise AttributeError — confirm expired session tokens can't reach
        # this point.
        try:
            credential.refresh(Request())
            storage.credential = credential
            storage.save()
        except RefreshError:
            storage.delete()
            credential = None
    if not credential:
        # No usable credential: start the OAuth consent flow.
        google_flow = generate_google_web_flow(request)
        authorization_url, _state = google_flow.authorization_url(
            access_type="offline", include_granted_scopes="true", prompt="consent"
        )
        return HttpResponseRedirect(authorization_url)
    return credential
| raise exceptions.ParseError(export.error_message) | conditional_block |
api_export_tools.py | # -*- coding: utf-8 -*-
"""
API export utility functions.
"""
import json
import os
import sys
from datetime import datetime
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials # noqa
from django.conf import settings
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
import six
from celery.backends.rpc import BacklogLimitExceeded
from celery.result import AsyncResult
from kombu.exceptions import OperationalError
from rest_framework import exceptions, status
from rest_framework.response import Response
from rest_framework.reverse import reverse
try:
from savReaderWriter import SPSSIOError
except ImportError:
SPSSIOError = Exception
from onadata.apps.main.models import TokenStorageModel
from onadata.apps.viewer import tasks as viewer_task
from onadata.apps.viewer.models.export import Export, ExportConnectionError
from onadata.libs.exceptions import (
J2XException,
NoRecordsFoundError,
NoRecordsPermission,
ServiceUnavailable,
)
from onadata.libs.permissions import filter_queryset_xform_meta_perms_sql
from onadata.libs.utils import log
from onadata.libs.utils.async_status import (
FAILED,
PENDING,
SUCCESSFUL,
async_status,
celery_state_to_status,
)
from onadata.libs.utils.common_tags import (
DATAVIEW_EXPORT,
GROUPNAME_REMOVED_FLAG,
OSM,
SUBMISSION_TIME,
)
from onadata.libs.utils.common_tools import report_exception
from onadata.libs.utils.export_tools import (
check_pending_export,
generate_attachments_zip_export,
generate_export,
generate_external_export,
generate_geojson_export,
generate_kml_export,
generate_osm_export,
newest_export_for,
parse_request_export_options,
should_create_new_export,
)
from onadata.libs.utils.google import create_flow
from onadata.libs.utils.logger_tools import response_with_mimetype_and_name
from onadata.libs.utils.model_tools import get_columns_with_hxl
from onadata.settings.common import XLS_EXTENSIONS
# Supported external exports
EXTERNAL_EXPORT_TYPES = ["xlsx"]
EXPORT_EXT = {
"xlsx": Export.XLSX_EXPORT,
"csv": Export.CSV_EXPORT,
"csvzip": Export.CSV_ZIP_EXPORT,
"savzip": Export.SAV_ZIP_EXPORT,
"uuid": Export.EXTERNAL_EXPORT,
"kml": Export.KML_EXPORT,
"zip": Export.ZIP_EXPORT,
OSM: Export.OSM_EXPORT,
"gsheets": Export.GOOGLE_SHEETS_EXPORT,
"geojson": Export.GEOJSON_EXPORT,
}
def get_metadata_format(data_value):
    """Returns metadata format/extension"""
    # GeoJSON-linked metadata values get a geojson extension; csv otherwise.
    geojson_prefixes = ("xform_geojson", "dataview_geojson")
    return "geojson" if data_value.startswith(geojson_prefixes) else "csv"
def include_hxl_row(dv_columns, hxl_columns):
    """
    Return True when the dataview shows at least one HXL column.

    If none of the dataview's columns appear among the xform's hxl columns
    there is no point adding the hxl row, so we return False.

    :param dv_columns - dataview columns
    :param hxl_columns - hxl columns from the dataview's xform
    :return True or False
    """
    return not set(hxl_columns).isdisjoint(dv_columns)
def _get_export_type(export_type):
    """Translate an export-type slug into its ``Export`` constant.

    :raises exceptions.ParseError: for unknown slugs, or for Google Sheets
        when the GOOGLE_EXPORT setting is disabled.
    """
    sheets_disabled = export_type == Export.GOOGLE_SHEETS_EXPORT and not getattr(
        settings, "GOOGLE_EXPORT", False
    )
    if export_type not in EXPORT_EXT or sheets_disabled:
        raise exceptions.ParseError(
            _(f"'{export_type}' format not known or not implemented!")
        )
    return EXPORT_EXT[export_type]
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
def custom_response_handler(  # noqa: C0901
    request,
    xform,
    query,
    export_type,
    token=None,
    meta=None,
    dataview=False,
    filename=None,
    metadata=None,
):
    """
    Returns a HTTP response with export file for download.

    :param request: HTTP request; query params drive export options.
    :param xform: the form whose submissions are being exported.
    :param query: extra filter applied to the submissions.
    :param export_type: export-type slug (key of ``EXPORT_EXT``).
    :param token: template url for xls external reports.
    :param meta: metadata id holding the external report template url.
    :param dataview: optional dataview restricting exported columns.
    :param filename: explicit download filename; derived when ``None``.
    :param metadata: metadata passed through to GeoJSON export generation.
    """
    export_type = _get_export_type(export_type)
    # NOTE(review): `and` binds tighter than `or`, so a non-None `meta`
    # alone switches ANY export type to EXTERNAL_EXPORT — confirm this
    # precedence is intentional before adding parentheses.
    if (
        export_type in EXTERNAL_EXPORT_TYPES
        and (token is not None)
        or (meta is not None)
    ):
        export_type = Export.EXTERNAL_EXPORT
    options = parse_request_export_options(request.query_params)
    # dataview_pk is False when no dataview (or one without a pk) is given.
    dataview_pk = hasattr(dataview, "pk") and dataview.pk
    options["dataview_pk"] = dataview_pk
    options["host"] = request.get_host()
    if dataview:
        columns_with_hxl = get_columns_with_hxl(xform.survey.get("children"))
        if columns_with_hxl:
            # Only emit the HXL row when the dataview shows HXL columns.
            options["include_hxl"] = include_hxl_row(
                dataview.columns, list(columns_with_hxl)
            )
    try:
        # Restrict the query to records this user is allowed to see.
        query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
    except NoRecordsPermission:
        return Response(
            data=json.dumps({"details": _("You don't have permission")}),
            status=status.HTTP_403_FORBIDDEN,
            content_type="application/json",
        )
    if query:
        options["query"] = query
    remove_group_name = options.get("remove_group_name")
    export_id = request.query_params.get("export_id")
    if export_id:
        # Explicit export requested: serve that exact export or 404.
        export = get_object_or_404(Export, id=export_id, xform=xform)
    else:
        if export_type == Export.GOOGLE_SHEETS_EXPORT:
            # Sheets exports need the async flow (OAuth redirects etc.).
            return Response(
                data=json.dumps(
                    {"details": _("Sheets export only supported in async mode")}
                ),
                status=status.HTTP_403_FORBIDDEN,
                content_type="application/json",
            )
        # check if we need to re-generate,
        # we always re-generate if a filter is specified
        def _new_export():
            return _generate_new_export(
                request,
                xform,
                query,
                export_type,
                dataview_pk=dataview_pk,
                metadata=metadata,
            )
        if should_create_new_export(xform, export_type, options, request=request):
            export = _new_export()
        else:
            export = newest_export_for(xform, export_type, options)
            if not export.filename and not export.error_message:
                # Stale/incomplete record: regenerate rather than serve it.
                export = _new_export()
    log_export(request, xform, export_type)
    if export_type == Export.EXTERNAL_EXPORT:
        return external_export_response(export)
    if export.filename is None and export.error_message:
        raise exceptions.ParseError(export.error_message)
    # get extension from file_path, exporter could modify to
    # xlsx if it exceeds limits
    _path, ext = os.path.splitext(export.filename)
    ext = ext[1:]
    show_date = True
    if filename is None and export.status == Export.SUCCESSFUL:
        filename = _generate_filename(
            request, xform, remove_group_name, dataview_pk=dataview_pk
        )
    else:
        # Caller supplied a filename (or export failed): use it verbatim.
        show_date = False
    response = response_with_mimetype_and_name(
        Export.EXPORT_MIMES[ext],
        filename,
        extension=ext,
        show_date=show_date,
        file_path=export.filepath,
    )
    return response
def _generate_new_export(  # noqa: C0901
    request, xform, query, export_type, dataview_pk=False, metadata=None
):
    """
    Generate a fresh export of ``export_type`` for ``xform``.

    Dispatches to the exporter matching ``export_type`` and writes an
    EXPORT_CREATED audit entry when generation succeeds.

    :param request: HTTP request; supplies host, sort and filter params.
    :param xform: form being exported.
    :param query: submission filter (may be amended by start/end params).
    :param export_type: one of the ``Export.*_EXPORT`` constants.
    :param dataview_pk: dataview primary key, or False when not exporting
        a dataview.
    :param metadata: metadata object, used only by GeoJSON exports.
    :return: the created export, or an async FAILED status dict when the
        external (j2x) exporter raises.
    :raises Http404: when there are no records to export.
    :raises exceptions.ParseError: on SPSS IO failures.
    """
    query = _set_start_end_params(request, query)
    extension = _get_extension_from_export_type(export_type)
    options = {
        "extension": extension,
        "username": xform.user.username,
        "id_string": xform.id_string,
        "host": request.get_host(),
        "sort": request.query_params.get('sort')
    }
    if query:
        options["query"] = query
    options["dataview_pk"] = dataview_pk
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        # NOTE(review): _get_google_credential may return an
        # HttpResponseRedirect (which has no .to_json) when re-auth is
        # needed — confirm callers only reach here with valid credentials.
        options["google_credentials"] = _get_google_credential(request).to_json()
    try:
        if export_type == Export.EXTERNAL_EXPORT:
            # External (XLS report) exports need the template token/meta.
            options["token"] = request.GET.get("token")
            options["data_id"] = request.GET.get("data_id")
            options["meta"] = request.GET.get("meta")
            export = generate_external_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.OSM_EXPORT:
            export = generate_osm_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.ZIP_EXPORT:
            export = generate_attachments_zip_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.KML_EXPORT:
            export = generate_kml_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.GEOJSON_EXPORT:
            export = generate_geojson_export(
                export_type,
                xform.user.username,
                xform.id_string,
                metadata,
                None,
                options,
                xform=xform,
            )
        else:
            # Tabular exports (csv/xlsx/sav/...) share one generic path.
            options.update(parse_request_export_options(request.query_params))
            export = generate_export(export_type, xform, None, options)
        audit = {"xform": xform.id_string, "export_type": export_type}
        log.audit_log(
            log.Actions.EXPORT_CREATED,
            request.user,
            xform.user,
            _("Created %(export_type)s export on '%(id_string)s'.")
            % {"id_string": xform.id_string, "export_type": export_type.upper()},
            audit,
            request,
        )
    except NoRecordsFoundError as e:
        raise Http404(_("No records found to export")) from e
    except J2XException as e:
        # j2x exception
        return async_status(FAILED, str(e))
    except SPSSIOError as e:
        raise exceptions.ParseError(str(e)) from e
    else:
        return export
def log_export(request, xform, export_type):
    """Write an audit-log entry recording that an export was downloaded."""
    # log download as well
    audit_details = {"xform": xform.id_string, "export_type": export_type}
    message = _(f"Downloaded {export_type.upper()} export on '{xform.id_string}'.")
    log.audit_log(
        log.Actions.EXPORT_DOWNLOADED,
        request.user,
        xform.user,
        message,
        audit_details,
        request,
    )
def external_export_response(export):
|
def _generate_filename(request, xform, remove_group_name=False, dataview_pk=False):
    """Derive the export download filename, or ``None`` for raw downloads."""
    if request.GET.get("raw"):
        return None
    # Flag group-name removal / dataview exports in the filename,
    # otherwise just use the form id_string.
    if remove_group_name:
        return f"{xform.id_string}-{GROUPNAME_REMOVED_FLAG}"
    if dataview_pk:
        return f"{xform.id_string}-{DATAVIEW_EXPORT}"
    return xform.id_string
def _set_start_end_params(request, query):
    """Merge optional ``start``/``end`` date filters into *query*.

    When either query param is present the (possibly JSON-string) query is
    parsed, a SUBMISSION_TIME range is added, and the query is re-serialised
    to a JSON string.  Without the params *query* is returned untouched.

    :raises exceptions.ParseError: when a date param is malformed.
    """
    # check for start and end params
    if "start" in request.GET or "end" in request.GET:
        query = json.loads(query) if isinstance(query, six.string_types) else query
        query[SUBMISSION_TIME] = {}
        try:
            if request.GET.get("start"):
                query[SUBMISSION_TIME]["$gte"] = _format_date_for_mongo(
                    request.GET["start"]
                )
            if request.GET.get("end"):
                query[SUBMISSION_TIME]["$lte"] = _format_date_for_mongo(
                    request.GET["end"]
                )
        except ValueError as e:
            raise exceptions.ParseError(
                _("Dates must be in the format YY_MM_DD_hh_mm_ss")
            ) from e
        else:
            # Only re-serialise when both dates parsed successfully.
            query = json.dumps(query)
    return query
def _get_extension_from_export_type(export_type):
    """Map an export type to its on-disk file extension."""
    # Zip-packaged and xlsx exports override the default (the type itself).
    overrides = {
        Export.XLSX_EXPORT: "xlsx",
        Export.CSV_ZIP_EXPORT: "zip",
        Export.SAV_ZIP_EXPORT: "zip",
    }
    return overrides.get(export_type, export_type)
# pylint: disable=invalid-name
def _format_date_for_mongo(datetime_str):
return datetime.strptime(datetime_str, "%y_%m_%d_%H_%M_%S").strftime(
"%Y-%m-%dT%H:%M:%S"
)
def process_async_export(request, xform, export_type, options=None):
    """
    Check if should generate export or just return the latest export.

    Rules for regenerating an export are:
    1. Filter included on the exports.
    2. New submission done.
    3. Always regenerate external exports.
       (External exports use templates and the template might have changed.)

    :param request: HTTP request; user drives permission filtering.
    :param xform: the form whose submissions are being exported.
    :param export_type: export-type slug (key of ``EXPORT_EXT``).
    :param options: additional export params that may include
        query: export filter
        token: template url for xls external reports
        meta: metadataid that contains the external xls report template url
        remove_group_name: Flag to determine if group names should appear
    :return: response dictionary (job_uuid or export status), or an
        HttpResponseRedirect/Response for auth and permission failures.
    """
    # maintain the order of keys while processing the export
    export_type = _get_export_type(export_type)
    token = options.get("token")
    meta = options.get("meta")
    query = options.get("query")
    force_xlsx = options.get("force_xlsx")
    try:
        # Narrow the export query to the rows this user may actually see.
        query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
    except NoRecordsPermission:
        payload = {"details": _("You don't have permission")}
        return Response(
            data=json.dumps(payload),
            status=status.HTTP_403_FORBIDDEN,
            content_type="application/json",
        )
    else:
        if query:
            options["query"] = query
    # NOTE(review): `and` binds tighter than `or`, so a non-None `meta`
    # alone switches ANY export type to EXTERNAL_EXPORT — confirm this
    # precedence is intentional before adding parentheses.
    if (
        export_type in EXTERNAL_EXPORT_TYPES
        and (token is not None)
        or (meta is not None)
    ):
        export_type = Export.EXTERNAL_EXPORT
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        credential = _get_google_credential(request)
        if isinstance(credential, HttpResponseRedirect):
            # User must first complete the Google OAuth flow.
            return credential
        options["google_credentials"] = credential.to_json()
    if (
        should_create_new_export(xform, export_type, options, request=request)
        or export_type == Export.EXTERNAL_EXPORT
    ):
        resp = {
            "job_uuid": _create_export_async(
                xform, export_type, query, force_xlsx, options=options
            )
        }
    else:
        # Leftover debug print removed; reuse the most recent export.
        export = newest_export_for(xform, export_type, options)
        if not export.filename:
            # tends to happen when using newest_export_for.
            resp = {
                "job_uuid": _create_export_async(
                    xform, export_type, query, force_xlsx, options=options
                )
            }
        else:
            resp = export_async_export_response(request, export)
    return resp
def _create_export_async(
    xform, export_type, query=None, force_xlsx=False, options=None
):
    """
    Creates async exports.

    :param xform:
    :param export_type:
    :param query:
    :param force_xlsx:
    :param options:
    :return: job_uuid generated
    """
    pending = check_pending_export(xform, export_type, options)
    if pending:
        # A matching job is already queued — reuse it instead of duplicating.
        return pending.task_id
    try:
        _export, async_result = viewer_task.create_async_export(
            xform, export_type, query, force_xlsx, options=options
        )
    except ExportConnectionError as e:
        raise ServiceUnavailable from e
    return async_result.task_id
def export_async_export_response(request, export):
    """
    Checks the export status and generates the response.

    :param request:
    :param export:
    :return: response dict example {"job_status": "Success", "export_url": ...}
    """
    if export.status == Export.PENDING:
        return async_status(PENDING)
    if export.status != Export.SUCCESSFUL:
        return async_status(FAILED, export.error_message)
    # Successful export: hand back a URL the client can download from.
    hosted_externally = export.export_type in (
        Export.EXTERNAL_EXPORT,
        Export.GOOGLE_SHEETS_EXPORT,
    )
    if hosted_externally:
        export_url = export.export_url
    else:
        export_url = reverse(
            "export-detail", kwargs={"pk": export.pk}, request=request
        )
    resp = async_status(SUCCESSFUL)
    resp["export_url"] = export_url
    return resp
def get_async_response(job_uuid, request, xform, count=0):
    """
    Returns the status of an async task for the given job_uuid.

    Retries once on broker connection errors (count tracks the attempt);
    a second failure raises ServiceUnavailable.
    """
    def _get_response():
        export = get_object_or_404(Export, task_id=job_uuid)
        return export_async_export_response(request, export)
    try:
        job = AsyncResult(job_uuid)
        if job.state == "SUCCESS":
            resp = _get_response()
        else:
            resp = async_status(celery_state_to_status(job.state))
            # append task result to the response
            if job.result:
                result = job.result
                if isinstance(result, dict):
                    resp.update(result)
                else:
                    resp.update({"progress": str(result)})
    except (OperationalError, ConnectionError) as e:
        report_exception("Connection Error", e, sys.exc_info())
        if count > 0:
            # Second consecutive failure: surface the outage to the caller.
            raise ServiceUnavailable from e
        return get_async_response(job_uuid, request, xform, count + 1)
    except BacklogLimitExceeded:
        # most likely still processing
        resp = async_status(celery_state_to_status("PENDING"))
    return resp
# pylint: disable=redefined-builtin
def response_for_format(data, format=None):
    """
    Return appropriately formatted data in Response().
    """
    if format == "xml":
        payload = data.xml
    elif format in ("xls", "xlsx"):
        xls_file = data.xls
        # A missing or deleted spreadsheet file means there is nothing to serve.
        if not xls_file or not xls_file.storage.exists(xls_file.name):
            raise Http404()
        payload = xls_file
    else:
        raw = data.json
        payload = json.loads(raw) if isinstance(raw, str) else raw
    return Response(payload)
def get_existing_file_format(data, format):
    """
    Util function to extract the existing form extension
    """
    if format in XLS_EXTENSIONS:
        # Spreadsheet request: trust the stored file's real extension.
        return data.name.rsplit(".", 1)[-1]
    return format
def generate_google_web_flow(request):
    """
    Returns a OAuth2WebServerFlow object from the request redirect_uri.
    """
    # First request container carrying a redirect_uri wins; fall back to
    # the configured default only when none of them has it.
    for source in (request.GET, request.POST, request.query_params, request.data):
        if "redirect_uri" in source:
            redirect_uri = source.get("redirect_uri")
            break
    else:
        redirect_uri = settings.GOOGLE_STEP2_URI
    return create_flow(redirect_uri)
def _get_google_credential(request):
    """Return Google OAuth2 credentials for the requesting user.

    Falls back to a session access token for anonymous sessions.  When no
    valid credential can be produced, returns an HttpResponseRedirect to
    Google's authorization URL instead — callers must check the type.
    """
    credential = None
    storage = None
    if request.user.is_authenticated:
        try:
            storage = TokenStorageModel.objects.get(id=request.user)
            credential = storage.credential
        except TokenStorageModel.DoesNotExist:
            pass
    elif request.session.get("access_token"):
        credential = Credentials(token=request.session["access_token"])
    if credential and not credential.valid:
        # NOTE(review): on the session-token path `storage` is still None,
        # so both the save below and the delete in the except branch would
        # raise AttributeError — confirm expired session tokens can't reach
        # this point.
        try:
            credential.refresh(Request())
            storage.credential = credential
            storage.save()
        except RefreshError:
            storage.delete()
            credential = None
    if not credential:
        # No usable credential: start the OAuth consent flow.
        google_flow = generate_google_web_flow(request)
        authorization_url, _state = google_flow.authorization_url(
            access_type="offline", include_granted_scopes="true", prompt="consent"
        )
        return HttpResponseRedirect(authorization_url)
    return credential
| """
Redirects to export_url of XLSReports successful export. In case of a
failure, returns a 400 HTTP JSON response with the error message.
"""
if isinstance(export, Export) and export.internal_status == Export.SUCCESSFUL:
return HttpResponseRedirect(export.export_url)
http_status = status.HTTP_400_BAD_REQUEST
return Response(json.dumps(export), http_status, content_type="application/json") | identifier_body |
api_export_tools.py | # -*- coding: utf-8 -*-
"""
API export utility functions.
"""
import json
import os
import sys
from datetime import datetime
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials # noqa
from django.conf import settings
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
import six
from celery.backends.rpc import BacklogLimitExceeded
from celery.result import AsyncResult
from kombu.exceptions import OperationalError
from rest_framework import exceptions, status
from rest_framework.response import Response
from rest_framework.reverse import reverse
try:
from savReaderWriter import SPSSIOError
except ImportError:
SPSSIOError = Exception
from onadata.apps.main.models import TokenStorageModel
from onadata.apps.viewer import tasks as viewer_task
from onadata.apps.viewer.models.export import Export, ExportConnectionError
from onadata.libs.exceptions import (
J2XException,
NoRecordsFoundError,
NoRecordsPermission,
ServiceUnavailable,
)
from onadata.libs.permissions import filter_queryset_xform_meta_perms_sql
from onadata.libs.utils import log
from onadata.libs.utils.async_status import (
FAILED,
PENDING,
SUCCESSFUL,
async_status,
celery_state_to_status,
)
from onadata.libs.utils.common_tags import (
DATAVIEW_EXPORT,
GROUPNAME_REMOVED_FLAG,
OSM,
SUBMISSION_TIME,
)
from onadata.libs.utils.common_tools import report_exception
from onadata.libs.utils.export_tools import (
check_pending_export,
generate_attachments_zip_export,
generate_export,
generate_external_export,
generate_geojson_export,
generate_kml_export,
generate_osm_export,
newest_export_for,
parse_request_export_options,
should_create_new_export,
)
from onadata.libs.utils.google import create_flow
from onadata.libs.utils.logger_tools import response_with_mimetype_and_name
from onadata.libs.utils.model_tools import get_columns_with_hxl
from onadata.settings.common import XLS_EXTENSIONS
# Supported external exports
EXTERNAL_EXPORT_TYPES = ["xlsx"]
EXPORT_EXT = {
"xlsx": Export.XLSX_EXPORT,
"csv": Export.CSV_EXPORT,
"csvzip": Export.CSV_ZIP_EXPORT,
"savzip": Export.SAV_ZIP_EXPORT,
"uuid": Export.EXTERNAL_EXPORT,
"kml": Export.KML_EXPORT,
"zip": Export.ZIP_EXPORT,
OSM: Export.OSM_EXPORT,
"gsheets": Export.GOOGLE_SHEETS_EXPORT,
"geojson": Export.GEOJSON_EXPORT,
}
def get_metadata_format(data_value):
    """Returns metadata format/extension"""
    # GeoJSON-linked metadata values get a geojson extension; csv otherwise.
    geojson_prefixes = ("xform_geojson", "dataview_geojson")
    return "geojson" if data_value.startswith(geojson_prefixes) else "csv"
def include_hxl_row(dv_columns, hxl_columns):
    """
    Return True when the dataview shows at least one HXL column.

    If none of the dataview's columns appear among the xform's hxl columns
    there is no point adding the hxl row, so we return False.

    :param dv_columns - dataview columns
    :param hxl_columns - hxl columns from the dataview's xform
    :return True or False
    """
    return not set(hxl_columns).isdisjoint(dv_columns)
def _get_export_type(export_type):
    """Translate an export-type slug into its ``Export`` constant.

    :raises exceptions.ParseError: for unknown slugs, or for Google Sheets
        when the GOOGLE_EXPORT setting is disabled.
    """
    sheets_disabled = export_type == Export.GOOGLE_SHEETS_EXPORT and not getattr(
        settings, "GOOGLE_EXPORT", False
    )
    if export_type not in EXPORT_EXT or sheets_disabled:
        raise exceptions.ParseError(
            _(f"'{export_type}' format not known or not implemented!")
        )
    return EXPORT_EXT[export_type]
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
def custom_response_handler(  # noqa: C0901
    request,
    xform,
    query,
    export_type,
    token=None,
    meta=None,
    dataview=False,
    filename=None,
    metadata=None,
):
    """
    Returns a HTTP response with export file for download.

    :param request: HTTP request; query params drive export options.
    :param xform: the form whose submissions are being exported.
    :param query: extra filter applied to the submissions.
    :param export_type: export-type slug (key of ``EXPORT_EXT``).
    :param token: template url for xls external reports.
    :param meta: metadata id holding the external report template url.
    :param dataview: optional dataview restricting exported columns.
    :param filename: explicit download filename; derived when ``None``.
    :param metadata: metadata passed through to GeoJSON export generation.
    """
    export_type = _get_export_type(export_type)
    # NOTE(review): `and` binds tighter than `or`, so a non-None `meta`
    # alone switches ANY export type to EXTERNAL_EXPORT — confirm this
    # precedence is intentional before adding parentheses.
    if (
        export_type in EXTERNAL_EXPORT_TYPES
        and (token is not None)
        or (meta is not None)
    ):
        export_type = Export.EXTERNAL_EXPORT
    options = parse_request_export_options(request.query_params)
    # dataview_pk is False when no dataview (or one without a pk) is given.
    dataview_pk = hasattr(dataview, "pk") and dataview.pk
    options["dataview_pk"] = dataview_pk
    options["host"] = request.get_host()
    if dataview:
        columns_with_hxl = get_columns_with_hxl(xform.survey.get("children"))
        if columns_with_hxl:
            # Only emit the HXL row when the dataview shows HXL columns.
            options["include_hxl"] = include_hxl_row(
                dataview.columns, list(columns_with_hxl)
            )
    try:
        # Restrict the query to records this user is allowed to see.
        query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
    except NoRecordsPermission:
        return Response(
            data=json.dumps({"details": _("You don't have permission")}),
            status=status.HTTP_403_FORBIDDEN,
            content_type="application/json",
        )
    if query:
        options["query"] = query
    remove_group_name = options.get("remove_group_name")
    export_id = request.query_params.get("export_id")
    if export_id:
        # Explicit export requested: serve that exact export or 404.
        export = get_object_or_404(Export, id=export_id, xform=xform)
    else:
        if export_type == Export.GOOGLE_SHEETS_EXPORT:
            # Sheets exports need the async flow (OAuth redirects etc.).
            return Response(
                data=json.dumps(
                    {"details": _("Sheets export only supported in async mode")}
                ),
                status=status.HTTP_403_FORBIDDEN,
                content_type="application/json",
            )
        # check if we need to re-generate,
        # we always re-generate if a filter is specified
        def _new_export():
            return _generate_new_export(
                request,
                xform,
                query,
                export_type,
                dataview_pk=dataview_pk,
                metadata=metadata,
            )
        if should_create_new_export(xform, export_type, options, request=request):
            export = _new_export()
        else:
            export = newest_export_for(xform, export_type, options)
            if not export.filename and not export.error_message:
                # Stale/incomplete record: regenerate rather than serve it.
                export = _new_export()
    log_export(request, xform, export_type)
    if export_type == Export.EXTERNAL_EXPORT:
        return external_export_response(export)
    if export.filename is None and export.error_message:
        raise exceptions.ParseError(export.error_message)
    # get extension from file_path, exporter could modify to
    # xlsx if it exceeds limits
    _path, ext = os.path.splitext(export.filename)
    ext = ext[1:]
    show_date = True
    if filename is None and export.status == Export.SUCCESSFUL:
        filename = _generate_filename(
            request, xform, remove_group_name, dataview_pk=dataview_pk
        )
    else:
        # Caller supplied a filename (or export failed): use it verbatim.
        show_date = False
    response = response_with_mimetype_and_name(
        Export.EXPORT_MIMES[ext],
        filename,
        extension=ext,
        show_date=show_date,
        file_path=export.filepath,
    )
    return response
def _generate_new_export(  # noqa: C0901
    request, xform, query, export_type, dataview_pk=False, metadata=None
):
    """
    Generate a fresh export of ``export_type`` for ``xform``.

    Dispatches to the exporter matching ``export_type`` and writes an
    EXPORT_CREATED audit entry when generation succeeds.

    :param request: HTTP request; supplies host, sort and filter params.
    :param xform: form being exported.
    :param query: submission filter (may be amended by start/end params).
    :param export_type: one of the ``Export.*_EXPORT`` constants.
    :param dataview_pk: dataview primary key, or False when not exporting
        a dataview.
    :param metadata: metadata object, used only by GeoJSON exports.
    :return: the created export, or an async FAILED status dict when the
        external (j2x) exporter raises.
    :raises Http404: when there are no records to export.
    :raises exceptions.ParseError: on SPSS IO failures.
    """
    query = _set_start_end_params(request, query)
    extension = _get_extension_from_export_type(export_type)
    options = {
        "extension": extension,
        "username": xform.user.username,
        "id_string": xform.id_string,
        "host": request.get_host(),
        "sort": request.query_params.get('sort')
    }
    if query:
        options["query"] = query
    options["dataview_pk"] = dataview_pk
    if export_type == Export.GOOGLE_SHEETS_EXPORT:
        # NOTE(review): _get_google_credential may return an
        # HttpResponseRedirect (which has no .to_json) when re-auth is
        # needed — confirm callers only reach here with valid credentials.
        options["google_credentials"] = _get_google_credential(request).to_json()
    try:
        if export_type == Export.EXTERNAL_EXPORT:
            # External (XLS report) exports need the template token/meta.
            options["token"] = request.GET.get("token")
            options["data_id"] = request.GET.get("data_id")
            options["meta"] = request.GET.get("meta")
            export = generate_external_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.OSM_EXPORT:
            export = generate_osm_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.ZIP_EXPORT:
            export = generate_attachments_zip_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.KML_EXPORT:
            export = generate_kml_export(
                export_type,
                xform.user.username,
                xform.id_string,
                None,
                options,
                xform=xform,
            )
        elif export_type == Export.GEOJSON_EXPORT:
            export = generate_geojson_export(
                export_type,
                xform.user.username,
                xform.id_string,
                metadata,
                None,
                options,
                xform=xform,
            )
        else:
            # Tabular exports (csv/xlsx/sav/...) share one generic path.
            options.update(parse_request_export_options(request.query_params))
            export = generate_export(export_type, xform, None, options)
        audit = {"xform": xform.id_string, "export_type": export_type}
        log.audit_log(
            log.Actions.EXPORT_CREATED,
            request.user,
            xform.user,
            _("Created %(export_type)s export on '%(id_string)s'.")
            % {"id_string": xform.id_string, "export_type": export_type.upper()},
            audit,
            request,
        )
    except NoRecordsFoundError as e:
        raise Http404(_("No records found to export")) from e
    except J2XException as e:
        # j2x exception
        return async_status(FAILED, str(e))
    except SPSSIOError as e:
        raise exceptions.ParseError(str(e)) from e
    else:
        return export
def log_export(request, xform, export_type):
"""
Logs audit logs of export requests.
"""
# log download as well
audit = {"xform": xform.id_string, "export_type": export_type}
log.audit_log(
log.Actions.EXPORT_DOWNLOADED,
request.user,
xform.user,
_(f"Downloaded {export_type.upper()} export on '{xform.id_string}'."),
audit,
request,
)
def external_export_response(export):
"""
Redirects to export_url of XLSReports successful export. In case of a
failure, returns a 400 HTTP JSON response with the error message.
"""
if isinstance(export, Export) and export.internal_status == Export.SUCCESSFUL:
return HttpResponseRedirect(export.export_url)
http_status = status.HTTP_400_BAD_REQUEST
return Response(json.dumps(export), http_status, content_type="application/json")
def _generate_filename(request, xform, remove_group_name=False, dataview_pk=False):
if request.GET.get("raw"):
filename = None
else:
# append group name removed flag otherwise use the form id_string
if remove_group_name:
filename = f"{xform.id_string}-{GROUPNAME_REMOVED_FLAG}"
elif dataview_pk:
filename = f"{xform.id_string}-{DATAVIEW_EXPORT}"
else:
filename = xform.id_string
return filename
def _set_start_end_params(request, query):
# check for start and end params
if "start" in request.GET or "end" in request.GET:
query = json.loads(query) if isinstance(query, six.string_types) else query
query[SUBMISSION_TIME] = {}
try:
if request.GET.get("start"):
query[SUBMISSION_TIME]["$gte"] = _format_date_for_mongo(
request.GET["start"]
)
if request.GET.get("end"):
query[SUBMISSION_TIME]["$lte"] = _format_date_for_mongo(
request.GET["end"]
)
except ValueError as e:
raise exceptions.ParseError(
_("Dates must be in the format YY_MM_DD_hh_mm_ss")
) from e
else:
query = json.dumps(query)
return query
def _get_extension_from_export_type(export_type):
extension = export_type
if export_type == Export.XLSX_EXPORT:
extension = "xlsx" |
# pylint: disable=invalid-name
def _format_date_for_mongo(datetime_str):
return datetime.strptime(datetime_str, "%y_%m_%d_%H_%M_%S").strftime(
"%Y-%m-%dT%H:%M:%S"
)
def process_async_export(request, xform, export_type, options=None):
"""
Check if should generate export or just return the latest export.
Rules for regenerating an export are:
1. Filter included on the exports.
2. New submission done.
3. Always regenerate external exports.
(External exports uses templates and the template might have
changed)
:param request:
:param xform:
:param export_type:
:param options: additional export params that may include
query: export filter
token: template url for xls external reports
meta: metadataid that contains the external xls report template url
remove_group_name: Flag to determine if group names should appear
:return: response dictionary
"""
# maintain the order of keys while processing the export
export_type = _get_export_type(export_type)
token = options.get("token")
meta = options.get("meta")
query = options.get("query")
force_xlsx = options.get("force_xlsx")
try:
query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
except NoRecordsPermission:
payload = {"details": _("You don't have permission")}
return Response(
data=json.dumps(payload),
status=status.HTTP_403_FORBIDDEN,
content_type="application/json",
)
else:
if query:
options["query"] = query
if (
export_type in EXTERNAL_EXPORT_TYPES
and (token is not None)
or (meta is not None)
):
export_type = Export.EXTERNAL_EXPORT
if export_type == Export.GOOGLE_SHEETS_EXPORT:
credential = _get_google_credential(request)
if isinstance(credential, HttpResponseRedirect):
return credential
options["google_credentials"] = credential.to_json()
if (
should_create_new_export(xform, export_type, options, request=request)
or export_type == Export.EXTERNAL_EXPORT
):
resp = {
"job_uuid": _create_export_async(
xform, export_type, query, force_xlsx, options=options
)
}
else:
print("Do not create a new export.")
export = newest_export_for(xform, export_type, options)
if not export.filename:
# tends to happen when using newest_export_for.
resp = {
"job_uuid": _create_export_async(
xform, export_type, query, force_xlsx, options=options
)
}
else:
resp = export_async_export_response(request, export)
return resp
def _create_export_async(
xform, export_type, query=None, force_xlsx=False, options=None
):
"""
Creates async exports
:param xform:
:param export_type:
:param query:
:param force_xlsx:
:param options:
:return:
job_uuid generated
"""
export = check_pending_export(xform, export_type, options)
if export:
return export.task_id
try:
export, async_result = viewer_task.create_async_export(
xform, export_type, query, force_xlsx, options=options
)
except ExportConnectionError as e:
raise ServiceUnavailable from e
return async_result.task_id
def export_async_export_response(request, export):
"""
Checks the export status and generates the reponse
:param request:
:param export:
:return: response dict example {"job_status": "Success", "export_url": ...}
"""
if export.status == Export.SUCCESSFUL:
if export.export_type not in [
Export.EXTERNAL_EXPORT,
Export.GOOGLE_SHEETS_EXPORT,
]:
export_url = reverse(
"export-detail", kwargs={"pk": export.pk}, request=request
)
else:
export_url = export.export_url
resp = async_status(SUCCESSFUL)
resp["export_url"] = export_url
elif export.status == Export.PENDING:
resp = async_status(PENDING)
else:
resp = async_status(FAILED, export.error_message)
return resp
def get_async_response(job_uuid, request, xform, count=0):
"""
Returns the status of an async task for the given job_uuid.
"""
def _get_response():
export = get_object_or_404(Export, task_id=job_uuid)
return export_async_export_response(request, export)
try:
job = AsyncResult(job_uuid)
if job.state == "SUCCESS":
resp = _get_response()
else:
resp = async_status(celery_state_to_status(job.state))
# append task result to the response
if job.result:
result = job.result
if isinstance(result, dict):
resp.update(result)
else:
resp.update({"progress": str(result)})
except (OperationalError, ConnectionError) as e:
report_exception("Connection Error", e, sys.exc_info())
if count > 0:
raise ServiceUnavailable from e
return get_async_response(job_uuid, request, xform, count + 1)
except BacklogLimitExceeded:
# most likely still processing
resp = async_status(celery_state_to_status("PENDING"))
return resp
# pylint: disable=redefined-builtin
def response_for_format(data, format=None):
"""
Return appropriately formatted data in Response().
"""
if format == "xml":
formatted_data = data.xml
elif format in ("xls", "xlsx"):
if not data.xls or not data.xls.storage.exists(data.xls.name):
raise Http404()
formatted_data = data.xls
else:
formatted_data = (
json.loads(data.json) if isinstance(data.json, str) else data.json
)
return Response(formatted_data)
def get_existing_file_format(data, format):
"""
Util function to extract the existing form extension
"""
if format in XLS_EXTENSIONS:
existing_file_format = data.name.split(".")[-1]
return existing_file_format
return format
def generate_google_web_flow(request):
"""
Returns a OAuth2WebServerFlow object from the request redirect_uri.
"""
if "redirect_uri" in request.GET:
redirect_uri = request.GET.get("redirect_uri")
elif "redirect_uri" in request.POST:
redirect_uri = request.POST.get("redirect_uri")
elif "redirect_uri" in request.query_params:
redirect_uri = request.query_params.get("redirect_uri")
elif "redirect_uri" in request.data:
redirect_uri = request.data.get("redirect_uri")
else:
redirect_uri = settings.GOOGLE_STEP2_URI
return create_flow(redirect_uri)
def _get_google_credential(request):
credential = None
storage = None
if request.user.is_authenticated:
try:
storage = TokenStorageModel.objects.get(id=request.user)
credential = storage.credential
except TokenStorageModel.DoesNotExist:
pass
elif request.session.get("access_token"):
credential = Credentials(token=request.session["access_token"])
if credential and not credential.valid:
try:
credential.refresh(Request())
storage.credential = credential
storage.save()
except RefreshError:
storage.delete()
credential = None
if not credential:
google_flow = generate_google_web_flow(request)
authorization_url, _state = google_flow.authorization_url(
access_type="offline", include_granted_scopes="true", prompt="consent"
)
return HttpResponseRedirect(authorization_url)
return credential | elif export_type in [Export.CSV_ZIP_EXPORT, Export.SAV_ZIP_EXPORT]:
extension = "zip"
return extension
| random_line_split |
api_export_tools.py | # -*- coding: utf-8 -*-
"""
API export utility functions.
"""
import json
import os
import sys
from datetime import datetime
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials # noqa
from django.conf import settings
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.utils.translation import gettext as _
import six
from celery.backends.rpc import BacklogLimitExceeded
from celery.result import AsyncResult
from kombu.exceptions import OperationalError
from rest_framework import exceptions, status
from rest_framework.response import Response
from rest_framework.reverse import reverse
try:
from savReaderWriter import SPSSIOError
except ImportError:
SPSSIOError = Exception
from onadata.apps.main.models import TokenStorageModel
from onadata.apps.viewer import tasks as viewer_task
from onadata.apps.viewer.models.export import Export, ExportConnectionError
from onadata.libs.exceptions import (
J2XException,
NoRecordsFoundError,
NoRecordsPermission,
ServiceUnavailable,
)
from onadata.libs.permissions import filter_queryset_xform_meta_perms_sql
from onadata.libs.utils import log
from onadata.libs.utils.async_status import (
FAILED,
PENDING,
SUCCESSFUL,
async_status,
celery_state_to_status,
)
from onadata.libs.utils.common_tags import (
DATAVIEW_EXPORT,
GROUPNAME_REMOVED_FLAG,
OSM,
SUBMISSION_TIME,
)
from onadata.libs.utils.common_tools import report_exception
from onadata.libs.utils.export_tools import (
check_pending_export,
generate_attachments_zip_export,
generate_export,
generate_external_export,
generate_geojson_export,
generate_kml_export,
generate_osm_export,
newest_export_for,
parse_request_export_options,
should_create_new_export,
)
from onadata.libs.utils.google import create_flow
from onadata.libs.utils.logger_tools import response_with_mimetype_and_name
from onadata.libs.utils.model_tools import get_columns_with_hxl
from onadata.settings.common import XLS_EXTENSIONS
# Supported external exports
EXTERNAL_EXPORT_TYPES = ["xlsx"]
EXPORT_EXT = {
"xlsx": Export.XLSX_EXPORT,
"csv": Export.CSV_EXPORT,
"csvzip": Export.CSV_ZIP_EXPORT,
"savzip": Export.SAV_ZIP_EXPORT,
"uuid": Export.EXTERNAL_EXPORT,
"kml": Export.KML_EXPORT,
"zip": Export.ZIP_EXPORT,
OSM: Export.OSM_EXPORT,
"gsheets": Export.GOOGLE_SHEETS_EXPORT,
"geojson": Export.GEOJSON_EXPORT,
}
def get_metadata_format(data_value):
"""Returns metadata format/extension"""
fmt = "csv"
if data_value.startswith("xform_geojson") or data_value.startswith(
"dataview_geojson"
):
fmt = "geojson"
return fmt
def include_hxl_row(dv_columns, hxl_columns):
"""
This function returns a boolean value. If the dataview's columns are not
part of the hxl columns, we return False. Returning False would mean that
we don't have to add the hxl column row if there aren't any hxl columns
in the dataview.
:param dv_columns - dataview columns
:param hxl_columns - hxl columns from the dataview's xform
:return True or False
"""
return bool(set(hxl_columns).intersection(set(dv_columns)))
def _get_export_type(export_type):
if export_type not in EXPORT_EXT or (
export_type == Export.GOOGLE_SHEETS_EXPORT
and not getattr(settings, "GOOGLE_EXPORT", False)
):
raise exceptions.ParseError(
_(f"'{export_type}' format not known or not implemented!")
)
return EXPORT_EXT[export_type]
# pylint: disable=too-many-arguments, too-many-locals, too-many-branches
def custom_response_handler( # noqa: C0901
request,
xform,
query,
export_type,
token=None,
meta=None,
dataview=False,
filename=None,
metadata=None,
):
"""
Returns a HTTP response with export file for download.
"""
export_type = _get_export_type(export_type)
if (
export_type in EXTERNAL_EXPORT_TYPES
and (token is not None)
or (meta is not None)
):
export_type = Export.EXTERNAL_EXPORT
options = parse_request_export_options(request.query_params)
dataview_pk = hasattr(dataview, "pk") and dataview.pk
options["dataview_pk"] = dataview_pk
options["host"] = request.get_host()
if dataview:
columns_with_hxl = get_columns_with_hxl(xform.survey.get("children"))
if columns_with_hxl:
options["include_hxl"] = include_hxl_row(
dataview.columns, list(columns_with_hxl)
)
try:
query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
except NoRecordsPermission:
return Response(
data=json.dumps({"details": _("You don't have permission")}),
status=status.HTTP_403_FORBIDDEN,
content_type="application/json",
)
if query:
options["query"] = query
remove_group_name = options.get("remove_group_name")
export_id = request.query_params.get("export_id")
if export_id:
export = get_object_or_404(Export, id=export_id, xform=xform)
else:
if export_type == Export.GOOGLE_SHEETS_EXPORT:
return Response(
data=json.dumps(
{"details": _("Sheets export only supported in async mode")}
),
status=status.HTTP_403_FORBIDDEN,
content_type="application/json",
)
# check if we need to re-generate,
# we always re-generate if a filter is specified
def _new_export():
return _generate_new_export(
request,
xform,
query,
export_type,
dataview_pk=dataview_pk,
metadata=metadata,
)
if should_create_new_export(xform, export_type, options, request=request):
export = _new_export()
else:
export = newest_export_for(xform, export_type, options)
if not export.filename and not export.error_message:
export = _new_export()
log_export(request, xform, export_type)
if export_type == Export.EXTERNAL_EXPORT:
return external_export_response(export)
if export.filename is None and export.error_message:
raise exceptions.ParseError(export.error_message)
# get extension from file_path, exporter could modify to
# xlsx if it exceeds limits
_path, ext = os.path.splitext(export.filename)
ext = ext[1:]
show_date = True
if filename is None and export.status == Export.SUCCESSFUL:
filename = _generate_filename(
request, xform, remove_group_name, dataview_pk=dataview_pk
)
else:
show_date = False
response = response_with_mimetype_and_name(
Export.EXPORT_MIMES[ext],
filename,
extension=ext,
show_date=show_date,
file_path=export.filepath,
)
return response
def | ( # noqa: C0901
request, xform, query, export_type, dataview_pk=False, metadata=None
):
query = _set_start_end_params(request, query)
extension = _get_extension_from_export_type(export_type)
options = {
"extension": extension,
"username": xform.user.username,
"id_string": xform.id_string,
"host": request.get_host(),
"sort": request.query_params.get('sort')
}
if query:
options["query"] = query
options["dataview_pk"] = dataview_pk
if export_type == Export.GOOGLE_SHEETS_EXPORT:
options["google_credentials"] = _get_google_credential(request).to_json()
try:
if export_type == Export.EXTERNAL_EXPORT:
options["token"] = request.GET.get("token")
options["data_id"] = request.GET.get("data_id")
options["meta"] = request.GET.get("meta")
export = generate_external_export(
export_type,
xform.user.username,
xform.id_string,
None,
options,
xform=xform,
)
elif export_type == Export.OSM_EXPORT:
export = generate_osm_export(
export_type,
xform.user.username,
xform.id_string,
None,
options,
xform=xform,
)
elif export_type == Export.ZIP_EXPORT:
export = generate_attachments_zip_export(
export_type,
xform.user.username,
xform.id_string,
None,
options,
xform=xform,
)
elif export_type == Export.KML_EXPORT:
export = generate_kml_export(
export_type,
xform.user.username,
xform.id_string,
None,
options,
xform=xform,
)
elif export_type == Export.GEOJSON_EXPORT:
export = generate_geojson_export(
export_type,
xform.user.username,
xform.id_string,
metadata,
None,
options,
xform=xform,
)
else:
options.update(parse_request_export_options(request.query_params))
export = generate_export(export_type, xform, None, options)
audit = {"xform": xform.id_string, "export_type": export_type}
log.audit_log(
log.Actions.EXPORT_CREATED,
request.user,
xform.user,
_("Created %(export_type)s export on '%(id_string)s'.")
% {"id_string": xform.id_string, "export_type": export_type.upper()},
audit,
request,
)
except NoRecordsFoundError as e:
raise Http404(_("No records found to export")) from e
except J2XException as e:
# j2x exception
return async_status(FAILED, str(e))
except SPSSIOError as e:
raise exceptions.ParseError(str(e)) from e
else:
return export
def log_export(request, xform, export_type):
"""
Logs audit logs of export requests.
"""
# log download as well
audit = {"xform": xform.id_string, "export_type": export_type}
log.audit_log(
log.Actions.EXPORT_DOWNLOADED,
request.user,
xform.user,
_(f"Downloaded {export_type.upper()} export on '{xform.id_string}'."),
audit,
request,
)
def external_export_response(export):
"""
Redirects to export_url of XLSReports successful export. In case of a
failure, returns a 400 HTTP JSON response with the error message.
"""
if isinstance(export, Export) and export.internal_status == Export.SUCCESSFUL:
return HttpResponseRedirect(export.export_url)
http_status = status.HTTP_400_BAD_REQUEST
return Response(json.dumps(export), http_status, content_type="application/json")
def _generate_filename(request, xform, remove_group_name=False, dataview_pk=False):
if request.GET.get("raw"):
filename = None
else:
# append group name removed flag otherwise use the form id_string
if remove_group_name:
filename = f"{xform.id_string}-{GROUPNAME_REMOVED_FLAG}"
elif dataview_pk:
filename = f"{xform.id_string}-{DATAVIEW_EXPORT}"
else:
filename = xform.id_string
return filename
def _set_start_end_params(request, query):
# check for start and end params
if "start" in request.GET or "end" in request.GET:
query = json.loads(query) if isinstance(query, six.string_types) else query
query[SUBMISSION_TIME] = {}
try:
if request.GET.get("start"):
query[SUBMISSION_TIME]["$gte"] = _format_date_for_mongo(
request.GET["start"]
)
if request.GET.get("end"):
query[SUBMISSION_TIME]["$lte"] = _format_date_for_mongo(
request.GET["end"]
)
except ValueError as e:
raise exceptions.ParseError(
_("Dates must be in the format YY_MM_DD_hh_mm_ss")
) from e
else:
query = json.dumps(query)
return query
def _get_extension_from_export_type(export_type):
extension = export_type
if export_type == Export.XLSX_EXPORT:
extension = "xlsx"
elif export_type in [Export.CSV_ZIP_EXPORT, Export.SAV_ZIP_EXPORT]:
extension = "zip"
return extension
# pylint: disable=invalid-name
def _format_date_for_mongo(datetime_str):
return datetime.strptime(datetime_str, "%y_%m_%d_%H_%M_%S").strftime(
"%Y-%m-%dT%H:%M:%S"
)
def process_async_export(request, xform, export_type, options=None):
"""
Check if should generate export or just return the latest export.
Rules for regenerating an export are:
1. Filter included on the exports.
2. New submission done.
3. Always regenerate external exports.
(External exports uses templates and the template might have
changed)
:param request:
:param xform:
:param export_type:
:param options: additional export params that may include
query: export filter
token: template url for xls external reports
meta: metadataid that contains the external xls report template url
remove_group_name: Flag to determine if group names should appear
:return: response dictionary
"""
# maintain the order of keys while processing the export
export_type = _get_export_type(export_type)
token = options.get("token")
meta = options.get("meta")
query = options.get("query")
force_xlsx = options.get("force_xlsx")
try:
query = filter_queryset_xform_meta_perms_sql(xform, request.user, query)
except NoRecordsPermission:
payload = {"details": _("You don't have permission")}
return Response(
data=json.dumps(payload),
status=status.HTTP_403_FORBIDDEN,
content_type="application/json",
)
else:
if query:
options["query"] = query
if (
export_type in EXTERNAL_EXPORT_TYPES
and (token is not None)
or (meta is not None)
):
export_type = Export.EXTERNAL_EXPORT
if export_type == Export.GOOGLE_SHEETS_EXPORT:
credential = _get_google_credential(request)
if isinstance(credential, HttpResponseRedirect):
return credential
options["google_credentials"] = credential.to_json()
if (
should_create_new_export(xform, export_type, options, request=request)
or export_type == Export.EXTERNAL_EXPORT
):
resp = {
"job_uuid": _create_export_async(
xform, export_type, query, force_xlsx, options=options
)
}
else:
print("Do not create a new export.")
export = newest_export_for(xform, export_type, options)
if not export.filename:
# tends to happen when using newest_export_for.
resp = {
"job_uuid": _create_export_async(
xform, export_type, query, force_xlsx, options=options
)
}
else:
resp = export_async_export_response(request, export)
return resp
def _create_export_async(
xform, export_type, query=None, force_xlsx=False, options=None
):
"""
Creates async exports
:param xform:
:param export_type:
:param query:
:param force_xlsx:
:param options:
:return:
job_uuid generated
"""
export = check_pending_export(xform, export_type, options)
if export:
return export.task_id
try:
export, async_result = viewer_task.create_async_export(
xform, export_type, query, force_xlsx, options=options
)
except ExportConnectionError as e:
raise ServiceUnavailable from e
return async_result.task_id
def export_async_export_response(request, export):
"""
Checks the export status and generates the reponse
:param request:
:param export:
:return: response dict example {"job_status": "Success", "export_url": ...}
"""
if export.status == Export.SUCCESSFUL:
if export.export_type not in [
Export.EXTERNAL_EXPORT,
Export.GOOGLE_SHEETS_EXPORT,
]:
export_url = reverse(
"export-detail", kwargs={"pk": export.pk}, request=request
)
else:
export_url = export.export_url
resp = async_status(SUCCESSFUL)
resp["export_url"] = export_url
elif export.status == Export.PENDING:
resp = async_status(PENDING)
else:
resp = async_status(FAILED, export.error_message)
return resp
def get_async_response(job_uuid, request, xform, count=0):
"""
Returns the status of an async task for the given job_uuid.
"""
def _get_response():
export = get_object_or_404(Export, task_id=job_uuid)
return export_async_export_response(request, export)
try:
job = AsyncResult(job_uuid)
if job.state == "SUCCESS":
resp = _get_response()
else:
resp = async_status(celery_state_to_status(job.state))
# append task result to the response
if job.result:
result = job.result
if isinstance(result, dict):
resp.update(result)
else:
resp.update({"progress": str(result)})
except (OperationalError, ConnectionError) as e:
report_exception("Connection Error", e, sys.exc_info())
if count > 0:
raise ServiceUnavailable from e
return get_async_response(job_uuid, request, xform, count + 1)
except BacklogLimitExceeded:
# most likely still processing
resp = async_status(celery_state_to_status("PENDING"))
return resp
# pylint: disable=redefined-builtin
def response_for_format(data, format=None):
"""
Return appropriately formatted data in Response().
"""
if format == "xml":
formatted_data = data.xml
elif format in ("xls", "xlsx"):
if not data.xls or not data.xls.storage.exists(data.xls.name):
raise Http404()
formatted_data = data.xls
else:
formatted_data = (
json.loads(data.json) if isinstance(data.json, str) else data.json
)
return Response(formatted_data)
def get_existing_file_format(data, format):
"""
Util function to extract the existing form extension
"""
if format in XLS_EXTENSIONS:
existing_file_format = data.name.split(".")[-1]
return existing_file_format
return format
def generate_google_web_flow(request):
"""
Returns a OAuth2WebServerFlow object from the request redirect_uri.
"""
if "redirect_uri" in request.GET:
redirect_uri = request.GET.get("redirect_uri")
elif "redirect_uri" in request.POST:
redirect_uri = request.POST.get("redirect_uri")
elif "redirect_uri" in request.query_params:
redirect_uri = request.query_params.get("redirect_uri")
elif "redirect_uri" in request.data:
redirect_uri = request.data.get("redirect_uri")
else:
redirect_uri = settings.GOOGLE_STEP2_URI
return create_flow(redirect_uri)
def _get_google_credential(request):
credential = None
storage = None
if request.user.is_authenticated:
try:
storage = TokenStorageModel.objects.get(id=request.user)
credential = storage.credential
except TokenStorageModel.DoesNotExist:
pass
elif request.session.get("access_token"):
credential = Credentials(token=request.session["access_token"])
if credential and not credential.valid:
try:
credential.refresh(Request())
storage.credential = credential
storage.save()
except RefreshError:
storage.delete()
credential = None
if not credential:
google_flow = generate_google_web_flow(request)
authorization_url, _state = google_flow.authorization_url(
access_type="offline", include_granted_scopes="true", prompt="consent"
)
return HttpResponseRedirect(authorization_url)
return credential
| _generate_new_export | identifier_name |
custom_widget.rs | //!
//!
//! A demonstration of designing a custom, third-party widget.
//!
//! In this case, we'll design a simple circular button.
//!
//! All of the custom widget design will occur within the `circular_button` module.
//!
//! We'll *use* our fancy circular button in the `main` function (below the circular_button module).
//!
//! Note that in this case, we use `piston_window` to draw our widget, however in practise you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
//!
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate vecmath;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod::{
default_x_dimension,
default_y_dimension,
CharacterCache,
Circle,
Color,
Colorable,
CommonBuilder,
Dimension,
Dimensions,
FontSize,
IndexSlot,
Labelable,
Mouse,
Point,
Positionable,
Scalar,
Text,
Theme,
UpdateArgs,
Widget,
WidgetKind,
Ui,
};
/// The type upon which we'll implement the `Widget` trait.
pub struct CircularButton<'a, F> {
/// An object that handles some of the dirty work of rendering a GUI. We don't
/// really have to worry about it.
common: CommonBuilder,
/// Optional label string for the button.
maybe_label: Option<&'a str>,
/// Optional callback for when the button is pressed. If you want the button to
/// do anything, this callback must exist.
maybe_react: Option<F>,
/// See the Style struct below.
style: Style,
/// Whether the button is currently enabled, i.e. whether it responds to
/// user input.
enabled: bool
}
/// Represents the unique styling for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct Style {
/// Color of the button.
pub maybe_color: Option<Color>,
/// Radius of the button.
pub maybe_radius: Option<Scalar>,
/// Color of the button's label.
pub maybe_label_color: Option<Color>,
/// Font size of the button's label.
pub maybe_label_font_size: Option<u32>,
}
/// Represents the unique, cached state for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// The current interaction state. See the Interaction enum below. See also
/// get_new_interaction below, where we define all the logic for transitioning between
/// interaction states.
interaction: Interaction,
/// An index to use for our **Circle** primitive graphics widget.
circle_idx: IndexSlot,
/// An index to use for our **Text** primitive graphics widget (for the label).
text_idx: IndexSlot,
}
/// A `&'static str` that can be used to uniquely identify our widget type.
pub const KIND: WidgetKind = "CircularButton";
/// A type to keep track of interaction between updates.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Interaction {
Normal,
Highlighted,
Clicked,
}
impl Interaction {
/// Alter the widget color depending on the current interaction.
fn color(&self, color: Color) -> Color {
match *self {
/// The base color as defined in the Style struct, or a default provided
/// by the current Theme if the Style has no color.
Interaction::Normal => color,
/// The Color object (from Elmesque) can calculate a highlighted version
/// of itself. We don't have to use it, though. We could specify any color
/// we want.
Interaction::Highlighted => color.highlighted(),
/// Ditto for clicked.
Interaction::Clicked => color.clicked(),
}
}
}
/// Check the current interaction with the button. Takes into account whether the mouse is
/// over the button and the previous interaction state.
fn get_new_interaction(is_over: bool, prev: Interaction, mouse: Mouse) -> Interaction {
use conrod::MouseButtonPosition::{Down, Up};
use self::Interaction::{Normal, Highlighted, Clicked};
match (is_over, prev, mouse.left.position) {
// LMB is down over the button. But the button wasn't Highlighted last
// update. This means the user clicked somewhere outside the button and
// moved over the button holding LMB down. We do nothing in this case.
(true, Normal, Down) => Normal,
// LMB is down over the button. The button was either Highlighted or Clicked
// last update. If it was highlighted before, that means the user clicked
// just now, and we transition to the Clicked state. If it was clicked
// before, that means the user is still holding LMB down from a previous
// click, in which case the state remains Clicked.
(true, _, Down) => Clicked,
// LMB is up. The mouse is hovering over the button. Regardless of what the
// state was last update, the state should definitely be Highlighted now.
(true, _, Up) => Highlighted,
// LMB is down, the mouse is not over the button, but the previous state was
// Clicked. That means the user clicked the button and then moved the mouse
// outside the button while holding LMB down. The button stays Clicked.
(false, Clicked, Down) => Clicked,
// If none of the above applies, then nothing interesting is happening with
// this button.
_ => Normal,
}
}
/// Return whether or not a given point is over a circle at a given point on a
/// Cartesian plane. We use this to determine whether the mouse is over the button.
pub fn is_over_circ(circ_center: Point, mouse_point: Point, dim: Dimensions) -> bool {
// Offset vector from the center of the circle to the mouse.
let offset = ::vecmath::vec2_sub(mouse_point, circ_center);
// If the length of the offset vector is less than or equal to the circle's
// radius, then the mouse is inside the circle. We assume that dim is a square
// bounding box around the circle, thus 2 * radius == dim[0] == dim[1].
::vecmath::vec2_len(offset) <= dim[0] / 2.0
}
impl<'a, F> CircularButton<'a, F> {
/// Create a button context to be built upon.
pub fn new() -> CircularButton<'a, F> {
CircularButton {
common: CommonBuilder::new(),
maybe_react: None,
maybe_label: None,
style: Style::new(),
enabled: true,
}
}
/// Set the reaction for the Button. The reaction will be triggered upon release
/// of the button. Like other Conrod configs, this returns self for chainability.
pub fn react(mut self, reaction: F) -> Self {
self.maybe_react = Some(reaction);
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
/// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
/// documentation for more details.
impl<'a, F> Widget for CircularButton<'a, F>
where F: FnMut()
{
/// The State struct that we defined above.
type State = State;
/// The Style struct that we defined above.
type Style = Style;
fn common(&self) -> &CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
interaction: Interaction::Normal,
circle_idx: IndexSlot::new(),
text_idx: IndexSlot::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// Default width of the widget.
///
/// This method is optional.
/// | //
// Defaults can come from several places. Here, we define how certain defaults take
// precedence over others.
//
// Most commonly, defaults are to be retrieved from the `Theme`, however in some cases
// some other logic may need to be considered.
default_x_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Default height of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_y_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
default_y_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Update the state of the button. The state may or may not have changed since
/// the last update. (E.g. it may have changed because the user moused over the
/// button.) If the state has changed, return the new state. Else, return None.
fn update<C: CharacterCache>(mut self, args: UpdateArgs<Self, C>) {
let UpdateArgs { idx, state, rect, mut ui, style, .. } = args;
let (xy, dim) = rect.xy_dim();
let maybe_mouse = ui.input().maybe_mouse.map(|mouse| mouse.relative_to(xy));
// Check whether or not a new interaction has occurred.
let new_interaction = match (self.enabled, maybe_mouse) {
(false, _) | (true, None) => Interaction::Normal,
(true, Some(mouse)) => {
// Conrod does us a favor by transforming mouse.xy into this widget's
// local coordinate system. Because mouse.xy is in local coords,
// we must also pass the circle center in local coords. Thus we pass
// [0.0, 0.0] as the center.
//
// See above where we define is_over_circ.
let is_over = is_over_circ([0.0, 0.0], mouse.xy, dim);
// See above where we define get_new_interaction.
get_new_interaction(is_over, state.view().interaction, mouse)
},
};
// If the mouse was released over the button, react. state.interaction is the
// button's state as of a moment ago. new_interaction is the updated state as
// of right now. So this if statement is saying: If the button was clicked a
// moment ago, and it's now highlighted, then the button has been activated.
if let (Interaction::Clicked, Interaction::Highlighted) =
(state.view().interaction, new_interaction)
{
// Recall that our CircularButton struct includes maybe_react, which
// stores either a reaction function or None. If maybe_react is Some, call
// the function.
if let Some(ref mut react) = self.maybe_react {
react();
}
}
// Here we check to see whether or not our button should capture the mouse.
//
// Widgets can "capture" user input. If the button captures the mouse, then mouse
// events will only be seen by the button. Other widgets will not see mouse events
// until the button uncaptures the mouse.
match (state.view().interaction, new_interaction) {
// If the user has pressed the button we capture the mouse.
(Interaction::Highlighted, Interaction::Clicked) => {
ui.capture_mouse();
},
// If the user releases the button, we uncapture the mouse.
(Interaction::Clicked, Interaction::Highlighted) |
(Interaction::Clicked, Interaction::Normal) => {
ui.uncapture_mouse();
},
_ => (),
}
// Whenever we call `state.update` (as below), a flag is set within our `State`
// indicating that there has been some mutation and that our widget requires a
// re-draw. Thus, we only want to call `state.update` if there has been some change in
// order to only re-draw when absolutely required.
//
// You can see how we do this below - we check if the state has changed before calling
// `state.update`.
// If the interaction has changed, set the new interaction.
if state.view().interaction != new_interaction {
state.update(|state| state.interaction = new_interaction);
}
// Finally, we'll describe how we want our widget drawn by simply instantiating the
// necessary primitive graphics widgets.
//
// Conrod will automatically determine whether or not any changes have occurred and
// whether or not any widgets need to be re-drawn.
//
// The primitive graphics widgets are special in that their unique state is used within
// conrod's backend to do the actual drawing. This allows us to build up more complex
// widgets by using these simple primitives with our familiar layout, coloring, etc
// methods.
//
// If you notice that conrod is missing some sort of primitive graphics that you
// require, please file an issue or open a PR so we can add it! :)
// First, we'll draw the **Circle** with a radius that is half our given width.
let radius = rect.w() / 2.0;
let color = new_interaction.color(style.color(ui.theme()));
let circle_idx = state.view().circle_idx.get(&mut ui);
Circle::fill(radius)
.middle_of(idx)
.graphics_for(idx)
.color(color)
.set(circle_idx, &mut ui);
// Now we'll instantiate our label using the **Text** widget.
let label_color = style.label_color(ui.theme());
let font_size = style.label_font_size(ui.theme());
let text_idx = state.view().text_idx.get(&mut ui);
if let Some(ref label) = self.maybe_label {
Text::new(label)
.middle_of(idx)
.font_size(font_size)
.graphics_for(idx)
.color(label_color)
.set(text_idx, &mut ui);
}
}
}
impl Style {
/// Construct the default Style.
pub fn new() -> Style {
Style {
maybe_color: None,
maybe_radius: None,
maybe_label_color: None,
maybe_label_font_size: None,
}
}
/// Get the Color for an Element.
pub fn color(&self, theme: &Theme) -> Color {
self.maybe_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_color.unwrap_or(theme.shape_color)
})).unwrap_or(theme.shape_color)
}
/// Get the label Color for an Element.
pub fn label_color(&self, theme: &Theme) -> Color {
self.maybe_label_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_color.unwrap_or(theme.label_color)
})).unwrap_or(theme.label_color)
}
/// Get the label font size for an Element.
pub fn label_font_size(&self, theme: &Theme) -> FontSize {
self.maybe_label_font_size.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_font_size.unwrap_or(theme.font_size_medium)
})).unwrap_or(theme.font_size_medium)
}
}
/// Provide the chainable color() configuration method.
impl<'a, F> Colorable for CircularButton<'a, F> {
fn color(mut self, color: Color) -> Self {
self.style.maybe_color = Some(color);
self
}
}
/// Provide the chainable label(), label_color(), and label_font_size()
/// configuration methods.
impl<'a, F> Labelable<'a> for CircularButton<'a, F> {
fn label(mut self, text: &'a str) -> Self {
self.maybe_label = Some(text);
self
}
fn label_color(mut self, color: Color) -> Self {
self.style.maybe_label_color = Some(color);
self
}
fn label_font_size(mut self, size: FontSize) -> Self {
self.style.maybe_label_font_size = Some(size);
self
}
}
}
fn main() {
use piston_window::{EventLoop, Glyphs, PistonWindow, OpenGL, UpdateEvent, WindowSettings};
use conrod::{Colorable, Labelable, Positionable, Sizeable, Widget};
use circular_button::CircularButton;
// PistonWindow has two type parameters, but the default type is
// PistonWindow<T = (), W: Window = GlutinWindow>. To change the Piston backend,
// specify a different type in the let binding, e.g.
// let window: PistonWindow<(), Sdl2Window>.
let window: PistonWindow = WindowSettings::new("Control Panel", [1200, 800])
.opengl(OpenGL::V3_2)
.exit_on_esc(true)
.build().unwrap();
// Conrod's main object.
let mut ui = {
// Load a font. `Glyphs` is provided to us via piston_window and gfx, though you may use
// any type that implements `CharacterCache`.
let assets = find_folder::Search::ParentsThenKids(3, 3)
.for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
let glyph_cache = Glyphs::new(&font_path, window.factory.borrow().clone()).unwrap();
conrod::Ui::new(glyph_cache, conrod::Theme::default())
};
for e in window.ups(60) {
// Pass each `Event` to the `Ui`.
ui.handle_event(e.event.as_ref().unwrap());
e.update(|_| ui.set_widgets(|ui| {
// Sets a color to clear the background with before the Ui draws our widget.
conrod::Split::new(BACKGROUND).color(conrod::color::dark_red()).set(ui);
// Create an instance of our custom widget.
CircularButton::new()
.color(conrod::color::rgb(0.0, 0.3, 0.1))
.middle_of(BACKGROUND)
.dimensions(256.0, 256.0)
.label_color(conrod::color::white())
.label("Circular Button")
// This is called when the user clicks the button.
.react(|| println!("Click"))
// Add the widget to the conrod::Ui. This schedules the widget it to be
// drawn when we call Ui::draw.
.set(CIRCLE_BUTTON, ui);
}));
// Draws the whole Ui (in this case, just our widget) whenever a change occurs.
e.draw_2d(|c, g| ui.draw_if_changed(c, g))
}
}
// The `widget_ids` macro is a easy, safe way of generating unique `WidgetId`s.
widget_ids! {
// An ID for the background widget, upon which we'll place our custom button.
BACKGROUND,
// The WidgetId we'll use to plug our widget into the `Ui`.
CIRCLE_BUTTON,
} | /// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_x_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
// If no width was given via the `Sizeable` (a trait implemented for all widgets)
// methods, some default width must be chosen. | random_line_split |
custom_widget.rs | //!
//!
//! A demonstration of designing a custom, third-party widget.
//!
//! In this case, we'll design a simple circular button.
//!
//! All of the custom widget design will occur within the `circular_button` module.
//!
//! We'll *use* our fancy circular button in the `main` function (below the circular_button module).
//!
//! Note that in this case, we use `piston_window` to draw our widget, however in practise you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
//!
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate vecmath;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod::{
default_x_dimension,
default_y_dimension,
CharacterCache,
Circle,
Color,
Colorable,
CommonBuilder,
Dimension,
Dimensions,
FontSize,
IndexSlot,
Labelable,
Mouse,
Point,
Positionable,
Scalar,
Text,
Theme,
UpdateArgs,
Widget,
WidgetKind,
Ui,
};
/// The type upon which we'll implement the `Widget` trait.
pub struct CircularButton<'a, F> {
/// An object that handles some of the dirty work of rendering a GUI. We don't
/// really have to worry about it.
common: CommonBuilder,
/// Optional label string for the button.
maybe_label: Option<&'a str>,
/// Optional callback for when the button is pressed. If you want the button to
/// do anything, this callback must exist.
maybe_react: Option<F>,
/// See the Style struct below.
style: Style,
/// Whether the button is currently enabled, i.e. whether it responds to
/// user input.
enabled: bool
}
/// Represents the unique styling for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct Style {
/// Color of the button.
pub maybe_color: Option<Color>,
/// Radius of the button.
pub maybe_radius: Option<Scalar>,
/// Color of the button's label.
pub maybe_label_color: Option<Color>,
/// Font size of the button's label.
pub maybe_label_font_size: Option<u32>,
}
/// Represents the unique, cached state for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// The current interaction state. See the Interaction enum below. See also
/// get_new_interaction below, where we define all the logic for transitioning between
/// interaction states.
interaction: Interaction,
/// An index to use for our **Circle** primitive graphics widget.
circle_idx: IndexSlot,
/// An index to use for our **Text** primitive graphics widget (for the label).
text_idx: IndexSlot,
}
/// A `&'static str` that can be used to uniquely identify our widget type.
pub const KIND: WidgetKind = "CircularButton";
/// A type to keep track of interaction between updates.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Interaction {
Normal,
Highlighted,
Clicked,
}
impl Interaction {
/// Alter the widget color depending on the current interaction.
fn color(&self, color: Color) -> Color {
match *self {
/// The base color as defined in the Style struct, or a default provided
/// by the current Theme if the Style has no color.
Interaction::Normal => color,
/// The Color object (from Elmesque) can calculate a highlighted version
/// of itself. We don't have to use it, though. We could specify any color
/// we want.
Interaction::Highlighted => color.highlighted(),
/// Ditto for clicked.
Interaction::Clicked => color.clicked(),
}
}
}
/// Check the current interaction with the button. Takes into account whether the mouse is
/// over the button and the previous interaction state.
fn get_new_interaction(is_over: bool, prev: Interaction, mouse: Mouse) -> Interaction {
use conrod::MouseButtonPosition::{Down, Up};
use self::Interaction::{Normal, Highlighted, Clicked};
match (is_over, prev, mouse.left.position) {
// LMB is down over the button. But the button wasn't Highlighted last
// update. This means the user clicked somewhere outside the button and
// moved over the button holding LMB down. We do nothing in this case.
(true, Normal, Down) => Normal,
// LMB is down over the button. The button was either Highlighted or Clicked
// last update. If it was highlighted before, that means the user clicked
// just now, and we transition to the Clicked state. If it was clicked
// before, that means the user is still holding LMB down from a previous
// click, in which case the state remains Clicked.
(true, _, Down) => Clicked,
// LMB is up. The mouse is hovering over the button. Regardless of what the
// state was last update, the state should definitely be Highlighted now.
(true, _, Up) => Highlighted,
// LMB is down, the mouse is not over the button, but the previous state was
// Clicked. That means the user clicked the button and then moved the mouse
// outside the button while holding LMB down. The button stays Clicked.
(false, Clicked, Down) => Clicked,
// If none of the above applies, then nothing interesting is happening with
// this button.
_ => Normal,
}
}
/// Return whether or not a given point is over a circle at a given point on a
/// Cartesian plane. We use this to determine whether the mouse is over the button.
pub fn is_over_circ(circ_center: Point, mouse_point: Point, dim: Dimensions) -> bool {
// Offset vector from the center of the circle to the mouse.
let offset = ::vecmath::vec2_sub(mouse_point, circ_center);
// If the length of the offset vector is less than or equal to the circle's
// radius, then the mouse is inside the circle. We assume that dim is a square
// bounding box around the circle, thus 2 * radius == dim[0] == dim[1].
::vecmath::vec2_len(offset) <= dim[0] / 2.0
}
impl<'a, F> CircularButton<'a, F> {
/// Create a button context to be built upon.
pub fn new() -> CircularButton<'a, F> {
CircularButton {
common: CommonBuilder::new(),
maybe_react: None,
maybe_label: None,
style: Style::new(),
enabled: true,
}
}
/// Set the reaction for the Button. The reaction will be triggered upon release
/// of the button. Like other Conrod configs, this returns self for chainability.
pub fn react(mut self, reaction: F) -> Self {
self.maybe_react = Some(reaction);
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
/// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
/// documentation for more details.
impl<'a, F> Widget for CircularButton<'a, F>
where F: FnMut()
{
/// The State struct that we defined above.
type State = State;
/// The Style struct that we defined above.
type Style = Style;
fn common(&self) -> &CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
interaction: Interaction::Normal,
circle_idx: IndexSlot::new(),
text_idx: IndexSlot::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// Default width of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_x_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
// If no width was given via the `Sizeable` (a trait implemented for all widgets)
// methods, some default width must be chosen.
//
// Defaults can come from several places. Here, we define how certain defaults take
// precedence over others.
//
// Most commonly, defaults are to be retrieved from the `Theme`, however in some cases
// some other logic may need to be considered.
default_x_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Default height of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_y_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
default_y_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Update the state of the button. The state may or may not have changed since
/// the last update. (E.g. it may have changed because the user moused over the
/// button.) If the state has changed, return the new state. Else, return None.
fn update<C: CharacterCache>(mut self, args: UpdateArgs<Self, C>) {
let UpdateArgs { idx, state, rect, mut ui, style, .. } = args;
let (xy, dim) = rect.xy_dim();
let maybe_mouse = ui.input().maybe_mouse.map(|mouse| mouse.relative_to(xy));
// Check whether or not a new interaction has occurred.
let new_interaction = match (self.enabled, maybe_mouse) {
(false, _) | (true, None) => Interaction::Normal,
(true, Some(mouse)) => {
// Conrod does us a favor by transforming mouse.xy into this widget's
// local coordinate system. Because mouse.xy is in local coords,
// we must also pass the circle center in local coords. Thus we pass
// [0.0, 0.0] as the center.
//
// See above where we define is_over_circ.
let is_over = is_over_circ([0.0, 0.0], mouse.xy, dim);
// See above where we define get_new_interaction.
get_new_interaction(is_over, state.view().interaction, mouse)
},
};
// If the mouse was released over the button, react. state.interaction is the
// button's state as of a moment ago. new_interaction is the updated state as
// of right now. So this if statement is saying: If the button was clicked a
// moment ago, and it's now highlighted, then the button has been activated.
if let (Interaction::Clicked, Interaction::Highlighted) =
(state.view().interaction, new_interaction)
{
// Recall that our CircularButton struct includes maybe_react, which
// stores either a reaction function or None. If maybe_react is Some, call
// the function.
if let Some(ref mut react) = self.maybe_react |
}
// Here we check to see whether or not our button should capture the mouse.
//
// Widgets can "capture" user input. If the button captures the mouse, then mouse
// events will only be seen by the button. Other widgets will not see mouse events
// until the button uncaptures the mouse.
match (state.view().interaction, new_interaction) {
// If the user has pressed the button we capture the mouse.
(Interaction::Highlighted, Interaction::Clicked) => {
ui.capture_mouse();
},
// If the user releases the button, we uncapture the mouse.
(Interaction::Clicked, Interaction::Highlighted) |
(Interaction::Clicked, Interaction::Normal) => {
ui.uncapture_mouse();
},
_ => (),
}
// Whenever we call `state.update` (as below), a flag is set within our `State`
// indicating that there has been some mutation and that our widget requires a
// re-draw. Thus, we only want to call `state.update` if there has been some change in
// order to only re-draw when absolutely required.
//
// You can see how we do this below - we check if the state has changed before calling
// `state.update`.
// If the interaction has changed, set the new interaction.
if state.view().interaction != new_interaction {
state.update(|state| state.interaction = new_interaction);
}
// Finally, we'll describe how we want our widget drawn by simply instantiating the
// necessary primitive graphics widgets.
//
// Conrod will automatically determine whether or not any changes have occurred and
// whether or not any widgets need to be re-drawn.
//
// The primitive graphics widgets are special in that their unique state is used within
// conrod's backend to do the actual drawing. This allows us to build up more complex
// widgets by using these simple primitives with our familiar layout, coloring, etc
// methods.
//
// If you notice that conrod is missing some sort of primitive graphics that you
// require, please file an issue or open a PR so we can add it! :)
// First, we'll draw the **Circle** with a radius that is half our given width.
let radius = rect.w() / 2.0;
let color = new_interaction.color(style.color(ui.theme()));
let circle_idx = state.view().circle_idx.get(&mut ui);
Circle::fill(radius)
.middle_of(idx)
.graphics_for(idx)
.color(color)
.set(circle_idx, &mut ui);
// Now we'll instantiate our label using the **Text** widget.
let label_color = style.label_color(ui.theme());
let font_size = style.label_font_size(ui.theme());
let text_idx = state.view().text_idx.get(&mut ui);
if let Some(ref label) = self.maybe_label {
Text::new(label)
.middle_of(idx)
.font_size(font_size)
.graphics_for(idx)
.color(label_color)
.set(text_idx, &mut ui);
}
}
}
impl Style {
/// Construct the default Style.
pub fn new() -> Style {
Style {
maybe_color: None,
maybe_radius: None,
maybe_label_color: None,
maybe_label_font_size: None,
}
}
/// Get the Color for an Element.
pub fn color(&self, theme: &Theme) -> Color {
self.maybe_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_color.unwrap_or(theme.shape_color)
})).unwrap_or(theme.shape_color)
}
/// Get the label Color for an Element.
pub fn label_color(&self, theme: &Theme) -> Color {
self.maybe_label_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_color.unwrap_or(theme.label_color)
})).unwrap_or(theme.label_color)
}
/// Get the label font size for an Element.
pub fn label_font_size(&self, theme: &Theme) -> FontSize {
self.maybe_label_font_size.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_font_size.unwrap_or(theme.font_size_medium)
})).unwrap_or(theme.font_size_medium)
}
}
/// Provide the chainable color() configuration method.
impl<'a, F> Colorable for CircularButton<'a, F> {
fn color(mut self, color: Color) -> Self {
self.style.maybe_color = Some(color);
self
}
}
/// Provide the chainable label(), label_color(), and label_font_size()
/// configuration methods.
impl<'a, F> Labelable<'a> for CircularButton<'a, F> {
fn label(mut self, text: &'a str) -> Self {
self.maybe_label = Some(text);
self
}
fn label_color(mut self, color: Color) -> Self {
self.style.maybe_label_color = Some(color);
self
}
fn label_font_size(mut self, size: FontSize) -> Self {
self.style.maybe_label_font_size = Some(size);
self
}
}
}
fn main() {
use piston_window::{EventLoop, Glyphs, PistonWindow, OpenGL, UpdateEvent, WindowSettings};
use conrod::{Colorable, Labelable, Positionable, Sizeable, Widget};
use circular_button::CircularButton;
// PistonWindow has two type parameters, but the default type is
// PistonWindow<T = (), W: Window = GlutinWindow>. To change the Piston backend,
// specify a different type in the let binding, e.g.
// let window: PistonWindow<(), Sdl2Window>.
let window: PistonWindow = WindowSettings::new("Control Panel", [1200, 800])
.opengl(OpenGL::V3_2)
.exit_on_esc(true)
.build().unwrap();
// Conrod's main object.
let mut ui = {
// Load a font. `Glyphs` is provided to us via piston_window and gfx, though you may use
// any type that implements `CharacterCache`.
let assets = find_folder::Search::ParentsThenKids(3, 3)
.for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
let glyph_cache = Glyphs::new(&font_path, window.factory.borrow().clone()).unwrap();
conrod::Ui::new(glyph_cache, conrod::Theme::default())
};
for e in window.ups(60) {
// Pass each `Event` to the `Ui`.
ui.handle_event(e.event.as_ref().unwrap());
e.update(|_| ui.set_widgets(|ui| {
// Sets a color to clear the background with before the Ui draws our widget.
conrod::Split::new(BACKGROUND).color(conrod::color::dark_red()).set(ui);
// Create an instance of our custom widget.
CircularButton::new()
.color(conrod::color::rgb(0.0, 0.3, 0.1))
.middle_of(BACKGROUND)
.dimensions(256.0, 256.0)
.label_color(conrod::color::white())
.label("Circular Button")
// This is called when the user clicks the button.
.react(|| println!("Click"))
// Add the widget to the conrod::Ui. This schedules the widget it to be
// drawn when we call Ui::draw.
.set(CIRCLE_BUTTON, ui);
}));
// Draws the whole Ui (in this case, just our widget) whenever a change occurs.
e.draw_2d(|c, g| ui.draw_if_changed(c, g))
}
}
// The `widget_ids` macro is a easy, safe way of generating unique `WidgetId`s.
widget_ids! {
// An ID for the background widget, upon which we'll place our custom button.
BACKGROUND,
// The WidgetId we'll use to plug our widget into the `Ui`.
CIRCLE_BUTTON,
}
| {
react();
} | conditional_block |
custom_widget.rs | //!
//!
//! A demonstration of designing a custom, third-party widget.
//!
//! In this case, we'll design a simple circular button.
//!
//! All of the custom widget design will occur within the `circular_button` module.
//!
//! We'll *use* our fancy circular button in the `main` function (below the circular_button module).
//!
//! Note that in this case, we use `piston_window` to draw our widget, however in practise you may
//! use any backend you wish.
//!
//! For more information, please see the `Widget` trait documentation.
//!
#[macro_use] extern crate conrod;
extern crate find_folder;
extern crate piston_window;
extern crate vecmath;
/// The module in which we'll implement our own custom circular button.
mod circular_button {
use conrod::{
default_x_dimension,
default_y_dimension,
CharacterCache,
Circle,
Color,
Colorable,
CommonBuilder,
Dimension,
Dimensions,
FontSize,
IndexSlot,
Labelable,
Mouse,
Point,
Positionable,
Scalar,
Text,
Theme,
UpdateArgs,
Widget,
WidgetKind,
Ui,
};
/// The type upon which we'll implement the `Widget` trait.
pub struct CircularButton<'a, F> {
/// An object that handles some of the dirty work of rendering a GUI. We don't
/// really have to worry about it.
common: CommonBuilder,
/// Optional label string for the button.
maybe_label: Option<&'a str>,
/// Optional callback for when the button is pressed. If you want the button to
/// do anything, this callback must exist.
maybe_react: Option<F>,
/// See the Style struct below.
style: Style,
/// Whether the button is currently enabled, i.e. whether it responds to
/// user input.
enabled: bool
}
/// Represents the unique styling for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct Style {
/// Color of the button.
pub maybe_color: Option<Color>,
/// Radius of the button.
pub maybe_radius: Option<Scalar>,
/// Color of the button's label.
pub maybe_label_color: Option<Color>,
/// Font size of the button's label.
pub maybe_label_font_size: Option<u32>,
}
/// Represents the unique, cached state for our CircularButton widget.
#[derive(Clone, Debug, PartialEq)]
pub struct State {
/// The current interaction state. See the Interaction enum below. See also
/// get_new_interaction below, where we define all the logic for transitioning between
/// interaction states.
interaction: Interaction,
/// An index to use for our **Circle** primitive graphics widget.
circle_idx: IndexSlot,
/// An index to use for our **Text** primitive graphics widget (for the label).
text_idx: IndexSlot,
}
/// A `&'static str` that can be used to uniquely identify our widget type.
pub const KIND: WidgetKind = "CircularButton";
/// A type to keep track of interaction between updates.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Interaction {
Normal,
Highlighted,
Clicked,
}
impl Interaction {
/// Alter the widget color depending on the current interaction.
fn color(&self, color: Color) -> Color {
match *self {
/// The base color as defined in the Style struct, or a default provided
/// by the current Theme if the Style has no color.
Interaction::Normal => color,
/// The Color object (from Elmesque) can calculate a highlighted version
/// of itself. We don't have to use it, though. We could specify any color
/// we want.
Interaction::Highlighted => color.highlighted(),
/// Ditto for clicked.
Interaction::Clicked => color.clicked(),
}
}
}
/// Check the current interaction with the button. Takes into account whether the mouse is
/// over the button and the previous interaction state.
fn get_new_interaction(is_over: bool, prev: Interaction, mouse: Mouse) -> Interaction {
use conrod::MouseButtonPosition::{Down, Up};
use self::Interaction::{Normal, Highlighted, Clicked};
match (is_over, prev, mouse.left.position) {
// LMB is down over the button. But the button wasn't Highlighted last
// update. This means the user clicked somewhere outside the button and
// moved over the button holding LMB down. We do nothing in this case.
(true, Normal, Down) => Normal,
// LMB is down over the button. The button was either Highlighted or Clicked
// last update. If it was highlighted before, that means the user clicked
// just now, and we transition to the Clicked state. If it was clicked
// before, that means the user is still holding LMB down from a previous
// click, in which case the state remains Clicked.
(true, _, Down) => Clicked,
// LMB is up. The mouse is hovering over the button. Regardless of what the
// state was last update, the state should definitely be Highlighted now.
(true, _, Up) => Highlighted,
// LMB is down, the mouse is not over the button, but the previous state was
// Clicked. That means the user clicked the button and then moved the mouse
// outside the button while holding LMB down. The button stays Clicked.
(false, Clicked, Down) => Clicked,
// If none of the above applies, then nothing interesting is happening with
// this button.
_ => Normal,
}
}
/// Return whether or not a given point is over a circle at a given point on a
/// Cartesian plane. We use this to determine whether the mouse is over the button.
pub fn is_over_circ(circ_center: Point, mouse_point: Point, dim: Dimensions) -> bool {
// Offset vector from the center of the circle to the mouse.
let offset = ::vecmath::vec2_sub(mouse_point, circ_center);
// If the length of the offset vector is less than or equal to the circle's
// radius, then the mouse is inside the circle. We assume that dim is a square
// bounding box around the circle, thus 2 * radius == dim[0] == dim[1].
::vecmath::vec2_len(offset) <= dim[0] / 2.0
}
impl<'a, F> CircularButton<'a, F> {
/// Create a button context to be built upon.
pub fn new() -> CircularButton<'a, F> {
CircularButton {
common: CommonBuilder::new(),
maybe_react: None,
maybe_label: None,
style: Style::new(),
enabled: true,
}
}
/// Set the reaction for the Button. The reaction will be triggered upon release
/// of the button. Like other Conrod configs, this returns self for chainability.
pub fn react(mut self, reaction: F) -> Self {
self.maybe_react = Some(reaction);
self
}
/// If true, will allow user inputs. If false, will disallow user inputs. Like
/// other Conrod configs, this returns self for chainability. Allow dead code
/// because we never call this in the example.
#[allow(dead_code)]
pub fn enabled(mut self, flag: bool) -> Self {
self.enabled = flag;
self
}
}
/// A custom Conrod widget must implement the Widget trait. See the **Widget** trait
/// documentation for more details.
impl<'a, F> Widget for CircularButton<'a, F>
where F: FnMut()
{
/// The State struct that we defined above.
type State = State;
/// The Style struct that we defined above.
type Style = Style;
fn | (&self) -> &CommonBuilder {
&self.common
}
fn common_mut(&mut self) -> &mut CommonBuilder {
&mut self.common
}
fn unique_kind(&self) -> &'static str {
KIND
}
fn init_state(&self) -> State {
State {
interaction: Interaction::Normal,
circle_idx: IndexSlot::new(),
text_idx: IndexSlot::new(),
}
}
fn style(&self) -> Style {
self.style.clone()
}
/// Default width of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_x_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
// If no width was given via the `Sizeable` (a trait implemented for all widgets)
// methods, some default width must be chosen.
//
// Defaults can come from several places. Here, we define how certain defaults take
// precedence over others.
//
// Most commonly, defaults are to be retrieved from the `Theme`, however in some cases
// some other logic may need to be considered.
default_x_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Default height of the widget.
///
/// This method is optional.
///
/// The default implementation is the same as below, but unwraps to an absolute scalar of
/// `0.0` instead of `64.0`.
fn default_y_dimension<C: CharacterCache>(&self, ui: &Ui<C>) -> Dimension {
default_y_dimension(self, ui).unwrap_or(Dimension::Absolute(64.0))
}
/// Update the state of the button. The state may or may not have changed since
/// the last update. (E.g. it may have changed because the user moused over the
/// button.) If the state has changed, return the new state. Else, return None.
fn update<C: CharacterCache>(mut self, args: UpdateArgs<Self, C>) {
let UpdateArgs { idx, state, rect, mut ui, style, .. } = args;
let (xy, dim) = rect.xy_dim();
let maybe_mouse = ui.input().maybe_mouse.map(|mouse| mouse.relative_to(xy));
// Check whether or not a new interaction has occurred.
let new_interaction = match (self.enabled, maybe_mouse) {
(false, _) | (true, None) => Interaction::Normal,
(true, Some(mouse)) => {
// Conrod does us a favor by transforming mouse.xy into this widget's
// local coordinate system. Because mouse.xy is in local coords,
// we must also pass the circle center in local coords. Thus we pass
// [0.0, 0.0] as the center.
//
// See above where we define is_over_circ.
let is_over = is_over_circ([0.0, 0.0], mouse.xy, dim);
// See above where we define get_new_interaction.
get_new_interaction(is_over, state.view().interaction, mouse)
},
};
// If the mouse was released over the button, react. state.interaction is the
// button's state as of a moment ago. new_interaction is the updated state as
// of right now. So this if statement is saying: If the button was clicked a
// moment ago, and it's now highlighted, then the button has been activated.
if let (Interaction::Clicked, Interaction::Highlighted) =
(state.view().interaction, new_interaction)
{
// Recall that our CircularButton struct includes maybe_react, which
// stores either a reaction function or None. If maybe_react is Some, call
// the function.
if let Some(ref mut react) = self.maybe_react {
react();
}
}
// Here we check to see whether or not our button should capture the mouse.
//
// Widgets can "capture" user input. If the button captures the mouse, then mouse
// events will only be seen by the button. Other widgets will not see mouse events
// until the button uncaptures the mouse.
match (state.view().interaction, new_interaction) {
// If the user has pressed the button we capture the mouse.
(Interaction::Highlighted, Interaction::Clicked) => {
ui.capture_mouse();
},
// If the user releases the button, we uncapture the mouse.
(Interaction::Clicked, Interaction::Highlighted) |
(Interaction::Clicked, Interaction::Normal) => {
ui.uncapture_mouse();
},
_ => (),
}
// Whenever we call `state.update` (as below), a flag is set within our `State`
// indicating that there has been some mutation and that our widget requires a
// re-draw. Thus, we only want to call `state.update` if there has been some change in
// order to only re-draw when absolutely required.
//
// You can see how we do this below - we check if the state has changed before calling
// `state.update`.
// If the interaction has changed, set the new interaction.
if state.view().interaction != new_interaction {
state.update(|state| state.interaction = new_interaction);
}
// Finally, we'll describe how we want our widget drawn by simply instantiating the
// necessary primitive graphics widgets.
//
// Conrod will automatically determine whether or not any changes have occurred and
// whether or not any widgets need to be re-drawn.
//
// The primitive graphics widgets are special in that their unique state is used within
// conrod's backend to do the actual drawing. This allows us to build up more complex
// widgets by using these simple primitives with our familiar layout, coloring, etc
// methods.
//
// If you notice that conrod is missing some sort of primitive graphics that you
// require, please file an issue or open a PR so we can add it! :)
// First, we'll draw the **Circle** with a radius that is half our given width.
let radius = rect.w() / 2.0;
let color = new_interaction.color(style.color(ui.theme()));
let circle_idx = state.view().circle_idx.get(&mut ui);
Circle::fill(radius)
.middle_of(idx)
.graphics_for(idx)
.color(color)
.set(circle_idx, &mut ui);
// Now we'll instantiate our label using the **Text** widget.
let label_color = style.label_color(ui.theme());
let font_size = style.label_font_size(ui.theme());
let text_idx = state.view().text_idx.get(&mut ui);
if let Some(ref label) = self.maybe_label {
Text::new(label)
.middle_of(idx)
.font_size(font_size)
.graphics_for(idx)
.color(label_color)
.set(text_idx, &mut ui);
}
}
}
impl Style {
/// Construct the default Style.
pub fn new() -> Style {
Style {
maybe_color: None,
maybe_radius: None,
maybe_label_color: None,
maybe_label_font_size: None,
}
}
/// Get the Color for an Element.
pub fn color(&self, theme: &Theme) -> Color {
self.maybe_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_color.unwrap_or(theme.shape_color)
})).unwrap_or(theme.shape_color)
}
/// Get the label Color for an Element.
pub fn label_color(&self, theme: &Theme) -> Color {
self.maybe_label_color.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_color.unwrap_or(theme.label_color)
})).unwrap_or(theme.label_color)
}
/// Get the label font size for an Element.
pub fn label_font_size(&self, theme: &Theme) -> FontSize {
self.maybe_label_font_size.or(theme.widget_style::<Self>(KIND).map(|default| {
default.style.maybe_label_font_size.unwrap_or(theme.font_size_medium)
})).unwrap_or(theme.font_size_medium)
}
}
/// Provide the chainable color() configuration method.
impl<'a, F> Colorable for CircularButton<'a, F> {
fn color(mut self, color: Color) -> Self {
self.style.maybe_color = Some(color);
self
}
}
/// Provide the chainable label(), label_color(), and label_font_size()
/// configuration methods.
impl<'a, F> Labelable<'a> for CircularButton<'a, F> {
fn label(mut self, text: &'a str) -> Self {
self.maybe_label = Some(text);
self
}
fn label_color(mut self, color: Color) -> Self {
self.style.maybe_label_color = Some(color);
self
}
fn label_font_size(mut self, size: FontSize) -> Self {
self.style.maybe_label_font_size = Some(size);
self
}
}
}
fn main() {
use piston_window::{EventLoop, Glyphs, PistonWindow, OpenGL, UpdateEvent, WindowSettings};
use conrod::{Colorable, Labelable, Positionable, Sizeable, Widget};
use circular_button::CircularButton;
// PistonWindow has two type parameters, but the default type is
// PistonWindow<T = (), W: Window = GlutinWindow>. To change the Piston backend,
// specify a different type in the let binding, e.g.
// let window: PistonWindow<(), Sdl2Window>.
let window: PistonWindow = WindowSettings::new("Control Panel", [1200, 800])
.opengl(OpenGL::V3_2)
.exit_on_esc(true)
.build().unwrap();
// Conrod's main object.
let mut ui = {
// Load a font. `Glyphs` is provided to us via piston_window and gfx, though you may use
// any type that implements `CharacterCache`.
let assets = find_folder::Search::ParentsThenKids(3, 3)
.for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
let glyph_cache = Glyphs::new(&font_path, window.factory.borrow().clone()).unwrap();
conrod::Ui::new(glyph_cache, conrod::Theme::default())
};
for e in window.ups(60) {
// Pass each `Event` to the `Ui`.
ui.handle_event(e.event.as_ref().unwrap());
e.update(|_| ui.set_widgets(|ui| {
// Sets a color to clear the background with before the Ui draws our widget.
conrod::Split::new(BACKGROUND).color(conrod::color::dark_red()).set(ui);
// Create an instance of our custom widget.
CircularButton::new()
.color(conrod::color::rgb(0.0, 0.3, 0.1))
.middle_of(BACKGROUND)
.dimensions(256.0, 256.0)
.label_color(conrod::color::white())
.label("Circular Button")
// This is called when the user clicks the button.
.react(|| println!("Click"))
// Add the widget to the conrod::Ui. This schedules the widget it to be
// drawn when we call Ui::draw.
.set(CIRCLE_BUTTON, ui);
}));
// Draws the whole Ui (in this case, just our widget) whenever a change occurs.
e.draw_2d(|c, g| ui.draw_if_changed(c, g))
}
}
// The `widget_ids` macro is a easy, safe way of generating unique `WidgetId`s.
widget_ids! {
// An ID for the background widget, upon which we'll place our custom button.
BACKGROUND,
// The WidgetId we'll use to plug our widget into the `Ui`.
CIRCLE_BUTTON,
}
| common | identifier_name |
handlers.go | /*
Package handlers implements the http handlers for the api and defines the
Server structure for shared context between handlers.
*/
package handlers
import (
"encoding/json"
"github.com/AdRoll/batchiepatchie/awsclients"
"github.com/AdRoll/batchiepatchie/jobs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/batch"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/labstack/echo"
"github.com/labstack/gommon/log"
"github.com/opentracing/opentracing-go"
"io/ioutil"
"net/http"
"strconv"
"strings"
)
const (
defaultQueryLimit = 100
defaultPageNumber = 0
)
type Server struct {
Storage jobs.FinderStorer
Killer jobs.Killer
Index []byte
}
// KillTaskID is a struct to handle JSON request to kill a task
type KillTaskID struct {
ID string `json:"id" form:"id" query:"id"`
}
// KillTasks is a struct to handle JSON request to kill many tasks
type KillTasks struct {
IDs []string `json:"ids" form:"ids" query:"ids"`
}
// Find is a request handler, returns json with jobs matching the query param 'q'
func (s *Server) Find(c echo.Context) error {
span := opentracing.StartSpan("API.Find")
defer span.Finish()
c.QueryParams()
search := c.QueryParam("q")
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
column := c.QueryParam("sortColumn")
sort := strings.ToUpper(c.QueryParam("sortDirection")) == "ASC"
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
page, err := strconv.Atoi(c.QueryParam("page"))
if err != nil {
// if err, set default
page = 0
}
foundJobs, err := s.Storage.Find(&jobs.Options{
Search: search,
Limit: defaultQueryLimit,
Offset: page * defaultQueryLimit,
Queues: queues,
SortBy: column,
SortAsc: sort,
Status: status,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, foundJobs)
return nil
}
func (s *Server) GetStatus(c echo.Context) error {
span := opentracing.StartSpan("API.GetStatus")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.GetStatus(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
if job == nil {
c.JSON(http.StatusNotFound, job)
return nil
} else {
c.JSON(http.StatusOK, job)
return nil
}
}
// FindOne is a request handler, returns a job matching the query parameter 'q'
func (s *Server) FindOne(c echo.Context) error {
span := opentracing.StartSpan("API.FindOne")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.FindOne(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, job)
return nil
}
// KillMany is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillMany(c echo.Context) error {
span := opentracing.StartSpan("API.KillMany")
defer span.Finish()
obj, err := BodyToKillTask(c)
if err != nil {
c.JSON(http.StatusBadRequest, "{\"error\": \"Cannot deserialize\"}")
}
values := obj.IDs
results := make(map[string]string)
for _, value := range values {
err := s.Killer.KillOne(value, "terminated from UI", s.Storage)
if err != nil {
results[value] = err.Error()
}
results[value] = "OK"
}
c.JSON(http.StatusOK, results)
return nil
}
func (s *Server) FetchLogs(c echo.Context) error {
span := opentracing.StartSpan("API.FetchLogs")
defer span.Finish()
const LOG_GROUP_NAME = "/aws/batch/job"
format := c.QueryParam("format")
if format != "text" {
c.JSON(http.StatusBadRequest, "Only 'text' format is supported. Add format=text to your query.")
return nil
}
c.Response().Header().Set(echo.HeaderContentType, echo.MIMETextPlain)
id := c.Param("id")
job, err := s.Storage.FindOne(id)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
svc := awsclients.CloudWatchLogs
oldStyleLogs := func() (*string, error) {
// AWS Batch seems to cap name strings to 50 characters for cloudwatch.
truncated_job_name := job.Name
if len(job.Name) > 50 {
truncated_job_name = job.Name[:50]
}
s := truncated_job_name + "/" + id + "/"
return &s, nil
}
newStyleLogs := func() (*string, error) {
log_stream_name := job.LogStreamName
if log_stream_name == nil || len(*log_stream_name) == 0 {
return nil, nil
}
return log_stream_name, nil
}
logSources := [...]func() (*string, error){oldStyleLogs, newStyleLogs}
var logStreams *cloudwatchlogs.DescribeLogStreamsOutput
for _, log_source := range logSources {
var name *string
name, err = log_source()
if err != nil {
continue
}
// No error but no logs either
if name == nil {
continue
}
logStreams, err = svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamNamePrefix: aws.String(*name),
})
if err != nil || len(logStreams.LogStreams) <= 0 {
continue
}
break
}
if err != nil {
log.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return err
}
c.Response().WriteHeader(http.StatusOK)
if logStreams == nil || len(logStreams.LogStreams) <= 0 {
// Write empty logs.
c.Response().Flush()
return nil
}
startFromHead := true
var previousToken *string
var nextToken *string
lines_pushed := 0
for {
var logEvents *cloudwatchlogs.GetLogEventsOutput
var err2 error
previousToken = nextToken
if nextToken != nil {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
NextToken: nextToken,
})
} else {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
})
}
if err2 != nil {
return err2
}
nextToken = logEvents.NextForwardToken
events := logEvents.Events
for _, event := range events {
_, err2 = c.Response().Write([]byte(*event.Message + "\n"))
if err2 != nil {
return err2
}
lines_pushed += 1
if lines_pushed >= 1000 {
c.Response().Flush()
}
}
if nextToken == nil || (previousToken != nil && *previousToken == *nextToken) {
break
}
}
return nil
}
// KillOne is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillOne(c echo.Context) error {
span := opentracing.StartSpan("API.KillOne")
defer span.Finish()
task := new(KillTaskID)
if err := c.Bind(task); err != nil {
return err
}
err := s.Killer.KillOne(task.ID, "terminated from UI", s.Storage)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, task.ID)
return nil
}
func (s *Server) ListActiveJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListActiveJobQueues")
defer span.Finish()
active_job_queues, err := s.Storage.ListActiveJobQueues()
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, active_job_queues)
return nil
}
func (s *Server) ListAllJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListAllJobQueues")
defer span.Finish()
// This function gets *all* job queues, even those not registered to
// Batchiepatchie. Therefore, we must ask AWS about all the job
// queues. (as opposed to looking in our data store).
svc := awsclients.Batch
result := make([]string, 0)
var next_token *string
for {
var input *batch.DescribeJobQueuesInput
if next_token != nil {
input = &batch.DescribeJobQueuesInput{NextToken: next_token}
} else {
input = &batch.DescribeJobQueuesInput{}
}
job_queues, err := svc.DescribeJobQueues(input)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
}
for _, job_queue := range job_queues.JobQueues {
name := job_queue.JobQueueName
result = append(result, *name)
}
if input.NextToken != nil {
next_token = input.NextToken
} else {
break
}
}
c.JSON(http.StatusOK, result)
return nil
}
func (s *Server) ActivateJobQueue(c echo.Context) error {
span := opentracing.StartSpan("API.ActivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.ActivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
func (s *Server) | (c echo.Context) error {
span := opentracing.StartSpan("API.DeactivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.DeactivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
// Stats
func (s *Server) JobStats(c echo.Context) error {
span := opentracing.StartSpan("API.JobStats")
defer span.Finish()
c.QueryParams()
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
start, start_err := strconv.ParseInt(c.QueryParam("start"), 10, 64)
end, end_err := strconv.ParseInt(c.QueryParam("end"), 10, 64)
var duration int64 = end - start
var minuteSeconds int64 = 60
var hourSeconds int64 = 60 * minuteSeconds
var daySeconds int64 = 24 * hourSeconds
if start_err != nil {
log.Error(start_err)
c.JSON(http.StatusInternalServerError, start_err)
return start_err
}
if end_err != nil {
log.Error(end_err)
c.JSON(http.StatusInternalServerError, end_err)
return end_err
}
// The interval used by the query to break stats down by
var interval int64
if duration >= 30*daySeconds {
interval = daySeconds
} else if duration >= 3*daySeconds {
interval = 6 * hourSeconds
} else if duration >= 4*hourSeconds {
interval = hourSeconds
} else if duration >= hourSeconds {
interval = 15 * minuteSeconds
} else {
interval = 5 * minuteSeconds
}
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
results, err := s.Storage.JobStats(&jobs.JobStatsOptions{
Queues: queues,
Status: status,
Interval: interval,
Start: start,
End: end,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, results)
return nil
}
// IndexHandler returns
func (s *Server) IndexHandler(c echo.Context) error {
c.HTMLBlob(http.StatusOK, s.Index)
return nil
}
func BodyToKillTask(c echo.Context) (KillTasks, error) {
var obj KillTasks
s, err := ioutil.ReadAll(c.Request().Body)
if err != nil {
log.Error("Cannot read request")
return obj, err
}
if err := json.Unmarshal(s, &obj); err != nil {
return obj, err
}
return obj, nil
}
| DeactivateJobQueue | identifier_name |
handlers.go | /*
Package handlers implements the http handlers for the api and defines the
Server structure for shared context between handlers.
*/
package handlers
import (
"encoding/json"
"github.com/AdRoll/batchiepatchie/awsclients"
"github.com/AdRoll/batchiepatchie/jobs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/batch"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/labstack/echo"
"github.com/labstack/gommon/log"
"github.com/opentracing/opentracing-go"
"io/ioutil"
"net/http"
"strconv"
"strings"
)
const (
defaultQueryLimit = 100
defaultPageNumber = 0
)
type Server struct {
Storage jobs.FinderStorer
Killer jobs.Killer
Index []byte
}
// KillTaskID is a struct to handle JSON request to kill a task
type KillTaskID struct {
ID string `json:"id" form:"id" query:"id"`
}
// KillTasks is a struct to handle JSON request to kill many tasks
type KillTasks struct {
IDs []string `json:"ids" form:"ids" query:"ids"`
}
// Find is a request handler, returns json with jobs matching the query param 'q'
func (s *Server) Find(c echo.Context) error {
span := opentracing.StartSpan("API.Find")
defer span.Finish()
c.QueryParams()
search := c.QueryParam("q")
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
column := c.QueryParam("sortColumn")
sort := strings.ToUpper(c.QueryParam("sortDirection")) == "ASC"
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
page, err := strconv.Atoi(c.QueryParam("page"))
if err != nil {
// if err, set default
page = 0
}
foundJobs, err := s.Storage.Find(&jobs.Options{
Search: search,
Limit: defaultQueryLimit,
Offset: page * defaultQueryLimit,
Queues: queues,
SortBy: column,
SortAsc: sort,
Status: status,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, foundJobs)
return nil
}
func (s *Server) GetStatus(c echo.Context) error {
span := opentracing.StartSpan("API.GetStatus")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.GetStatus(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
if job == nil {
c.JSON(http.StatusNotFound, job)
return nil
} else {
c.JSON(http.StatusOK, job)
return nil
}
}
// FindOne is a request handler, returns a job matching the query parameter 'q'
func (s *Server) FindOne(c echo.Context) error {
span := opentracing.StartSpan("API.FindOne")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.FindOne(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, job)
return nil
}
// KillMany is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillMany(c echo.Context) error {
span := opentracing.StartSpan("API.KillMany")
defer span.Finish()
obj, err := BodyToKillTask(c)
if err != nil {
c.JSON(http.StatusBadRequest, "{\"error\": \"Cannot deserialize\"}")
}
values := obj.IDs
results := make(map[string]string)
for _, value := range values {
err := s.Killer.KillOne(value, "terminated from UI", s.Storage)
if err != nil {
results[value] = err.Error()
}
results[value] = "OK"
}
c.JSON(http.StatusOK, results)
return nil
}
func (s *Server) FetchLogs(c echo.Context) error {
span := opentracing.StartSpan("API.FetchLogs")
defer span.Finish()
const LOG_GROUP_NAME = "/aws/batch/job"
format := c.QueryParam("format")
if format != "text" {
c.JSON(http.StatusBadRequest, "Only 'text' format is supported. Add format=text to your query.")
return nil
}
c.Response().Header().Set(echo.HeaderContentType, echo.MIMETextPlain)
id := c.Param("id")
job, err := s.Storage.FindOne(id)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
svc := awsclients.CloudWatchLogs
oldStyleLogs := func() (*string, error) {
// AWS Batch seems to cap name strings to 50 characters for cloudwatch.
truncated_job_name := job.Name
if len(job.Name) > 50 {
truncated_job_name = job.Name[:50]
}
s := truncated_job_name + "/" + id + "/"
return &s, nil
}
newStyleLogs := func() (*string, error) {
log_stream_name := job.LogStreamName
if log_stream_name == nil || len(*log_stream_name) == 0 {
return nil, nil
}
return log_stream_name, nil
}
logSources := [...]func() (*string, error){oldStyleLogs, newStyleLogs}
var logStreams *cloudwatchlogs.DescribeLogStreamsOutput
for _, log_source := range logSources {
var name *string
name, err = log_source()
if err != nil {
continue
}
// No error but no logs either
if name == nil {
continue
}
logStreams, err = svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamNamePrefix: aws.String(*name),
})
if err != nil || len(logStreams.LogStreams) <= 0 {
continue
}
break
}
if err != nil {
log.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return err
}
c.Response().WriteHeader(http.StatusOK)
if logStreams == nil || len(logStreams.LogStreams) <= 0 {
// Write empty logs.
c.Response().Flush()
return nil
}
startFromHead := true
var previousToken *string
var nextToken *string
lines_pushed := 0
for {
var logEvents *cloudwatchlogs.GetLogEventsOutput
var err2 error
previousToken = nextToken
if nextToken != nil {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
NextToken: nextToken,
})
} else {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
})
}
if err2 != nil {
return err2
}
nextToken = logEvents.NextForwardToken
events := logEvents.Events
for _, event := range events {
_, err2 = c.Response().Write([]byte(*event.Message + "\n"))
if err2 != nil {
return err2
}
lines_pushed += 1
if lines_pushed >= 1000 {
c.Response().Flush()
}
}
if nextToken == nil || (previousToken != nil && *previousToken == *nextToken) {
break
}
}
return nil
}
// KillOne is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillOne(c echo.Context) error {
span := opentracing.StartSpan("API.KillOne")
defer span.Finish()
task := new(KillTaskID)
if err := c.Bind(task); err != nil {
return err
}
err := s.Killer.KillOne(task.ID, "terminated from UI", s.Storage)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, task.ID)
return nil
}
func (s *Server) ListActiveJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListActiveJobQueues")
defer span.Finish()
active_job_queues, err := s.Storage.ListActiveJobQueues()
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, active_job_queues)
return nil
}
func (s *Server) ListAllJobQueues(c echo.Context) error |
func (s *Server) ActivateJobQueue(c echo.Context) error {
span := opentracing.StartSpan("API.ActivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.ActivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
func (s *Server) DeactivateJobQueue(c echo.Context) error {
span := opentracing.StartSpan("API.DeactivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.DeactivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
// Stats
func (s *Server) JobStats(c echo.Context) error {
span := opentracing.StartSpan("API.JobStats")
defer span.Finish()
c.QueryParams()
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
start, start_err := strconv.ParseInt(c.QueryParam("start"), 10, 64)
end, end_err := strconv.ParseInt(c.QueryParam("end"), 10, 64)
var duration int64 = end - start
var minuteSeconds int64 = 60
var hourSeconds int64 = 60 * minuteSeconds
var daySeconds int64 = 24 * hourSeconds
if start_err != nil {
log.Error(start_err)
c.JSON(http.StatusInternalServerError, start_err)
return start_err
}
if end_err != nil {
log.Error(end_err)
c.JSON(http.StatusInternalServerError, end_err)
return end_err
}
// The interval used by the query to break stats down by
var interval int64
if duration >= 30*daySeconds {
interval = daySeconds
} else if duration >= 3*daySeconds {
interval = 6 * hourSeconds
} else if duration >= 4*hourSeconds {
interval = hourSeconds
} else if duration >= hourSeconds {
interval = 15 * minuteSeconds
} else {
interval = 5 * minuteSeconds
}
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
results, err := s.Storage.JobStats(&jobs.JobStatsOptions{
Queues: queues,
Status: status,
Interval: interval,
Start: start,
End: end,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, results)
return nil
}
// IndexHandler returns
func (s *Server) IndexHandler(c echo.Context) error {
c.HTMLBlob(http.StatusOK, s.Index)
return nil
}
func BodyToKillTask(c echo.Context) (KillTasks, error) {
var obj KillTasks
s, err := ioutil.ReadAll(c.Request().Body)
if err != nil {
log.Error("Cannot read request")
return obj, err
}
if err := json.Unmarshal(s, &obj); err != nil {
return obj, err
}
return obj, nil
}
| {
span := opentracing.StartSpan("API.ListAllJobQueues")
defer span.Finish()
// This function gets *all* job queues, even those not registered to
// Batchiepatchie. Therefore, we must ask AWS about all the job
// queues. (as opposed to looking in our data store).
svc := awsclients.Batch
result := make([]string, 0)
var next_token *string
for {
var input *batch.DescribeJobQueuesInput
if next_token != nil {
input = &batch.DescribeJobQueuesInput{NextToken: next_token}
} else {
input = &batch.DescribeJobQueuesInput{}
}
job_queues, err := svc.DescribeJobQueues(input)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
}
for _, job_queue := range job_queues.JobQueues {
name := job_queue.JobQueueName
result = append(result, *name)
}
if input.NextToken != nil {
next_token = input.NextToken
} else {
break
}
}
c.JSON(http.StatusOK, result)
return nil
} | identifier_body |
handlers.go | /*
Package handlers implements the http handlers for the api and defines the
Server structure for shared context between handlers.
*/
package handlers
import (
"encoding/json"
"github.com/AdRoll/batchiepatchie/awsclients"
"github.com/AdRoll/batchiepatchie/jobs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/batch"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/labstack/echo"
"github.com/labstack/gommon/log"
"github.com/opentracing/opentracing-go"
"io/ioutil"
"net/http"
"strconv"
"strings"
)
const (
defaultQueryLimit = 100
defaultPageNumber = 0
)
type Server struct {
Storage jobs.FinderStorer
Killer jobs.Killer
Index []byte
}
// KillTaskID is a struct to handle JSON request to kill a task
type KillTaskID struct {
ID string `json:"id" form:"id" query:"id"`
}
// KillTasks is a struct to handle JSON request to kill many tasks
type KillTasks struct {
IDs []string `json:"ids" form:"ids" query:"ids"`
}
// Find is a request handler, returns json with jobs matching the query param 'q'
func (s *Server) Find(c echo.Context) error {
span := opentracing.StartSpan("API.Find")
defer span.Finish()
c.QueryParams()
search := c.QueryParam("q")
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
column := c.QueryParam("sortColumn")
sort := strings.ToUpper(c.QueryParam("sortDirection")) == "ASC"
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
page, err := strconv.Atoi(c.QueryParam("page"))
if err != nil {
// if err, set default
page = 0
}
foundJobs, err := s.Storage.Find(&jobs.Options{
Search: search,
Limit: defaultQueryLimit,
Offset: page * defaultQueryLimit,
Queues: queues,
SortBy: column,
SortAsc: sort,
Status: status,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, foundJobs)
return nil
}
func (s *Server) GetStatus(c echo.Context) error {
span := opentracing.StartSpan("API.GetStatus")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.GetStatus(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
if job == nil {
c.JSON(http.StatusNotFound, job)
return nil
} else {
c.JSON(http.StatusOK, job)
return nil
}
}
// FindOne is a request handler, returns a job matching the query parameter 'q'
func (s *Server) FindOne(c echo.Context) error {
span := opentracing.StartSpan("API.FindOne")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.FindOne(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, job)
return nil
}
// KillMany is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillMany(c echo.Context) error {
span := opentracing.StartSpan("API.KillMany")
defer span.Finish()
obj, err := BodyToKillTask(c)
if err != nil {
c.JSON(http.StatusBadRequest, "{\"error\": \"Cannot deserialize\"}")
}
values := obj.IDs
results := make(map[string]string)
for _, value := range values {
err := s.Killer.KillOne(value, "terminated from UI", s.Storage)
if err != nil {
results[value] = err.Error()
}
results[value] = "OK"
}
c.JSON(http.StatusOK, results)
return nil
}
func (s *Server) FetchLogs(c echo.Context) error {
span := opentracing.StartSpan("API.FetchLogs")
defer span.Finish()
const LOG_GROUP_NAME = "/aws/batch/job"
format := c.QueryParam("format")
if format != "text" {
c.JSON(http.StatusBadRequest, "Only 'text' format is supported. Add format=text to your query.")
return nil
}
c.Response().Header().Set(echo.HeaderContentType, echo.MIMETextPlain)
id := c.Param("id")
job, err := s.Storage.FindOne(id)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
svc := awsclients.CloudWatchLogs
oldStyleLogs := func() (*string, error) {
// AWS Batch seems to cap name strings to 50 characters for cloudwatch.
truncated_job_name := job.Name
if len(job.Name) > 50 {
truncated_job_name = job.Name[:50]
}
s := truncated_job_name + "/" + id + "/"
return &s, nil
}
newStyleLogs := func() (*string, error) {
log_stream_name := job.LogStreamName
if log_stream_name == nil || len(*log_stream_name) == 0 {
return nil, nil
}
return log_stream_name, nil
}
logSources := [...]func() (*string, error){oldStyleLogs, newStyleLogs}
var logStreams *cloudwatchlogs.DescribeLogStreamsOutput
for _, log_source := range logSources {
var name *string
name, err = log_source()
if err != nil {
continue
}
// No error but no logs either
if name == nil {
continue
}
logStreams, err = svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamNamePrefix: aws.String(*name),
})
if err != nil || len(logStreams.LogStreams) <= 0 {
continue
}
break
}
if err != nil {
log.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return err
}
c.Response().WriteHeader(http.StatusOK)
if logStreams == nil || len(logStreams.LogStreams) <= 0 {
// Write empty logs.
c.Response().Flush()
return nil
}
startFromHead := true
var previousToken *string
var nextToken *string
lines_pushed := 0
for {
var logEvents *cloudwatchlogs.GetLogEventsOutput
var err2 error
previousToken = nextToken
if nextToken != nil {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
NextToken: nextToken,
})
} else {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
})
}
if err2 != nil {
return err2
}
nextToken = logEvents.NextForwardToken
events := logEvents.Events
for _, event := range events {
_, err2 = c.Response().Write([]byte(*event.Message + "\n"))
if err2 != nil {
return err2
}
lines_pushed += 1
if lines_pushed >= 1000 {
c.Response().Flush()
}
}
if nextToken == nil || (previousToken != nil && *previousToken == *nextToken) {
break
}
}
return nil
}
// KillOne is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillOne(c echo.Context) error {
span := opentracing.StartSpan("API.KillOne")
defer span.Finish()
task := new(KillTaskID)
if err := c.Bind(task); err != nil {
return err
}
err := s.Killer.KillOne(task.ID, "terminated from UI", s.Storage)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, task.ID)
return nil
}
func (s *Server) ListActiveJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListActiveJobQueues")
defer span.Finish()
active_job_queues, err := s.Storage.ListActiveJobQueues()
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, active_job_queues)
return nil
}
func (s *Server) ListAllJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListAllJobQueues")
defer span.Finish()
// This function gets *all* job queues, even those not registered to
// Batchiepatchie. Therefore, we must ask AWS about all the job
// queues. (as opposed to looking in our data store).
svc := awsclients.Batch
result := make([]string, 0)
var next_token *string
for {
var input *batch.DescribeJobQueuesInput
if next_token != nil {
input = &batch.DescribeJobQueuesInput{NextToken: next_token}
} else {
input = &batch.DescribeJobQueuesInput{}
}
job_queues, err := svc.DescribeJobQueues(input)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
}
for _, job_queue := range job_queues.JobQueues {
name := job_queue.JobQueueName
result = append(result, *name)
}
if input.NextToken != nil {
next_token = input.NextToken
} else {
break
}
}
c.JSON(http.StatusOK, result)
return nil | span := opentracing.StartSpan("API.ActivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.ActivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
func (s *Server) DeactivateJobQueue(c echo.Context) error {
span := opentracing.StartSpan("API.DeactivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.DeactivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
// Stats
func (s *Server) JobStats(c echo.Context) error {
span := opentracing.StartSpan("API.JobStats")
defer span.Finish()
c.QueryParams()
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
start, start_err := strconv.ParseInt(c.QueryParam("start"), 10, 64)
end, end_err := strconv.ParseInt(c.QueryParam("end"), 10, 64)
var duration int64 = end - start
var minuteSeconds int64 = 60
var hourSeconds int64 = 60 * minuteSeconds
var daySeconds int64 = 24 * hourSeconds
if start_err != nil {
log.Error(start_err)
c.JSON(http.StatusInternalServerError, start_err)
return start_err
}
if end_err != nil {
log.Error(end_err)
c.JSON(http.StatusInternalServerError, end_err)
return end_err
}
// The interval used by the query to break stats down by
var interval int64
if duration >= 30*daySeconds {
interval = daySeconds
} else if duration >= 3*daySeconds {
interval = 6 * hourSeconds
} else if duration >= 4*hourSeconds {
interval = hourSeconds
} else if duration >= hourSeconds {
interval = 15 * minuteSeconds
} else {
interval = 5 * minuteSeconds
}
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
results, err := s.Storage.JobStats(&jobs.JobStatsOptions{
Queues: queues,
Status: status,
Interval: interval,
Start: start,
End: end,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, results)
return nil
}
// IndexHandler returns
func (s *Server) IndexHandler(c echo.Context) error {
c.HTMLBlob(http.StatusOK, s.Index)
return nil
}
func BodyToKillTask(c echo.Context) (KillTasks, error) {
var obj KillTasks
s, err := ioutil.ReadAll(c.Request().Body)
if err != nil {
log.Error("Cannot read request")
return obj, err
}
if err := json.Unmarshal(s, &obj); err != nil {
return obj, err
}
return obj, nil
} | }
func (s *Server) ActivateJobQueue(c echo.Context) error { | random_line_split |
handlers.go | /*
Package handlers implements the http handlers for the api and defines the
Server structure for shared context between handlers.
*/
package handlers
import (
"encoding/json"
"github.com/AdRoll/batchiepatchie/awsclients"
"github.com/AdRoll/batchiepatchie/jobs"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/batch"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/labstack/echo"
"github.com/labstack/gommon/log"
"github.com/opentracing/opentracing-go"
"io/ioutil"
"net/http"
"strconv"
"strings"
)
const (
defaultQueryLimit = 100
defaultPageNumber = 0
)
type Server struct {
Storage jobs.FinderStorer
Killer jobs.Killer
Index []byte
}
// KillTaskID is a struct to handle JSON request to kill a task
type KillTaskID struct {
ID string `json:"id" form:"id" query:"id"`
}
// KillTasks is a struct to handle JSON request to kill many tasks
type KillTasks struct {
IDs []string `json:"ids" form:"ids" query:"ids"`
}
// Find is a request handler, returns json with jobs matching the query param 'q'
func (s *Server) Find(c echo.Context) error {
span := opentracing.StartSpan("API.Find")
defer span.Finish()
c.QueryParams()
search := c.QueryParam("q")
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
column := c.QueryParam("sortColumn")
sort := strings.ToUpper(c.QueryParam("sortDirection")) == "ASC"
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
page, err := strconv.Atoi(c.QueryParam("page"))
if err != nil {
// if err, set default
page = 0
}
foundJobs, err := s.Storage.Find(&jobs.Options{
Search: search,
Limit: defaultQueryLimit,
Offset: page * defaultQueryLimit,
Queues: queues,
SortBy: column,
SortAsc: sort,
Status: status,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, foundJobs)
return nil
}
func (s *Server) GetStatus(c echo.Context) error {
span := opentracing.StartSpan("API.GetStatus")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.GetStatus(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
if job == nil {
c.JSON(http.StatusNotFound, job)
return nil
} else {
c.JSON(http.StatusOK, job)
return nil
}
}
// FindOne is a request handler, returns a job matching the query parameter 'q'
func (s *Server) FindOne(c echo.Context) error {
span := opentracing.StartSpan("API.FindOne")
defer span.Finish()
query := c.Param("id")
job, err := s.Storage.FindOne(query)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, job)
return nil
}
// KillMany is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillMany(c echo.Context) error {
span := opentracing.StartSpan("API.KillMany")
defer span.Finish()
obj, err := BodyToKillTask(c)
if err != nil {
c.JSON(http.StatusBadRequest, "{\"error\": \"Cannot deserialize\"}")
}
values := obj.IDs
results := make(map[string]string)
for _, value := range values {
err := s.Killer.KillOne(value, "terminated from UI", s.Storage)
if err != nil {
results[value] = err.Error()
}
results[value] = "OK"
}
c.JSON(http.StatusOK, results)
return nil
}
func (s *Server) FetchLogs(c echo.Context) error {
span := opentracing.StartSpan("API.FetchLogs")
defer span.Finish()
const LOG_GROUP_NAME = "/aws/batch/job"
format := c.QueryParam("format")
if format != "text" {
c.JSON(http.StatusBadRequest, "Only 'text' format is supported. Add format=text to your query.")
return nil
}
c.Response().Header().Set(echo.HeaderContentType, echo.MIMETextPlain)
id := c.Param("id")
job, err := s.Storage.FindOne(id)
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
svc := awsclients.CloudWatchLogs
oldStyleLogs := func() (*string, error) {
// AWS Batch seems to cap name strings to 50 characters for cloudwatch.
truncated_job_name := job.Name
if len(job.Name) > 50 {
truncated_job_name = job.Name[:50]
}
s := truncated_job_name + "/" + id + "/"
return &s, nil
}
newStyleLogs := func() (*string, error) {
log_stream_name := job.LogStreamName
if log_stream_name == nil || len(*log_stream_name) == 0 {
return nil, nil
}
return log_stream_name, nil
}
logSources := [...]func() (*string, error){oldStyleLogs, newStyleLogs}
var logStreams *cloudwatchlogs.DescribeLogStreamsOutput
for _, log_source := range logSources {
var name *string
name, err = log_source()
if err != nil {
continue
}
// No error but no logs either
if name == nil {
continue
}
logStreams, err = svc.DescribeLogStreams(&cloudwatchlogs.DescribeLogStreamsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamNamePrefix: aws.String(*name),
})
if err != nil || len(logStreams.LogStreams) <= 0 {
continue
}
break
}
if err != nil {
log.Error(err)
c.String(http.StatusInternalServerError, err.Error())
return err
}
c.Response().WriteHeader(http.StatusOK)
if logStreams == nil || len(logStreams.LogStreams) <= 0 {
// Write empty logs.
c.Response().Flush()
return nil
}
startFromHead := true
var previousToken *string
var nextToken *string
lines_pushed := 0
for {
var logEvents *cloudwatchlogs.GetLogEventsOutput
var err2 error
previousToken = nextToken
if nextToken != nil {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
NextToken: nextToken,
})
} else {
logEvents, err2 = svc.GetLogEvents(&cloudwatchlogs.GetLogEventsInput{
LogGroupName: aws.String(LOG_GROUP_NAME),
LogStreamName: logStreams.LogStreams[0].LogStreamName,
StartFromHead: &startFromHead,
})
}
if err2 != nil {
return err2
}
nextToken = logEvents.NextForwardToken
events := logEvents.Events
for _, event := range events {
_, err2 = c.Response().Write([]byte(*event.Message + "\n"))
if err2 != nil {
return err2
}
lines_pushed += 1
if lines_pushed >= 1000 {
c.Response().Flush()
}
}
if nextToken == nil || (previousToken != nil && *previousToken == *nextToken) {
break
}
}
return nil
}
// KillOne is a request handler, kills a job matching the post parameter 'id' (AWS task ID)
func (s *Server) KillOne(c echo.Context) error {
span := opentracing.StartSpan("API.KillOne")
defer span.Finish()
task := new(KillTaskID)
if err := c.Bind(task); err != nil {
return err
}
err := s.Killer.KillOne(task.ID, "terminated from UI", s.Storage)
if err != nil |
c.JSON(http.StatusOK, task.ID)
return nil
}
func (s *Server) ListActiveJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListActiveJobQueues")
defer span.Finish()
active_job_queues, err := s.Storage.ListActiveJobQueues()
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, active_job_queues)
return nil
}
func (s *Server) ListAllJobQueues(c echo.Context) error {
span := opentracing.StartSpan("API.ListAllJobQueues")
defer span.Finish()
// This function gets *all* job queues, even those not registered to
// Batchiepatchie. Therefore, we must ask AWS about all the job
// queues. (as opposed to looking in our data store).
svc := awsclients.Batch
result := make([]string, 0)
var next_token *string
for {
var input *batch.DescribeJobQueuesInput
if next_token != nil {
input = &batch.DescribeJobQueuesInput{NextToken: next_token}
} else {
input = &batch.DescribeJobQueuesInput{}
}
job_queues, err := svc.DescribeJobQueues(input)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
}
for _, job_queue := range job_queues.JobQueues {
name := job_queue.JobQueueName
result = append(result, *name)
}
if input.NextToken != nil {
next_token = input.NextToken
} else {
break
}
}
c.JSON(http.StatusOK, result)
return nil
}
func (s *Server) ActivateJobQueue(c echo.Context) error {
span := opentracing.StartSpan("API.ActivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.ActivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
func (s *Server) DeactivateJobQueue(c echo.Context) error {
span := opentracing.StartSpan("API.DeactivateJobQueue")
defer span.Finish()
job_queue_name := c.Param("name")
err := s.Storage.DeactivateJobQueue(job_queue_name)
if err != nil {
c.JSON(http.StatusInternalServerError, err)
return err
} else {
c.String(http.StatusOK, "[]")
return nil
}
}
// Stats
func (s *Server) JobStats(c echo.Context) error {
span := opentracing.StartSpan("API.JobStats")
defer span.Finish()
c.QueryParams()
queuesStr := c.QueryParam("queue")
statusStr := c.QueryParam("status")
start, start_err := strconv.ParseInt(c.QueryParam("start"), 10, 64)
end, end_err := strconv.ParseInt(c.QueryParam("end"), 10, 64)
var duration int64 = end - start
var minuteSeconds int64 = 60
var hourSeconds int64 = 60 * minuteSeconds
var daySeconds int64 = 24 * hourSeconds
if start_err != nil {
log.Error(start_err)
c.JSON(http.StatusInternalServerError, start_err)
return start_err
}
if end_err != nil {
log.Error(end_err)
c.JSON(http.StatusInternalServerError, end_err)
return end_err
}
// The interval used by the query to break stats down by
var interval int64
if duration >= 30*daySeconds {
interval = daySeconds
} else if duration >= 3*daySeconds {
interval = 6 * hourSeconds
} else if duration >= 4*hourSeconds {
interval = hourSeconds
} else if duration >= hourSeconds {
interval = 15 * minuteSeconds
} else {
interval = 5 * minuteSeconds
}
var queues []string
var status []string
if len(queuesStr) > 0 {
queues = strings.Split(queuesStr, ",")
}
if len(statusStr) > 0 {
status = strings.Split(statusStr, ",")
}
results, err := s.Storage.JobStats(&jobs.JobStatsOptions{
Queues: queues,
Status: status,
Interval: interval,
Start: start,
End: end,
})
if err != nil {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
}
c.JSON(http.StatusOK, results)
return nil
}
// IndexHandler returns
func (s *Server) IndexHandler(c echo.Context) error {
c.HTMLBlob(http.StatusOK, s.Index)
return nil
}
func BodyToKillTask(c echo.Context) (KillTasks, error) {
var obj KillTasks
s, err := ioutil.ReadAll(c.Request().Body)
if err != nil {
log.Error("Cannot read request")
return obj, err
}
if err := json.Unmarshal(s, &obj); err != nil {
return obj, err
}
return obj, nil
}
| {
log.Error(err)
c.JSON(http.StatusInternalServerError, err)
return err
} | conditional_block |
wire.rs | //! Creating and consuming data in wire format.
use super::name::ToDname;
use super::net::{Ipv4Addr, Ipv6Addr};
use core::fmt;
use octseq::builder::{OctetsBuilder, Truncate};
use octseq::parse::{Parser, ShortInput};
//------------ Composer ------------------------------------------------------
pub trait Composer:
OctetsBuilder + AsRef<[u8]> + AsMut<[u8]> + Truncate
{
/// Appends a domain name using name compression if supported.
///
/// Domain name compression attempts to lower the size of a DNS message
/// by avoiding to include repeated domain name suffixes. Instead of
/// adding the full suffix, a pointer to the location of the previous
/// occurence is added. Since that occurence may itself contain a
/// compressed suffix, doing name compression isn’t cheap and therefore
/// optional. However, in order to be able to opt in, we need to know
/// if we are dealing with a domain name that ought to be compressed.
///
/// The trait provides a default implementation which simply appends the
/// name uncompressed.
fn append_compressed_dname<N: ToDname + ?Sized>(
&mut self,
name: &N,
) -> Result<(), Self::AppendError> {
name.compose(self)
}
fn can_compress(&self) -> bool {
false
}
}
#[cfg(feature = "std")]
impl Composer for std::vec::Vec<u8> {}
impl<const N: usize> Composer for octseq::array::Array<N> {}
#[cfg(feature = "bytes")]
impl Composer for bytes::BytesMut {}
#[cfg(feature = "smallvec")]
impl<A: smallvec::Array<Item = u8>> Composer for smallvec::SmallVec<A> {}
//------------ Compose -------------------------------------------------------
/// An extension trait to add composing to foreign types.
///
/// This trait can be used to add the `compose` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
///
/// The trait can only be used for types that have a fixed-size wire
/// representation.
pub trait Compose {
/// The length in octets of the wire representation of a value.
///
/// Because all wire format lengths are limited to 16 bit, this is a
/// `u16` rather than a `usize`.
const COMPOSE_LEN: u16 = 0;
/// Appends the wire format representation of the value to the target.
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError>;
}
impl<'a, T: Compose + ?Sized> Compose for &'a T {
const COMPOSE_LEN: u16 = T::COMPOSE_LEN;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
(*self).compose(target)
}
}
impl Compose for i8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self as u8])
}
}
impl Compose for u8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self])
}
}
macro_rules! compose_to_be_bytes {
( $type:ident ) => {
impl Compose for $type {
const COMPOSE_LEN: u16 = ($type::BITS >> 3) as u16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.to_be_bytes())
}
}
};
}
compose_to_be_bytes!(i16);
compose_to_be_bytes!(u16);
compose_to_be_bytes!(i32);
compose_to_be_bytes!(u32);
compose_to_be_bytes!(i64);
compose_to_be_bytes!(u64);
compose_to_be_bytes!(i128);
compose_to_be_bytes!(u128);
impl Compose for Ipv4Addr {
const COMPOSE_LEN: u16 = 4;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
impl Compose for Ipv6Addr {
const COMPOSE_LEN: u16 = 16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
// No impl for [u8; const N: usize] because we can’t guarantee a correct
// COMPOSE_LEN -- it may be longer than a u16 can hold.
//------------ Parse ------------------------------------------------------
/// An extension trait to add parsing to foreign types.
///
/// This trait can be used to add the `parse` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
pub trait Parse<'a, Octs: ?Sized>: Sized {
/// Extracts a value from the beginning of `parser`.
///
/// If parsing fails and an error is returned, the parser’s position
/// should be considered to be undefined. If it is supposed to be reused
/// in this case, you should store the position before attempting to parse
/// and seek to that position again before continuing.
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError>;
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
| pl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv4Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
Ok(Self::new(
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
))
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv6Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut buf = [0u8; 16];
parser.parse_buf(&mut buf)?;
Ok(buf.into())
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized, const N: usize> Parse<'a, Octs>
for [u8; N]
{
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut res = [0u8; N];
parser.parse_buf(&mut res)?;
Ok(res)
}
}
//============ Helpful Function ==============================================
/// Parses something from a `Vec<u8>`.
///
/// The actual parsing happens in the provided closure. Returns an error if
/// the closure returns an error or if there is unparsed data left over after
/// the closure returns. Otherwise returns whatever the closure returned.
#[cfg(feature = "std")]
pub fn parse_slice<F, T>(data: &[u8], op: F) -> Result<T, ParseError>
where
F: FnOnce(&mut Parser<[u8]>) -> Result<T, ParseError>,
{
let mut parser = Parser::from_ref(data);
let res = op(&mut parser)?;
if parser.remaining() > 0 {
Err(ParseError::form_error("trailing data"))
} else {
Ok(res)
}
}
/// Composes something into a `Vec<u8>`.
///
/// The actual composing happens in the provided closure.
/// This function is mostly useful in testing so you can construct this vec
/// directly inside an asserting.
#[cfg(feature = "std")]
pub fn compose_vec(
op: impl FnOnce(
&mut std::vec::Vec<u8>,
) -> Result<(), core::convert::Infallible>,
) -> std::vec::Vec<u8> {
let mut res = std::vec::Vec::new();
octseq::builder::infallible(op(&mut res));
res
}
//============ Error Types ===================================================
//------------ ParseError ----------------------------------------------------
/// An error happened while parsing data.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ParseError {
/// An attempt was made to go beyond the end of the parser.
ShortInput,
/// A formatting error occurred.
Form(FormError),
}
impl ParseError {
/// Creates a new parse error as a form error with the given message.
pub fn form_error(msg: &'static str) -> Self {
FormError::new(msg).into()
}
}
//--- From
impl From<ShortInput> for ParseError {
fn from(_: ShortInput) -> Self {
ParseError::ShortInput
}
}
impl From<FormError> for ParseError {
fn from(err: FormError) -> Self {
ParseError::Form(err)
}
}
//--- Display and Error
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::ShortInput => f.write_str("unexpected end of input"),
ParseError::Form(ref err) => err.fmt(f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ParseError {}
//------------ FormError -----------------------------------------------------
/// A formatting error occured.
///
/// This is a generic error for all kinds of error cases that result in data
/// not being accepted. For diagnostics, the error is being given a static
/// string describing the error.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct FormError(&'static str);
impl FormError {
/// Creates a new form error value with the given diagnostics string.
pub fn new(msg: &'static str) -> Self {
FormError(msg)
}
}
//--- Display and Error
impl fmt::Display for FormError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.0)
}
}
#[cfg(feature = "std")]
impl std::error::Error for FormError {}
| parser.parse_u16_be().map_err(Into::into)
}
}
im | identifier_body |
wire.rs | //! Creating and consuming data in wire format.
use super::name::ToDname;
use super::net::{Ipv4Addr, Ipv6Addr};
use core::fmt;
use octseq::builder::{OctetsBuilder, Truncate};
use octseq::parse::{Parser, ShortInput};
//------------ Composer ------------------------------------------------------
pub trait Composer:
OctetsBuilder + AsRef<[u8]> + AsMut<[u8]> + Truncate
{
/// Appends a domain name using name compression if supported.
///
/// Domain name compression attempts to lower the size of a DNS message
/// by avoiding to include repeated domain name suffixes. Instead of
/// adding the full suffix, a pointer to the location of the previous
/// occurence is added. Since that occurence may itself contain a
/// compressed suffix, doing name compression isn’t cheap and therefore
/// optional. However, in order to be able to opt in, we need to know
/// if we are dealing with a domain name that ought to be compressed.
///
/// The trait provides a default implementation which simply appends the
/// name uncompressed.
fn append_compressed_dname<N: ToDname + ?Sized>(
&mut self,
name: &N,
) -> Result<(), Self::AppendError> {
name.compose(self)
}
fn can_compress(&self) -> bool {
false
}
}
#[cfg(feature = "std")]
impl Composer for std::vec::Vec<u8> {}
impl<const N: usize> Composer for octseq::array::Array<N> {}
#[cfg(feature = "bytes")]
impl Composer for bytes::BytesMut {}
#[cfg(feature = "smallvec")]
impl<A: smallvec::Array<Item = u8>> Composer for smallvec::SmallVec<A> {}
//------------ Compose -------------------------------------------------------
/// An extension trait to add composing to foreign types.
///
/// This trait can be used to add the `compose` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
///
/// The trait can only be used for types that have a fixed-size wire
/// representation.
pub trait Compose {
/// The length in octets of the wire representation of a value.
///
/// Because all wire format lengths are limited to 16 bit, this is a
/// `u16` rather than a `usize`.
const COMPOSE_LEN: u16 = 0;
/// Appends the wire format representation of the value to the target.
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError>;
}
impl<'a, T: Compose + ?Sized> Compose for &'a T {
const COMPOSE_LEN: u16 = T::COMPOSE_LEN;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
(*self).compose(target)
}
}
impl Compose for i8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self as u8])
}
}
impl Compose for u8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self])
}
}
macro_rules! compose_to_be_bytes {
( $type:ident ) => {
impl Compose for $type {
const COMPOSE_LEN: u16 = ($type::BITS >> 3) as u16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.to_be_bytes())
}
}
};
}
compose_to_be_bytes!(i16);
compose_to_be_bytes!(u16);
compose_to_be_bytes!(i32);
compose_to_be_bytes!(u32);
compose_to_be_bytes!(i64);
compose_to_be_bytes!(u64);
compose_to_be_bytes!(i128);
compose_to_be_bytes!(u128);
impl Compose for Ipv4Addr {
const COMPOSE_LEN: u16 = 4;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
impl Compose for Ipv6Addr {
const COMPOSE_LEN: u16 = 16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
// No impl for [u8; const N: usize] because we can’t guarantee a correct
// COMPOSE_LEN -- it may be longer than a u16 can hold.
//------------ Parse ------------------------------------------------------
/// An extension trait to add parsing to foreign types.
///
/// This trait can be used to add the `parse` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
pub trait Parse<'a, Octs: ?Sized>: Sized {
/// Extracts a value from the beginning of `parser`.
///
/// If parsing fails and an error is returned, the parser’s position
/// should be considered to be undefined. If it is supposed to be reused
/// in this case, you should store the position before attempting to parse
/// and seek to that position again before continuing.
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError>;
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv4Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
Ok(Self::new(
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
))
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv6Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut buf = [0u8; 16];
parser.parse_buf(&mut buf)?;
Ok(buf.into())
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized, const N: usize> Parse<'a, Octs>
for [u8; N]
{
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut res = [0u8; N];
parser.parse_buf(&mut res)?;
Ok(res)
}
}
//============ Helpful Function ==============================================
/// Parses something from a `Vec<u8>`.
///
/// The actual parsing happens in the provided closure. Returns an error if
/// the closure returns an error or if there is unparsed data left over after
/// the closure returns. Otherwise returns whatever the closure returned.
#[cfg(feature = "std")]
pub fn parse_slice<F, T>(data: &[u8], op: F) -> Result<T, ParseError>
where
F: FnOnce(&mut Parser<[u8]>) -> Result<T, ParseError>,
{
let mut parser = Parser::from_ref(data);
let res = op(&mut parser)?;
if parser.remaining() > 0 {
Err(ParseError::form_error("trailing data"))
} else {
Ok(res)
}
}
/// Composes something into a `Vec<u8>`.
///
/// The actual composing happens in the provided closure.
/// This function is mostly useful in testing so you can construct this vec
/// directly inside an asserting.
#[cfg(feature = "std")]
pub fn compose_vec(
op: impl FnOnce(
&mut std::vec::Vec<u8>,
) -> Result<(), core::convert::Infallible>,
) -> std::vec::Vec<u8> {
let mut res = std::vec::Vec::new();
octseq::builder::infallible(op(&mut res));
res
}
//============ Error Types ===================================================
//------------ ParseError ----------------------------------------------------
/// An error happened while parsing data.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ParseError {
/// An attempt was made to go beyond the end of the parser.
ShortInput,
/// A formatting error occurred.
Form(FormError),
}
impl ParseError {
/// Creates a new parse error as a form error with the given message.
pub fn form_error(msg: &'static str) -> Self {
FormError::new(msg).into()
}
}
//--- From
impl From<ShortInput> for ParseError {
fn from(_: ShortInput) -> Self {
ParseError::ShortInput
}
}
| }
}
//--- Display and Error
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::ShortInput => f.write_str("unexpected end of input"),
ParseError::Form(ref err) => err.fmt(f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ParseError {}
//------------ FormError -----------------------------------------------------
/// A formatting error occured.
///
/// This is a generic error for all kinds of error cases that result in data
/// not being accepted. For diagnostics, the error is being given a static
/// string describing the error.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct FormError(&'static str);
impl FormError {
/// Creates a new form error value with the given diagnostics string.
pub fn new(msg: &'static str) -> Self {
FormError(msg)
}
}
//--- Display and Error
impl fmt::Display for FormError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.0)
}
}
#[cfg(feature = "std")]
impl std::error::Error for FormError {} | impl From<FormError> for ParseError {
fn from(err: FormError) -> Self {
ParseError::Form(err) | random_line_split |
wire.rs | //! Creating and consuming data in wire format.
use super::name::ToDname;
use super::net::{Ipv4Addr, Ipv6Addr};
use core::fmt;
use octseq::builder::{OctetsBuilder, Truncate};
use octseq::parse::{Parser, ShortInput};
//------------ Composer ------------------------------------------------------
pub trait Composer:
OctetsBuilder + AsRef<[u8]> + AsMut<[u8]> + Truncate
{
/// Appends a domain name using name compression if supported.
///
/// Domain name compression attempts to lower the size of a DNS message
/// by avoiding to include repeated domain name suffixes. Instead of
/// adding the full suffix, a pointer to the location of the previous
/// occurence is added. Since that occurence may itself contain a
/// compressed suffix, doing name compression isn’t cheap and therefore
/// optional. However, in order to be able to opt in, we need to know
/// if we are dealing with a domain name that ought to be compressed.
///
/// The trait provides a default implementation which simply appends the
/// name uncompressed.
fn append_compressed_dname<N: ToDname + ?Sized>(
&mut self,
name: &N,
) -> Result<(), Self::AppendError> {
name.compose(self)
}
fn ca | self) -> bool {
false
}
}
#[cfg(feature = "std")]
impl Composer for std::vec::Vec<u8> {}
impl<const N: usize> Composer for octseq::array::Array<N> {}
#[cfg(feature = "bytes")]
impl Composer for bytes::BytesMut {}
#[cfg(feature = "smallvec")]
impl<A: smallvec::Array<Item = u8>> Composer for smallvec::SmallVec<A> {}
//------------ Compose -------------------------------------------------------
/// An extension trait to add composing to foreign types.
///
/// This trait can be used to add the `compose` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
///
/// The trait can only be used for types that have a fixed-size wire
/// representation.
pub trait Compose {
/// The length in octets of the wire representation of a value.
///
/// Because all wire format lengths are limited to 16 bit, this is a
/// `u16` rather than a `usize`.
const COMPOSE_LEN: u16 = 0;
/// Appends the wire format representation of the value to the target.
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError>;
}
impl<'a, T: Compose + ?Sized> Compose for &'a T {
const COMPOSE_LEN: u16 = T::COMPOSE_LEN;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
(*self).compose(target)
}
}
impl Compose for i8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self as u8])
}
}
impl Compose for u8 {
const COMPOSE_LEN: u16 = 1;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&[*self])
}
}
macro_rules! compose_to_be_bytes {
( $type:ident ) => {
impl Compose for $type {
const COMPOSE_LEN: u16 = ($type::BITS >> 3) as u16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.to_be_bytes())
}
}
};
}
compose_to_be_bytes!(i16);
compose_to_be_bytes!(u16);
compose_to_be_bytes!(i32);
compose_to_be_bytes!(u32);
compose_to_be_bytes!(i64);
compose_to_be_bytes!(u64);
compose_to_be_bytes!(i128);
compose_to_be_bytes!(u128);
impl Compose for Ipv4Addr {
const COMPOSE_LEN: u16 = 4;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
impl Compose for Ipv6Addr {
const COMPOSE_LEN: u16 = 16;
fn compose<Target: OctetsBuilder + ?Sized>(
&self,
target: &mut Target,
) -> Result<(), Target::AppendError> {
target.append_slice(&self.octets())
}
}
// No impl for [u8; const N: usize] because we can’t guarantee a correct
// COMPOSE_LEN -- it may be longer than a u16 can hold.
//------------ Parse ------------------------------------------------------
/// An extension trait to add parsing to foreign types.
///
/// This trait can be used to add the `parse` method to a foreign type. For
/// local types, the method should be added directly to the type instead.
pub trait Parse<'a, Octs: ?Sized>: Sized {
/// Extracts a value from the beginning of `parser`.
///
/// If parsing fails and an error is returned, the parser’s position
/// should be considered to be undefined. If it is supposed to be reused
/// in this case, you should store the position before attempting to parse
/// and seek to that position again before continuing.
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError>;
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u8 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u8().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u16 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u16_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u32 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u32_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for u64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_u64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for i64 {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
parser.parse_i64_be().map_err(Into::into)
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv4Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
Ok(Self::new(
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
u8::parse(parser)?,
))
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized> Parse<'a, Octs> for Ipv6Addr {
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut buf = [0u8; 16];
parser.parse_buf(&mut buf)?;
Ok(buf.into())
}
}
impl<'a, Octs: AsRef<[u8]> + ?Sized, const N: usize> Parse<'a, Octs>
for [u8; N]
{
fn parse(parser: &mut Parser<'a, Octs>) -> Result<Self, ParseError> {
let mut res = [0u8; N];
parser.parse_buf(&mut res)?;
Ok(res)
}
}
//============ Helpful Function ==============================================
/// Parses something from a `Vec<u8>`.
///
/// The actual parsing happens in the provided closure. Returns an error if
/// the closure returns an error or if there is unparsed data left over after
/// the closure returns. Otherwise returns whatever the closure returned.
#[cfg(feature = "std")]
pub fn parse_slice<F, T>(data: &[u8], op: F) -> Result<T, ParseError>
where
F: FnOnce(&mut Parser<[u8]>) -> Result<T, ParseError>,
{
let mut parser = Parser::from_ref(data);
let res = op(&mut parser)?;
if parser.remaining() > 0 {
Err(ParseError::form_error("trailing data"))
} else {
Ok(res)
}
}
/// Composes something into a `Vec<u8>`.
///
/// The actual composing happens in the provided closure.
/// This function is mostly useful in testing so you can construct this vec
/// directly inside an asserting.
#[cfg(feature = "std")]
pub fn compose_vec(
op: impl FnOnce(
&mut std::vec::Vec<u8>,
) -> Result<(), core::convert::Infallible>,
) -> std::vec::Vec<u8> {
let mut res = std::vec::Vec::new();
octseq::builder::infallible(op(&mut res));
res
}
//============ Error Types ===================================================
//------------ ParseError ----------------------------------------------------
/// An error happened while parsing data.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ParseError {
/// An attempt was made to go beyond the end of the parser.
ShortInput,
/// A formatting error occurred.
Form(FormError),
}
impl ParseError {
/// Creates a new parse error as a form error with the given message.
pub fn form_error(msg: &'static str) -> Self {
FormError::new(msg).into()
}
}
//--- From
impl From<ShortInput> for ParseError {
fn from(_: ShortInput) -> Self {
ParseError::ShortInput
}
}
impl From<FormError> for ParseError {
fn from(err: FormError) -> Self {
ParseError::Form(err)
}
}
//--- Display and Error
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ParseError::ShortInput => f.write_str("unexpected end of input"),
ParseError::Form(ref err) => err.fmt(f),
}
}
}
#[cfg(feature = "std")]
impl std::error::Error for ParseError {}
//------------ FormError -----------------------------------------------------
/// A formatting error occured.
///
/// This is a generic error for all kinds of error cases that result in data
/// not being accepted. For diagnostics, the error is being given a static
/// string describing the error.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct FormError(&'static str);
impl FormError {
/// Creates a new form error value with the given diagnostics string.
pub fn new(msg: &'static str) -> Self {
FormError(msg)
}
}
//--- Display and Error
impl fmt::Display for FormError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.0)
}
}
#[cfg(feature = "std")]
impl std::error::Error for FormError {}
| n_compress(& | identifier_name |
add_manifest_deployitem.go | // SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
//
// SPDX-License-Identifier: Apache-2.0
package components
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"text/template"
"github.com/gardener/landscapercli/pkg/components"
"github.com/gardener/landscaper/apis/core/v1alpha1"
"github.com/gardener/landscaper/apis/deployer/utils/managedresource"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"sigs.k8s.io/yaml"
"github.com/gardener/landscapercli/pkg/blueprints"
"github.com/gardener/landscapercli/pkg/logger"
"github.com/gardener/landscapercli/pkg/util"
)
const addManifestDeployItemUse = `deployitem \
[deployitem name] \
`
const addManifestDeployItemExample = `
landscaper-cli component add manifest deployitem \
nginx \
--component-directory ~/myComponent \
--manifest-file ./deployment.yaml \
--manifest-file ./service.yaml \
--import-param replicas:integer
--cluster-param target-cluster
`
const addManifestDeployItemShort = `
Command to add a deploy item skeleton to the blueprint of a component`
//var identityKeyValidationRegexp = regexp.MustCompile("^[a-z0-9]([-_+a-z0-9]*[a-z0-9])?$")
type addManifestDeployItemOptions struct {
componentPath string
deployItemName string
// names of manifest files
files *[]string
// import parameter definitions in the format "name:type"
importParams *[]string
// parsed import parameter definitions
importDefinitions map[string]*v1alpha1.ImportDefinition
// a map that assigns with each import parameter name a uuid
replacement map[string]string
updateStrategy string
policy string
clusterParam string
}
// NewCreateCommand creates a new blueprint command to create a blueprint
func NewAddManifestDeployItemCommand(ctx context.Context) *cobra.Command {
opts := &addManifestDeployItemOptions{}
cmd := &cobra.Command{
Use: addManifestDeployItemUse,
Example: addManifestDeployItemExample,
Short: addManifestDeployItemShort,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
if err := opts.Complete(args); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
if err := opts.run(ctx, logger.Log); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Printf("Deploy item added")
fmt.Printf(" \n- deploy item definition in blueprint folder in file %s created", util.ExecutionFileName(opts.deployItemName))
fmt.Printf(" \n- file reference to deploy item definition added to blueprint")
fmt.Printf(" \n- import definitions added to blueprint")
},
}
opts.AddFlags(cmd.Flags())
return cmd
}
func (o *addManifestDeployItemOptions) Complete(args []string) error {
o.deployItemName = args[0]
if err := o.parseParameterDefinitions(); err != nil {
return err
}
return o.validate()
}
func (o *addManifestDeployItemOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.componentPath,
"component-directory",
".",
"path to component directory (optional, default is current directory)")
o.files = fs.StringArray(
"manifest-file",
[]string{},
"manifest file containing one kubernetes resource")
o.importParams = fs.StringArray(
"import-param",
[]string{},
"import parameter as name:integer|string|boolean, e.g. replicas:integer")
fs.StringVar(&o.updateStrategy,
"update-strategy",
"update",
"update stategy")
fs.StringVar(&o.policy,
"policy",
"manage",
"policy")
fs.StringVar(&o.clusterParam,
"cluster-param",
"targetCluster",
"import parameter name for the target resource containing the access data of the target cluster")
}
func (o *addManifestDeployItemOptions) parseParameterDefinitions() (err error) {
p := components.ParameterDefinitionParser{}
o.importDefinitions, err = p.ParseImportDefinitions(o.importParams)
if err != nil {
return err
}
o.replacement = map[string]string{}
for paramName := range o.importDefinitions {
o.replacement[paramName] = string(uuid.NewUUID())
}
return nil
}
func (o *addManifestDeployItemOptions) validate() error {
if !identityKeyValidationRegexp.Match([]byte(o.deployItemName)) {
return fmt.Errorf("the deploy item name must consist of lower case alphanumeric characters, '-', '_' " +
"or '+', and must start and end with an alphanumeric character")
}
if o.clusterParam == "" {
return fmt.Errorf("cluster-param is missing")
}
if o.files == nil || len(*(o.files)) == 0 {
return fmt.Errorf("no manifest files specified")
}
for _, path := range *(o.files) {
fileInfo, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("manifest file %s does not exist", path)
}
return err
}
if fileInfo.IsDir() {
return fmt.Errorf("manifest file %s is a directory", path)
}
}
err := o.checkIfDeployItemNotAlreadyAdded()
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) run(ctx context.Context, log logr.Logger) error {
err := o.createExecutionFile()
if err != nil {
return err
}
blueprintPath := util.BlueprintDirectoryPath(o.componentPath)
blueprint, err := blueprints.NewBlueprintReader(blueprintPath).Read()
if err != nil {
return err
}
blueprintBuilder := blueprints.NewBlueprintBuilder(blueprint)
if blueprintBuilder.ExistsDeployExecution(o.deployItemName) {
return fmt.Errorf("The blueprint already contains a deploy item %s\n", o.deployItemName)
}
blueprintBuilder.AddDeployExecution(o.deployItemName)
blueprintBuilder.AddImportForTarget(o.clusterParam)
blueprintBuilder.AddImportsFromMap(o.importDefinitions)
return blueprints.NewBlueprintWriter(blueprintPath).Write(blueprint)
}
func (o *addManifestDeployItemOptions) checkIfDeployItemNotAlreadyAdded() error {
_, err := os.Stat(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return fmt.Errorf("Deploy item was already added. The corresponding deploy execution file %s already exists\n",
util.ExecutionFilePath(o.componentPath, o.deployItemName))
}
// parseImportDefinition creates a new ImportDefinition from a given parameter definition string.
// The parameter definition string must have the format "name:type", for example "replicas:integer".
// The supported types are: string, boolean, integer
func (o *addManifestDeployItemOptions) parseImportDefinition(paramDef string) (*v1alpha1.ImportDefinition, error) {
p := components.ParameterDefinitionParser{}
fieldValueDef, err := p.ParseFieldValueDefinition(paramDef)
if err != nil {
return nil, err
}
required := true
return &v1alpha1.ImportDefinition{
FieldValueDefinition: *fieldValueDef,
Required: &required,
}, nil
}
func (o *addManifestDeployItemOptions) createExecutionFile() error {
manifests, err := o.getManifests()
if err != nil {
return err
}
f, err := os.Create(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
return err
}
defer f.Close()
err = o.writeExecution(f)
if err != nil {
return err
}
_, err = f.WriteString(manifests)
return err
}
const manifestExecutionTemplate = `deployItems:
- name: {{.DeployItemName}}
type: landscaper.gardener.cloud/kubernetes-manifest
target:
name: {{.TargetNameExpression}}
namespace: {{.TargetNamespaceExpression}}
config:
apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2
kind: ProviderConfiguration
updateStrategy: {{.UpdateStrategy}}
`
func (o *addManifestDeployItemOptions) writeExecution(f io.Writer) error {
t, err := template.New("").Parse(manifestExecutionTemplate)
if err != nil {
return err
}
data := struct {
DeployItemName string
TargetNameExpression string
TargetNamespaceExpression string
UpdateStrategy string
}{
DeployItemName: o.deployItemName,
TargetNameExpression: blueprints.GetTargetNameExpression(o.clusterParam),
TargetNamespaceExpression: blueprints.GetTargetNamespaceExpression(o.clusterParam),
UpdateStrategy: o.updateStrategy,
}
err = t.Execute(f, data)
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) getManifests() (string, error) {
data, err := o.getManifestsYaml()
if err != nil {
return "", err
}
stringData := string(data)
stringData = indentLines(stringData, 4)
return stringData, nil
}
func indentLines(data string, n int) string {
indent := strings.Repeat(" ", n)
return indent + strings.ReplaceAll(data, "\n", "\n"+indent)
}
func (o *addManifestDeployItemOptions) getManifestsYaml() ([]byte, error) {
manifests, err := o.readManifests()
if err != nil {
return nil, err
}
m := map[string][]managedresource.Manifest{
"manifests": manifests,
}
data, err := yaml.Marshal(m)
if err != nil {
return nil, err
}
data = o.replaceUUIDsByImportTemplates(data)
return data, nil
}
func (o *addManifestDeployItemOptions) readManifests() ([]managedresource.Manifest, error) {
manifests := []managedresource.Manifest{}
if o.files == nil {
return manifests, nil
}
for _, filename := range *o.files {
m, err := o.readManifest(filename)
if err != nil {
return manifests, err
}
manifests = append(manifests, *m)
}
return manifests, nil
}
func (o *addManifestDeployItemOptions) readManifest(filename string) (*managedresource.Manifest, error) {
yamlData, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var m interface{}
err = yaml.Unmarshal(yamlData, &m)
if err != nil {
return nil, err
}
m = o.replaceParamsByUUIDs(m)
// render to string
uuidData, err := json.Marshal(m)
if err != nil {
return nil, err
}
m2 := &managedresource.Manifest{
Policy: managedresource.ManifestPolicy(o.policy),
Manifest: &runtime.RawExtension{
Raw: uuidData,
},
}
return m2, nil
}
func (o *addManifestDeployItemOptions) | (in interface{}) interface{} {
switch m := in.(type) {
case map[string]interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case []interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case string:
newValue, ok := o.replacement[m]
if ok {
return newValue
}
return m
default:
return m
}
}
func (o *addManifestDeployItemOptions) replaceUUIDsByImportTemplates(data []byte) []byte {
s := string(data)
for paramName, uuid := range o.replacement {
newValue := blueprints.GetImportExpression(paramName)
s = strings.ReplaceAll(s, uuid, newValue)
}
return []byte(s)
}
| replaceParamsByUUIDs | identifier_name |
add_manifest_deployitem.go | // SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
//
// SPDX-License-Identifier: Apache-2.0
package components
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"text/template"
"github.com/gardener/landscapercli/pkg/components"
"github.com/gardener/landscaper/apis/core/v1alpha1"
"github.com/gardener/landscaper/apis/deployer/utils/managedresource"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"sigs.k8s.io/yaml"
"github.com/gardener/landscapercli/pkg/blueprints"
"github.com/gardener/landscapercli/pkg/logger"
"github.com/gardener/landscapercli/pkg/util"
)
const addManifestDeployItemUse = `deployitem \
[deployitem name] \
`
const addManifestDeployItemExample = `
landscaper-cli component add manifest deployitem \
nginx \
--component-directory ~/myComponent \
--manifest-file ./deployment.yaml \
--manifest-file ./service.yaml \
--import-param replicas:integer
--cluster-param target-cluster
`
const addManifestDeployItemShort = `
Command to add a deploy item skeleton to the blueprint of a component`
//var identityKeyValidationRegexp = regexp.MustCompile("^[a-z0-9]([-_+a-z0-9]*[a-z0-9])?$")
type addManifestDeployItemOptions struct {
componentPath string
deployItemName string
// names of manifest files
files *[]string
// import parameter definitions in the format "name:type"
importParams *[]string
// parsed import parameter definitions
importDefinitions map[string]*v1alpha1.ImportDefinition
// a map that assigns with each import parameter name a uuid
replacement map[string]string
updateStrategy string
policy string
clusterParam string
}
// NewCreateCommand creates a new blueprint command to create a blueprint
func NewAddManifestDeployItemCommand(ctx context.Context) *cobra.Command {
opts := &addManifestDeployItemOptions{}
cmd := &cobra.Command{
Use: addManifestDeployItemUse,
Example: addManifestDeployItemExample,
Short: addManifestDeployItemShort,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
if err := opts.Complete(args); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
if err := opts.run(ctx, logger.Log); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Printf("Deploy item added")
fmt.Printf(" \n- deploy item definition in blueprint folder in file %s created", util.ExecutionFileName(opts.deployItemName))
fmt.Printf(" \n- file reference to deploy item definition added to blueprint")
fmt.Printf(" \n- import definitions added to blueprint")
},
}
opts.AddFlags(cmd.Flags())
return cmd
}
func (o *addManifestDeployItemOptions) Complete(args []string) error {
o.deployItemName = args[0]
if err := o.parseParameterDefinitions(); err != nil {
return err
}
return o.validate()
}
func (o *addManifestDeployItemOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.componentPath,
"component-directory",
".",
"path to component directory (optional, default is current directory)")
o.files = fs.StringArray(
"manifest-file",
[]string{},
"manifest file containing one kubernetes resource")
o.importParams = fs.StringArray(
"import-param",
[]string{},
"import parameter as name:integer|string|boolean, e.g. replicas:integer")
fs.StringVar(&o.updateStrategy,
"update-strategy",
"update",
"update stategy")
fs.StringVar(&o.policy,
"policy",
"manage",
"policy")
fs.StringVar(&o.clusterParam,
"cluster-param",
"targetCluster",
"import parameter name for the target resource containing the access data of the target cluster")
}
func (o *addManifestDeployItemOptions) parseParameterDefinitions() (err error) {
p := components.ParameterDefinitionParser{}
o.importDefinitions, err = p.ParseImportDefinitions(o.importParams)
if err != nil {
return err
}
o.replacement = map[string]string{}
for paramName := range o.importDefinitions {
o.replacement[paramName] = string(uuid.NewUUID())
}
return nil
}
func (o *addManifestDeployItemOptions) validate() error |
func (o *addManifestDeployItemOptions) run(ctx context.Context, log logr.Logger) error {
err := o.createExecutionFile()
if err != nil {
return err
}
blueprintPath := util.BlueprintDirectoryPath(o.componentPath)
blueprint, err := blueprints.NewBlueprintReader(blueprintPath).Read()
if err != nil {
return err
}
blueprintBuilder := blueprints.NewBlueprintBuilder(blueprint)
if blueprintBuilder.ExistsDeployExecution(o.deployItemName) {
return fmt.Errorf("The blueprint already contains a deploy item %s\n", o.deployItemName)
}
blueprintBuilder.AddDeployExecution(o.deployItemName)
blueprintBuilder.AddImportForTarget(o.clusterParam)
blueprintBuilder.AddImportsFromMap(o.importDefinitions)
return blueprints.NewBlueprintWriter(blueprintPath).Write(blueprint)
}
func (o *addManifestDeployItemOptions) checkIfDeployItemNotAlreadyAdded() error {
_, err := os.Stat(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return fmt.Errorf("Deploy item was already added. The corresponding deploy execution file %s already exists\n",
util.ExecutionFilePath(o.componentPath, o.deployItemName))
}
// parseImportDefinition creates a new ImportDefinition from a given parameter definition string.
// The parameter definition string must have the format "name:type", for example "replicas:integer".
// The supported types are: string, boolean, integer
func (o *addManifestDeployItemOptions) parseImportDefinition(paramDef string) (*v1alpha1.ImportDefinition, error) {
p := components.ParameterDefinitionParser{}
fieldValueDef, err := p.ParseFieldValueDefinition(paramDef)
if err != nil {
return nil, err
}
required := true
return &v1alpha1.ImportDefinition{
FieldValueDefinition: *fieldValueDef,
Required: &required,
}, nil
}
func (o *addManifestDeployItemOptions) createExecutionFile() error {
manifests, err := o.getManifests()
if err != nil {
return err
}
f, err := os.Create(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
return err
}
defer f.Close()
err = o.writeExecution(f)
if err != nil {
return err
}
_, err = f.WriteString(manifests)
return err
}
const manifestExecutionTemplate = `deployItems:
- name: {{.DeployItemName}}
type: landscaper.gardener.cloud/kubernetes-manifest
target:
name: {{.TargetNameExpression}}
namespace: {{.TargetNamespaceExpression}}
config:
apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2
kind: ProviderConfiguration
updateStrategy: {{.UpdateStrategy}}
`
func (o *addManifestDeployItemOptions) writeExecution(f io.Writer) error {
t, err := template.New("").Parse(manifestExecutionTemplate)
if err != nil {
return err
}
data := struct {
DeployItemName string
TargetNameExpression string
TargetNamespaceExpression string
UpdateStrategy string
}{
DeployItemName: o.deployItemName,
TargetNameExpression: blueprints.GetTargetNameExpression(o.clusterParam),
TargetNamespaceExpression: blueprints.GetTargetNamespaceExpression(o.clusterParam),
UpdateStrategy: o.updateStrategy,
}
err = t.Execute(f, data)
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) getManifests() (string, error) {
data, err := o.getManifestsYaml()
if err != nil {
return "", err
}
stringData := string(data)
stringData = indentLines(stringData, 4)
return stringData, nil
}
func indentLines(data string, n int) string {
indent := strings.Repeat(" ", n)
return indent + strings.ReplaceAll(data, "\n", "\n"+indent)
}
func (o *addManifestDeployItemOptions) getManifestsYaml() ([]byte, error) {
manifests, err := o.readManifests()
if err != nil {
return nil, err
}
m := map[string][]managedresource.Manifest{
"manifests": manifests,
}
data, err := yaml.Marshal(m)
if err != nil {
return nil, err
}
data = o.replaceUUIDsByImportTemplates(data)
return data, nil
}
func (o *addManifestDeployItemOptions) readManifests() ([]managedresource.Manifest, error) {
manifests := []managedresource.Manifest{}
if o.files == nil {
return manifests, nil
}
for _, filename := range *o.files {
m, err := o.readManifest(filename)
if err != nil {
return manifests, err
}
manifests = append(manifests, *m)
}
return manifests, nil
}
func (o *addManifestDeployItemOptions) readManifest(filename string) (*managedresource.Manifest, error) {
yamlData, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var m interface{}
err = yaml.Unmarshal(yamlData, &m)
if err != nil {
return nil, err
}
m = o.replaceParamsByUUIDs(m)
// render to string
uuidData, err := json.Marshal(m)
if err != nil {
return nil, err
}
m2 := &managedresource.Manifest{
Policy: managedresource.ManifestPolicy(o.policy),
Manifest: &runtime.RawExtension{
Raw: uuidData,
},
}
return m2, nil
}
func (o *addManifestDeployItemOptions) replaceParamsByUUIDs(in interface{}) interface{} {
switch m := in.(type) {
case map[string]interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case []interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case string:
newValue, ok := o.replacement[m]
if ok {
return newValue
}
return m
default:
return m
}
}
func (o *addManifestDeployItemOptions) replaceUUIDsByImportTemplates(data []byte) []byte {
s := string(data)
for paramName, uuid := range o.replacement {
newValue := blueprints.GetImportExpression(paramName)
s = strings.ReplaceAll(s, uuid, newValue)
}
return []byte(s)
}
| {
if !identityKeyValidationRegexp.Match([]byte(o.deployItemName)) {
return fmt.Errorf("the deploy item name must consist of lower case alphanumeric characters, '-', '_' " +
"or '+', and must start and end with an alphanumeric character")
}
if o.clusterParam == "" {
return fmt.Errorf("cluster-param is missing")
}
if o.files == nil || len(*(o.files)) == 0 {
return fmt.Errorf("no manifest files specified")
}
for _, path := range *(o.files) {
fileInfo, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("manifest file %s does not exist", path)
}
return err
}
if fileInfo.IsDir() {
return fmt.Errorf("manifest file %s is a directory", path)
}
}
err := o.checkIfDeployItemNotAlreadyAdded()
if err != nil {
return err
}
return nil
} | identifier_body |
add_manifest_deployitem.go | // SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
//
// SPDX-License-Identifier: Apache-2.0
package components
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"text/template"
"github.com/gardener/landscapercli/pkg/components"
"github.com/gardener/landscaper/apis/core/v1alpha1"
"github.com/gardener/landscaper/apis/deployer/utils/managedresource"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"sigs.k8s.io/yaml"
"github.com/gardener/landscapercli/pkg/blueprints"
"github.com/gardener/landscapercli/pkg/logger"
"github.com/gardener/landscapercli/pkg/util"
)
const addManifestDeployItemUse = `deployitem \
[deployitem name] \
`
const addManifestDeployItemExample = `
landscaper-cli component add manifest deployitem \
nginx \
--component-directory ~/myComponent \
--manifest-file ./deployment.yaml \
--manifest-file ./service.yaml \
--import-param replicas:integer
--cluster-param target-cluster
`
const addManifestDeployItemShort = `
Command to add a deploy item skeleton to the blueprint of a component`
//var identityKeyValidationRegexp = regexp.MustCompile("^[a-z0-9]([-_+a-z0-9]*[a-z0-9])?$")
type addManifestDeployItemOptions struct {
componentPath string
deployItemName string
// names of manifest files
files *[]string
// import parameter definitions in the format "name:type"
importParams *[]string
// parsed import parameter definitions
importDefinitions map[string]*v1alpha1.ImportDefinition
// a map that assigns with each import parameter name a uuid
replacement map[string]string
updateStrategy string
policy string
clusterParam string
}
// NewCreateCommand creates a new blueprint command to create a blueprint
func NewAddManifestDeployItemCommand(ctx context.Context) *cobra.Command {
opts := &addManifestDeployItemOptions{}
cmd := &cobra.Command{
Use: addManifestDeployItemUse,
Example: addManifestDeployItemExample,
Short: addManifestDeployItemShort,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
if err := opts.Complete(args); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
if err := opts.run(ctx, logger.Log); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Printf("Deploy item added")
fmt.Printf(" \n- deploy item definition in blueprint folder in file %s created", util.ExecutionFileName(opts.deployItemName))
fmt.Printf(" \n- file reference to deploy item definition added to blueprint")
fmt.Printf(" \n- import definitions added to blueprint")
},
}
opts.AddFlags(cmd.Flags())
return cmd
}
func (o *addManifestDeployItemOptions) Complete(args []string) error {
o.deployItemName = args[0]
if err := o.parseParameterDefinitions(); err != nil {
return err
}
return o.validate()
}
func (o *addManifestDeployItemOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.componentPath,
"component-directory",
".",
"path to component directory (optional, default is current directory)")
o.files = fs.StringArray(
"manifest-file",
[]string{},
"manifest file containing one kubernetes resource")
o.importParams = fs.StringArray(
"import-param",
[]string{},
"import parameter as name:integer|string|boolean, e.g. replicas:integer")
fs.StringVar(&o.updateStrategy,
"update-strategy",
"update",
"update stategy")
fs.StringVar(&o.policy,
"policy",
"manage",
"policy")
fs.StringVar(&o.clusterParam,
"cluster-param",
"targetCluster",
"import parameter name for the target resource containing the access data of the target cluster")
}
func (o *addManifestDeployItemOptions) parseParameterDefinitions() (err error) {
p := components.ParameterDefinitionParser{}
o.importDefinitions, err = p.ParseImportDefinitions(o.importParams)
if err != nil {
return err
}
o.replacement = map[string]string{}
for paramName := range o.importDefinitions {
o.replacement[paramName] = string(uuid.NewUUID())
}
return nil
}
func (o *addManifestDeployItemOptions) validate() error {
if !identityKeyValidationRegexp.Match([]byte(o.deployItemName)) {
return fmt.Errorf("the deploy item name must consist of lower case alphanumeric characters, '-', '_' " +
"or '+', and must start and end with an alphanumeric character")
}
if o.clusterParam == "" {
return fmt.Errorf("cluster-param is missing")
}
if o.files == nil || len(*(o.files)) == 0 {
return fmt.Errorf("no manifest files specified")
}
for _, path := range *(o.files) {
fileInfo, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("manifest file %s does not exist", path)
}
return err
}
if fileInfo.IsDir() {
return fmt.Errorf("manifest file %s is a directory", path)
}
}
err := o.checkIfDeployItemNotAlreadyAdded()
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) run(ctx context.Context, log logr.Logger) error {
err := o.createExecutionFile()
if err != nil {
return err
}
blueprintPath := util.BlueprintDirectoryPath(o.componentPath)
blueprint, err := blueprints.NewBlueprintReader(blueprintPath).Read()
if err != nil {
return err
}
blueprintBuilder := blueprints.NewBlueprintBuilder(blueprint)
if blueprintBuilder.ExistsDeployExecution(o.deployItemName) {
return fmt.Errorf("The blueprint already contains a deploy item %s\n", o.deployItemName)
}
blueprintBuilder.AddDeployExecution(o.deployItemName)
blueprintBuilder.AddImportForTarget(o.clusterParam)
blueprintBuilder.AddImportsFromMap(o.importDefinitions)
return blueprints.NewBlueprintWriter(blueprintPath).Write(blueprint)
}
func (o *addManifestDeployItemOptions) checkIfDeployItemNotAlreadyAdded() error {
_, err := os.Stat(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return fmt.Errorf("Deploy item was already added. The corresponding deploy execution file %s already exists\n",
util.ExecutionFilePath(o.componentPath, o.deployItemName))
}
// parseImportDefinition creates a new ImportDefinition from a given parameter definition string.
// The parameter definition string must have the format "name:type", for example "replicas:integer".
// The supported types are: string, boolean, integer
func (o *addManifestDeployItemOptions) parseImportDefinition(paramDef string) (*v1alpha1.ImportDefinition, error) {
p := components.ParameterDefinitionParser{}
fieldValueDef, err := p.ParseFieldValueDefinition(paramDef)
if err != nil {
return nil, err
}
required := true
return &v1alpha1.ImportDefinition{
FieldValueDefinition: *fieldValueDef,
Required: &required,
}, nil
}
func (o *addManifestDeployItemOptions) createExecutionFile() error {
manifests, err := o.getManifests()
if err != nil {
return err
}
f, err := os.Create(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
return err
}
defer f.Close()
err = o.writeExecution(f)
if err != nil {
return err
}
_, err = f.WriteString(manifests)
return err
}
const manifestExecutionTemplate = `deployItems:
- name: {{.DeployItemName}}
type: landscaper.gardener.cloud/kubernetes-manifest
target:
name: {{.TargetNameExpression}}
namespace: {{.TargetNamespaceExpression}}
config:
apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2
kind: ProviderConfiguration
updateStrategy: {{.UpdateStrategy}}
`
func (o *addManifestDeployItemOptions) writeExecution(f io.Writer) error {
t, err := template.New("").Parse(manifestExecutionTemplate)
if err != nil {
return err
}
data := struct {
DeployItemName string
TargetNameExpression string
TargetNamespaceExpression string
UpdateStrategy string
}{
DeployItemName: o.deployItemName,
TargetNameExpression: blueprints.GetTargetNameExpression(o.clusterParam),
TargetNamespaceExpression: blueprints.GetTargetNamespaceExpression(o.clusterParam),
UpdateStrategy: o.updateStrategy,
}
err = t.Execute(f, data)
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) getManifests() (string, error) {
data, err := o.getManifestsYaml()
if err != nil {
return "", err
}
stringData := string(data)
stringData = indentLines(stringData, 4)
return stringData, nil
}
func indentLines(data string, n int) string {
indent := strings.Repeat(" ", n)
return indent + strings.ReplaceAll(data, "\n", "\n"+indent)
}
func (o *addManifestDeployItemOptions) getManifestsYaml() ([]byte, error) {
manifests, err := o.readManifests()
if err != nil {
return nil, err
}
m := map[string][]managedresource.Manifest{
"manifests": manifests,
}
data, err := yaml.Marshal(m)
if err != nil {
return nil, err
}
data = o.replaceUUIDsByImportTemplates(data)
return data, nil
}
| return manifests, nil
}
for _, filename := range *o.files {
m, err := o.readManifest(filename)
if err != nil {
return manifests, err
}
manifests = append(manifests, *m)
}
return manifests, nil
}
func (o *addManifestDeployItemOptions) readManifest(filename string) (*managedresource.Manifest, error) {
yamlData, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var m interface{}
err = yaml.Unmarshal(yamlData, &m)
if err != nil {
return nil, err
}
m = o.replaceParamsByUUIDs(m)
// render to string
uuidData, err := json.Marshal(m)
if err != nil {
return nil, err
}
m2 := &managedresource.Manifest{
Policy: managedresource.ManifestPolicy(o.policy),
Manifest: &runtime.RawExtension{
Raw: uuidData,
},
}
return m2, nil
}
func (o *addManifestDeployItemOptions) replaceParamsByUUIDs(in interface{}) interface{} {
switch m := in.(type) {
case map[string]interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case []interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case string:
newValue, ok := o.replacement[m]
if ok {
return newValue
}
return m
default:
return m
}
}
func (o *addManifestDeployItemOptions) replaceUUIDsByImportTemplates(data []byte) []byte {
s := string(data)
for paramName, uuid := range o.replacement {
newValue := blueprints.GetImportExpression(paramName)
s = strings.ReplaceAll(s, uuid, newValue)
}
return []byte(s)
} | func (o *addManifestDeployItemOptions) readManifests() ([]managedresource.Manifest, error) {
manifests := []managedresource.Manifest{}
if o.files == nil { | random_line_split |
add_manifest_deployitem.go | // SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company and Gardener contributors.
//
// SPDX-License-Identifier: Apache-2.0
package components
import (
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"text/template"
"github.com/gardener/landscapercli/pkg/components"
"github.com/gardener/landscaper/apis/core/v1alpha1"
"github.com/gardener/landscaper/apis/deployer/utils/managedresource"
"github.com/go-logr/logr"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"sigs.k8s.io/yaml"
"github.com/gardener/landscapercli/pkg/blueprints"
"github.com/gardener/landscapercli/pkg/logger"
"github.com/gardener/landscapercli/pkg/util"
)
const addManifestDeployItemUse = `deployitem \
[deployitem name] \
`
const addManifestDeployItemExample = `
landscaper-cli component add manifest deployitem \
nginx \
--component-directory ~/myComponent \
--manifest-file ./deployment.yaml \
--manifest-file ./service.yaml \
--import-param replicas:integer
--cluster-param target-cluster
`
const addManifestDeployItemShort = `
Command to add a deploy item skeleton to the blueprint of a component`
//var identityKeyValidationRegexp = regexp.MustCompile("^[a-z0-9]([-_+a-z0-9]*[a-z0-9])?$")
type addManifestDeployItemOptions struct {
componentPath string
deployItemName string
// names of manifest files
files *[]string
// import parameter definitions in the format "name:type"
importParams *[]string
// parsed import parameter definitions
importDefinitions map[string]*v1alpha1.ImportDefinition
// a map that assigns with each import parameter name a uuid
replacement map[string]string
updateStrategy string
policy string
clusterParam string
}
// NewCreateCommand creates a new blueprint command to create a blueprint
func NewAddManifestDeployItemCommand(ctx context.Context) *cobra.Command {
opts := &addManifestDeployItemOptions{}
cmd := &cobra.Command{
Use: addManifestDeployItemUse,
Example: addManifestDeployItemExample,
Short: addManifestDeployItemShort,
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
if err := opts.Complete(args); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
if err := opts.run(ctx, logger.Log); err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
fmt.Printf("Deploy item added")
fmt.Printf(" \n- deploy item definition in blueprint folder in file %s created", util.ExecutionFileName(opts.deployItemName))
fmt.Printf(" \n- file reference to deploy item definition added to blueprint")
fmt.Printf(" \n- import definitions added to blueprint")
},
}
opts.AddFlags(cmd.Flags())
return cmd
}
func (o *addManifestDeployItemOptions) Complete(args []string) error {
o.deployItemName = args[0]
if err := o.parseParameterDefinitions(); err != nil {
return err
}
return o.validate()
}
func (o *addManifestDeployItemOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.componentPath,
"component-directory",
".",
"path to component directory (optional, default is current directory)")
o.files = fs.StringArray(
"manifest-file",
[]string{},
"manifest file containing one kubernetes resource")
o.importParams = fs.StringArray(
"import-param",
[]string{},
"import parameter as name:integer|string|boolean, e.g. replicas:integer")
fs.StringVar(&o.updateStrategy,
"update-strategy",
"update",
"update stategy")
fs.StringVar(&o.policy,
"policy",
"manage",
"policy")
fs.StringVar(&o.clusterParam,
"cluster-param",
"targetCluster",
"import parameter name for the target resource containing the access data of the target cluster")
}
func (o *addManifestDeployItemOptions) parseParameterDefinitions() (err error) {
p := components.ParameterDefinitionParser{}
o.importDefinitions, err = p.ParseImportDefinitions(o.importParams)
if err != nil {
return err
}
o.replacement = map[string]string{}
for paramName := range o.importDefinitions {
o.replacement[paramName] = string(uuid.NewUUID())
}
return nil
}
func (o *addManifestDeployItemOptions) validate() error {
if !identityKeyValidationRegexp.Match([]byte(o.deployItemName)) {
return fmt.Errorf("the deploy item name must consist of lower case alphanumeric characters, '-', '_' " +
"or '+', and must start and end with an alphanumeric character")
}
if o.clusterParam == "" {
return fmt.Errorf("cluster-param is missing")
}
if o.files == nil || len(*(o.files)) == 0 {
return fmt.Errorf("no manifest files specified")
}
for _, path := range *(o.files) |
err := o.checkIfDeployItemNotAlreadyAdded()
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) run(ctx context.Context, log logr.Logger) error {
err := o.createExecutionFile()
if err != nil {
return err
}
blueprintPath := util.BlueprintDirectoryPath(o.componentPath)
blueprint, err := blueprints.NewBlueprintReader(blueprintPath).Read()
if err != nil {
return err
}
blueprintBuilder := blueprints.NewBlueprintBuilder(blueprint)
if blueprintBuilder.ExistsDeployExecution(o.deployItemName) {
return fmt.Errorf("The blueprint already contains a deploy item %s\n", o.deployItemName)
}
blueprintBuilder.AddDeployExecution(o.deployItemName)
blueprintBuilder.AddImportForTarget(o.clusterParam)
blueprintBuilder.AddImportsFromMap(o.importDefinitions)
return blueprints.NewBlueprintWriter(blueprintPath).Write(blueprint)
}
func (o *addManifestDeployItemOptions) checkIfDeployItemNotAlreadyAdded() error {
_, err := os.Stat(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
return fmt.Errorf("Deploy item was already added. The corresponding deploy execution file %s already exists\n",
util.ExecutionFilePath(o.componentPath, o.deployItemName))
}
// parseImportDefinition creates a new ImportDefinition from a given parameter definition string.
// The parameter definition string must have the format "name:type", for example "replicas:integer".
// The supported types are: string, boolean, integer
func (o *addManifestDeployItemOptions) parseImportDefinition(paramDef string) (*v1alpha1.ImportDefinition, error) {
p := components.ParameterDefinitionParser{}
fieldValueDef, err := p.ParseFieldValueDefinition(paramDef)
if err != nil {
return nil, err
}
required := true
return &v1alpha1.ImportDefinition{
FieldValueDefinition: *fieldValueDef,
Required: &required,
}, nil
}
func (o *addManifestDeployItemOptions) createExecutionFile() error {
manifests, err := o.getManifests()
if err != nil {
return err
}
f, err := os.Create(util.ExecutionFilePath(o.componentPath, o.deployItemName))
if err != nil {
return err
}
defer f.Close()
err = o.writeExecution(f)
if err != nil {
return err
}
_, err = f.WriteString(manifests)
return err
}
const manifestExecutionTemplate = `deployItems:
- name: {{.DeployItemName}}
type: landscaper.gardener.cloud/kubernetes-manifest
target:
name: {{.TargetNameExpression}}
namespace: {{.TargetNamespaceExpression}}
config:
apiVersion: manifest.deployer.landscaper.gardener.cloud/v1alpha2
kind: ProviderConfiguration
updateStrategy: {{.UpdateStrategy}}
`
func (o *addManifestDeployItemOptions) writeExecution(f io.Writer) error {
t, err := template.New("").Parse(manifestExecutionTemplate)
if err != nil {
return err
}
data := struct {
DeployItemName string
TargetNameExpression string
TargetNamespaceExpression string
UpdateStrategy string
}{
DeployItemName: o.deployItemName,
TargetNameExpression: blueprints.GetTargetNameExpression(o.clusterParam),
TargetNamespaceExpression: blueprints.GetTargetNamespaceExpression(o.clusterParam),
UpdateStrategy: o.updateStrategy,
}
err = t.Execute(f, data)
if err != nil {
return err
}
return nil
}
func (o *addManifestDeployItemOptions) getManifests() (string, error) {
data, err := o.getManifestsYaml()
if err != nil {
return "", err
}
stringData := string(data)
stringData = indentLines(stringData, 4)
return stringData, nil
}
func indentLines(data string, n int) string {
indent := strings.Repeat(" ", n)
return indent + strings.ReplaceAll(data, "\n", "\n"+indent)
}
func (o *addManifestDeployItemOptions) getManifestsYaml() ([]byte, error) {
manifests, err := o.readManifests()
if err != nil {
return nil, err
}
m := map[string][]managedresource.Manifest{
"manifests": manifests,
}
data, err := yaml.Marshal(m)
if err != nil {
return nil, err
}
data = o.replaceUUIDsByImportTemplates(data)
return data, nil
}
func (o *addManifestDeployItemOptions) readManifests() ([]managedresource.Manifest, error) {
manifests := []managedresource.Manifest{}
if o.files == nil {
return manifests, nil
}
for _, filename := range *o.files {
m, err := o.readManifest(filename)
if err != nil {
return manifests, err
}
manifests = append(manifests, *m)
}
return manifests, nil
}
func (o *addManifestDeployItemOptions) readManifest(filename string) (*managedresource.Manifest, error) {
yamlData, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var m interface{}
err = yaml.Unmarshal(yamlData, &m)
if err != nil {
return nil, err
}
m = o.replaceParamsByUUIDs(m)
// render to string
uuidData, err := json.Marshal(m)
if err != nil {
return nil, err
}
m2 := &managedresource.Manifest{
Policy: managedresource.ManifestPolicy(o.policy),
Manifest: &runtime.RawExtension{
Raw: uuidData,
},
}
return m2, nil
}
func (o *addManifestDeployItemOptions) replaceParamsByUUIDs(in interface{}) interface{} {
switch m := in.(type) {
case map[string]interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case []interface{}:
for k := range m {
m[k] = o.replaceParamsByUUIDs(m[k])
}
return m
case string:
newValue, ok := o.replacement[m]
if ok {
return newValue
}
return m
default:
return m
}
}
func (o *addManifestDeployItemOptions) replaceUUIDsByImportTemplates(data []byte) []byte {
s := string(data)
for paramName, uuid := range o.replacement {
newValue := blueprints.GetImportExpression(paramName)
s = strings.ReplaceAll(s, uuid, newValue)
}
return []byte(s)
}
| {
fileInfo, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("manifest file %s does not exist", path)
}
return err
}
if fileInfo.IsDir() {
return fmt.Errorf("manifest file %s is a directory", path)
}
} | conditional_block |
WBSTree.js | /**
____ _ _ ____
/ ___| ___ ___ _ __| |_ | / ___|
\___ \ / __/ _ \| '__| __| _ | \___ \
___) | (_| (_) | | | |_ | |_| |___) |
|____/ \___\___/|_| \__| \___/|____/
WBSTree v 1.0 - a jQuery Ui extension
Licences: MIT & GPL
modificado y adaptado por el equipo: Scort Js http://swedpe.com/scortjs/
* Programadores:
* William Uria Martinez[Williamuriamartinez@hotmail.com], Angela Mayhua[], Cesar Cardenas[ccardenashq@gmail.com].
*/
Components.WBSTree.prototype.init = function(dataObj) {
this.config = {
container: $('body'),
id: "WBSTree-"+ Math.round(Math.random() * 2000),
iMaxDepth : 100, //maximo nivel de profundidad que se calcula.
iLevelSeparation : 40, //Numero de pixeles de separacion entre niveles
iSiblingSeparation : 40, //numero de pixeles de separacion entre hermanos
iSubtreeSeparation : 80, //numero de pixeles de separacion entre subarboles
defaultNodeWidth : 80, //ancho por defecto de los nodos
defaultNodeHeight : 40, //Altura por defecto de los nodos
width: -1,
height:-1,
css:new Array(),
title: "",
hidden: false,
autoScroll: true,
items: [],
nodoSeleccionado:-1,
iRootOrientation : ECOTree.RO_TOP,
iNodeJustification : ECOTree.NJ_TOP,
algorithm:'ecotree',
svgcontainer:'',
styleNode:{
Background:'#FFFFFF',
Linecolor: '#FFFFFF'
},
clipboard:false,
group:Math.round(Math.random()*999999),
listeners: {
show: function(){},
hide: function(){},
onNoderequestaddchild: function(obj){return true;},
onNoderequestaddbrother: function(obj){return true;},
onNodeaddchild: function(obj){},
onNodeaddbrother: function(obj,dir){},
onNodeSelectForCopy:function(obj){},
oncreateCopyTask:function(obj){}, //cuando se crea una tarea usando copiar, @param obj es la tarea resultado de la copia
onNodeSelectForCut:function(obj){},
onNodePaste:function(obj){},
onNodeCut:function(obj){},
onCancelClipoard:function(obj){},
onNoderequestEdit:function(obj){return true;}, //se aplica a todos los nodos, cuando se envía una petición de edición, si retorna true se procede con la edicion
onNodeEdit:function(obj){} //se lanza este evento luego que un nodo fue editado gráficamente.
}
};
for(var i in dataObj) {
if(this.config[i] != undefined) {
if(i == "listeners") {
for(var j in dataObj[i])
this.config[i][j] = dataObj[i][j];
}
else
this.config[i] = dataObj[i];
}
}
//posicion inicial para algoritmo eco-------------------
this.rootYOffset = -80;
this.rootXOffset = 0;
//----------------------------------------------------------
this.algorithm = this.config.algorithm;
this.id = this.config.id;
this.title = this.config.title;
this.container = this.config.container;
this.className = "WBSTree";
this.Dibujado = false; //si el arbol ya esta dibujado sera true
this.items = this.config.items;
this.nodos = [];
this.grupo= this.config.group;
this.nodoSeleccionado = this.config.nodoSeleccionado;
this.svgcontainer = this.config.svgcontainer;
this.listeners = this.config.listeners;
this.clipboard = -1;
this.TareaPreCortada = -1;
this.TareaPreCopiada = -1;
if(this.algorithm=='ecotree'){
this.maxLevelHeight = []; //array que indica la altura maxima de cada nivel
this.maxLevelWidth = []; //array que indica el ancho maximo de cada nivel
this.previousLevelNode = [];
this.nodoDerechaMax = ''; //nodo que se encuentra mas a la derecha
this.nodoizquierdaMax = ''; //nodo que se encuentra mas a la izquierda
}
}
//---------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype._setNeighbors = function(node, level) {
//funcion utilizada en el algoritmo eco, pero util para otras cosas mas.
node.leftNeighbor = this.previousLevelNode[level];
if(node.leftNeighbor != null)
node.leftNeighbor.rightNeighbor = node;
this.previousLevelNode[level] = node;
}
Components.WBSTree.prototype._setLevelHeight = function (node, level) {
/* usado en el algoritmo eco
Node: nodo que esta siendo procesado
level: nivel en el que se encuentra el nodo.
- si el valor maxLevelHeight[level] no esta aun definido se le asigna la altura del nodo,
- si el valor ya estaba definido se busca el mayor y este queda como valor.
*/
if (this.maxLevelHeight[level] == null)
this.maxLevelHeight[level] = 0;
if(this.maxLevelHeight[level] < node.Alto)
this.maxLevelHeight[level] = node.Alto;
}
Components.WBSTree.prototype._setLevelWidth = function (node, level) {
/* usado en el algoritmo eco
Node: nodo que esta siendo procesado
level: nivel en el que se encuentra el nodo.
- si el valor maxLevelWidth[level] no esta aun definido se le asigna el mayor ancho ,
- si el valor ya estaba definido se busca el mayor y este queda como valor.
*/
if (this.maxLevelWidth[level] == null)
this.maxLevelWidth[level] = 0;
if(this.maxLevelWidth[level] < node.AnchoCajita)
this.maxLevelWidth[level] = node.AnchoCajita;
}
//----------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.create = function() {
this.root = this.svgcontainer.root; //El elemento padre - root de todas las imagenes SVG en el inspector es el elemento <svg>
this.screenGrid = this.svgcontainer.screenGrid;
this.svgContend = this.svgcontainer.svgContend;
if (this.config.clipboard){
this.clipboard = new Clipboard();
this.clipboard.tree=this;
}
if(this.grupo != false)
this.svgcontainer.svg.group({id: this.grupo});
//this.grupo=this.svgcontainer.svg.group({id: this.grupo});
this.MakeItems();
switch(this.algorithm){
case 'basicWilliam':
this.LayoutAlgoritm= new LayoutWilliam(this);
break;
case 'ecotree':
this.LayoutAlgoritm=new ECOTree(this);
break
}
this.calcTree();
this.drawTree();
this.RecalcSize();
return this ;
}
//--------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.addNodo = function(nodo) {
/*construir un nuevo nodo, con los datos enviados en la variable nodo.
//el nodo se agrega a la derecha de sus hermanos, mas no se dibuja ni calcula aun.
@param nodo tiene que tener esta estructura
{ type: 'WBSNode',
id: id propio, diferente a los ids de los demas nodos,
idp:id del padre,
Descripcion:'Descripcion del contenido del nodo',
}
*/
if(nodo.type != "WBSNode"){
return false;
}
if(nodo.id == -1){
return false;
}
nodo.container = this.root;
nodo.screenGrid = this.screenGrid;
nodo.tree = this;
if(nodo.idp == null){
nodo.idp=-1;
this.padres.push(nodo.id);
if(position=='derecha'){
this.nodos[idp].childsId.push(nodo.id);
}
}
else{
//agregar al nodo padre el nodo hijo como child
if ( this.nodos[nodo.idp].childsId.indexOf(nodo.id)==-1)
{this.nodos[nodo.idp].childsId.push(nodo.id);}
this.nodos[nodo.idp].Status="Maximizado"; //sin esta linea el nodo pienza que esta minimizado
//this.items[nodo.idp].childsId.push(nodo.id);
}
var element = Components.create('WBSNode', nodo);
this.nodos[nodo.id] = element;
this.nodos[nodo.id] = element;
//los que fueron detectados como padres son los hijos del nodo -1
}
//--------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.MakeItems = function() {
//construir todos los nodos a partir de los datos ingresados por el usuario.
this.padres = [];
for(var i in this.items) {
var item = this.items[i];
if(item.type != "WBSNode")
continue;
item.container = this.root;
item.screenGrid = this.screenGrid;
item.tree = this;
if(item.idp == null){
item.idp=-1;
this.padres.push(item.id);
}
var element = Components.create('WBSNode', item);
this.nodos[element.id] = element;
}
//fake item
var fake = {
type: 'WBSNode',
id:-1,
idp:null,
Level:0,
Descripcion:'',
tipoObjeto:'WBSPARENT',
childsId:this.padres,
container: this.root,
screenGrid : this.screenGrid,
tree: this
}
var element = Components.create('WBSNode', fake);
this.nodos[element.id] = element;
//los que fueron detectados como padres son los hijos del nodo -1
for(i in this.padres){
this.nodos[element.id].childs[i]=this.nodos[this.padres[i]];
}
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype._getNodeSize = function (node) {
switch(this.config.iRootOrientation)
{
case ECOTree.RO_TOP:
case ECOTree.RO_BOTTOM:
return node.AnchoCajita;
case ECOTree.RO_RIGHT:
case ECOTree.RO_LEFT:
return node.Alto;
}
return 0;
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.cambiarLayout=function(algoritmo){
if (this.algorithm == algoritmo){return false;}
else{ | this.algorithm ='basicWilliam';
return true;
break;
case 'ecotree':
this.LayoutAlgoritm= new ECOTree(this);
this.algorithm ='ecotree';
return true;
break;
}
}
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.prepareForSave=function(){
var resultado=new Array();
for(var i in this.nodos){
if(this.nodos[i].id == -1){
continue;
}
var NodoItem = {
type:"WBSNode",
id:this.nodos[i].id,
Descripcion:this.nodos[i].Descripcion,
Background:this.nodos[i].Background,
stroke:this.nodos[i].stroke,
idp:this.nodos[i].idp,
childsId:this.nodos[i].childsId,
};
resultado.push(NodoItem);
}
return resultado;
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.calcTree = function() {
//dependiendo del algorimo a usar se llama a un metodo u otro
switch(this.algorithm){
case 'basicWilliam':
this.nodos[-1].calculaNodo('undefined'); //se inicia el calculo en el nodo root,
break;
case 'ecotree':
this.nodoDerechaMax = this.nodos[-1];
this.nodoizquierdaMax = this.nodos[-1];
this.LayoutAlgoritm._positionTree(this,this.nodos[-1]);
//el algoritmo de layout ECOtree, puede colocar nodos en el lado negativo del lienzo,
//por lo cual a partir del nodo mas a la izquierda reajustamos la poscicion de todos los nodos.
if(this.nodoizquierdaMax.PosCajitaX<0){
DeltaX = -1*this.nodoizquierdaMax.PosCajitaX;
for(var i in this.nodos) {
this.nodos[i].PosCajitaX = this.nodos[i].PosCajitaX + DeltaX
}
}
//console.log(this.nodoDerechaMax);
//console.log(this.nodoizquierdaMax);
break
}
//this.drawTree();
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.drawTree = function() {
/* Se encarga de dibujar los nodos sobre el lienzo del arbol.
- Si el arbol ya estaba dibujado y esta funcion es llamada, entonces se procedera:
1) Limpieza del lienzo.
2) Limpieza de Calculos previos en los nodos
3) Calculo de los nodos
4) dibujado de los nodos.
- si el arbol se va dibujar por primera vez, solo se procede a dibujarlo.
*/
if(this.Dibujado==true){
this.LimpiarLienzo();
this.LimpiarCalculosPrevios();
this.calcTree();
}
for(var i in this.nodos) {
if(this.nodos[i].config.tipoObjeto!="WBSPARENT")
this.nodos[i].draw();
}
this.RecalcSize();
this.Dibujado = true;
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.borrarNodos = function (){
for(i in this.nodos){
if (this.nodos[i].id != -1){
delete this.nodos[i]}
}
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.LimpiarLienzo = function(){
//quita los graficos del contenedor SVG, no se elimina el grupo principal
this.nodos[0].removeGraphics();
//si algun nodo fue dejado de lado por el borrado desde el nivel 0 lo buscamos
for(i in this.nodos){
this.nodos[i].removeGraphics();
}
this.Dibujado=false;
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.LimpiarCalculosPrevios = function(){
//Se encarga de inicializar los nodos y el arbol, de los calculos realizados por los algoritmos de layout
//dejando el arbol y los nodos listos para iniciar un nuevo ciclo de calculos
for(i in this.nodos){
this.nodos[i].LimpiarCalculosPrevios();
}
this.maxLevelHeight = []; //array que indica la altura maxima de cada nivel
this.maxLevelWidth = []; //array que indica el ancho maximo de cada nivel
this.previousLevelNode = [];
this.nodoDerechaMax = ''; //nodo que se encuentra mas a la derecha
this.nodoizquierdaMax = ''; //nodo que se encuentra mas a la izquierda
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype.RecalcSize = function() {
//calcular el ancho y el alto requeridos para mostrar correctamente el arbol
var sum=0;
switch(this.algorithm){
case 'basicWilliam':
for(var pa in this.padres )
sum=sum+this.nodos[pa].Ancho;
$(this.root).attr('width', sum+10);
this.svgContend.width(sum+10);
$(this.root).attr('height', this.screenGrid.Level[this.screenGrid.MaxLevel].y+60);//50 es el alto de las cajitas
this.svgContend.height(this.screenGrid.Level[this.screenGrid.MaxLevel].y+60);
break;
case 'ecotree':
sum = this.nodoDerechaMax.PosCajitaX+this.nodoDerechaMax.AnchoCajita + 20;
$(this.root).attr('width', sum);
this.svgContend.width(sum+20);
sum=0
for(i in this.maxLevelHeight)
{
sum=sum+this.maxLevelHeight[i]+this.config.iLevelSeparation;
};
$(this.root).attr('height', sum+10);
this.svgContend.height(sum+10);
break
}
}
//-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Components.WBSTree.prototype._getLeftmost = function (node, level, maxlevel) {
if(level >= maxlevel) return node;
if(node._getChildrenCount() == 0) return null;
//var n = node._getChildrenCount();
for(i in node.childsId)
{
var iChild = this.nodos[node.childsId[i]];
var leftmostDescendant = this._getLeftmost(iChild, level + 1, maxlevel);
if(leftmostDescendant != null)
return leftmostDescendant;
}
return null;
} | switch(algoritmo){
case 'basicWilliam':
this.LayoutAlgoritm=new LayoutWilliam(this); | random_line_split |
request.rs | extern crate base64;
extern crate md5;
use std::collections::HashMap;
use std::io::{Read, Write};
use bucket::Bucket;
use chrono::{DateTime, Utc};
use command::Command;
use hmac::Mac;
use reqwest::async;
use reqwest::header::{self, HeaderMap, HeaderName, HeaderValue};
use sha2::{Digest, Sha256};
use url::Url;
use futures::prelude::*;
use tokio::runtime::current_thread::Runtime;
use signing;
use error::{S3Error, S3Result};
use reqwest::async::Response;
use EMPTY_PAYLOAD_SHA;
use LONG_DATE;
/// Collection of HTTP headers sent to S3 service, in key/value format.
pub type Headers = HashMap<String, String>;
/// Collection of HTTP query parameters sent to S3 service, in key/value
/// format.
pub type Query = HashMap<String, String>;
// Temporary structure for making a request
pub struct Request<'a> {
pub bucket: &'a Bucket,
pub path: &'a str,
pub command: Command<'a>,
pub datetime: DateTime<Utc>,
pub async: bool,
}
impl<'a> Request<'a> {
pub fn new<'b>(bucket: &'b Bucket, path: &'b str, command: Command<'b>) -> Request<'b> {
Request {
bucket,
path,
command,
datetime: Utc::now(),
async: false,
}
}
fn url(&self) -> Url {
let mut url_str = match self.command {
Command::GetBucketLocation => {
format!("{}://{}", self.bucket.scheme(), self.bucket.self_host())
}
_ => format!("{}://{}", self.bucket.scheme(), self.bucket.host()),
};
match self.command {
Command::GetBucketLocation => {}
_ => {
url_str.push_str("/");
url_str.push_str(&self.bucket.name());
}
}
if !self.path.starts_with('/') {
url_str.push_str("/");
}
match self.command {
Command::GetBucketLocation => url_str.push_str(self.path),
_ => url_str.push_str(&signing::uri_encode(self.path, false)),
};
// Since every part of this URL is either pre-encoded or statically
// generated, there's really no way this should fail.
let mut url = Url::parse(&url_str).expect("static URL parsing");
for (key, value) in &self.bucket.extra_query {
url.query_pairs_mut().append_pair(key, value);
}
if let Command::ListBucket {
prefix,
delimiter,
continuation_token,
} = self.command.clone()
{
let mut query_pairs = url.query_pairs_mut();
delimiter.map(|d| query_pairs.append_pair("delimiter", &d.clone()));
query_pairs.append_pair("prefix", &prefix);
query_pairs.append_pair("list-type", "2");
if let Some(token) = continuation_token {
query_pairs.append_pair("continuation-token", &token);
}
}
match self.command {
Command::PutObjectTagging { .. }
| Command::GetObjectTagging
| Command::DeleteObjectTagging => {
url.query_pairs_mut().append_pair("tagging", "");
}
_ => {}
}
// println!("{}", url);
url
}
fn content_length(&self) -> usize {
match self.command {
Command::PutObject { content, .. } => content.len(),
Command::PutObjectTagging { tags } => tags.len(),
_ => 0,
}
}
fn content_type(&self) -> String {
match self.command {
Command::PutObject { content_type, .. } => content_type.into(),
_ => "text/plain".into(),
}
}
fn sha256(&self) -> String {
match self.command {
Command::PutObject { content, .. } => {
let mut sha = Sha256::default();
sha.input(content);
hex::encode(sha.result().as_slice())
}
Command::PutObjectTagging { tags } => {
let mut sha = Sha256::default();
sha.input(tags.as_bytes());
hex::encode(sha.result().as_slice())
}
_ => EMPTY_PAYLOAD_SHA.into(),
}
}
fn long_date(&self) -> String {
self.datetime.format(LONG_DATE).to_string()
}
fn canonical_request(&self, headers: &HeaderMap) -> String {
signing::canonical_request( | }
fn string_to_sign(&self, request: &str) -> String {
signing::string_to_sign(&self.datetime, &self.bucket.region(), request)
}
fn signing_key(&self) -> S3Result<Vec<u8>> {
Ok(signing::signing_key(
&self.datetime,
&self.bucket.secret_key(),
&self.bucket.region(),
"s3",
)?)
}
fn authorization(&self, headers: &HeaderMap) -> S3Result<String> {
let canonical_request = self.canonical_request(headers);
let string_to_sign = self.string_to_sign(&canonical_request);
let mut hmac = signing::HmacSha256::new_varkey(&self.signing_key()?)?;
hmac.input(string_to_sign.as_bytes());
let signature = hex::encode(hmac.result().code());
let signed_header = signing::signed_header_string(headers);
Ok(signing::authorization_header(
&self.bucket.access_key(),
&self.datetime,
&self.bucket.region(),
&signed_header,
&signature,
))
}
fn headers(&self) -> S3Result<HeaderMap> {
// Generate this once, but it's used in more than one place.
let sha256 = self.sha256();
// Start with extra_headers, that way our headers replace anything with
// the same name.
let mut headers = self
.bucket
.extra_headers
.iter()
.map(|(k, v)| Ok((k.parse::<HeaderName>()?, v.parse::<HeaderValue>()?)))
.collect::<Result<HeaderMap, S3Error>>()?;
match self.command {
Command::GetBucketLocation => {
headers.insert(header::HOST, self.bucket.self_host().parse()?)
}
_ => headers.insert(header::HOST, self.bucket.host().parse()?),
};
headers.insert(
header::CONTENT_LENGTH,
self.content_length().to_string().parse()?,
);
headers.insert(header::CONTENT_TYPE, self.content_type().parse()?);
headers.insert("X-Amz-Content-Sha256", sha256.parse()?);
headers.insert("X-Amz-Date", self.long_date().parse()?);
if let Some(token) = self.bucket.credentials().token.as_ref() {
headers.insert("X-Amz-Security-Token", token.parse()?);
}
if let Command::PutObjectTagging { tags } = self.command {
let digest = md5::compute(tags);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::PutObject { content, .. } = self.command {
let digest = md5::compute(content);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::GetObject {} = self.command {
headers.insert(
header::ACCEPT,
HeaderValue::from_str("application/octet-stream")?,
);
// headers.insert(header::ACCEPT_CHARSET, HeaderValue::from_str("UTF-8")?);
}
// This must be last, as it signs the other headers
let authorization = self.authorization(&headers)?;
headers.insert(header::AUTHORIZATION, authorization.parse()?);
// The format of RFC2822 is somewhat malleable, so including it in
// signed headers can cause signature mismatches. We do include the
// X-Amz-Date header, so requests are still properly limited to a date
// range and can't be used again e.g. reply attacks. Adding this header
// after the generation of the Authorization header leaves it out of
// the signed headers.
headers.insert(header::DATE, self.datetime.to_rfc2822().parse()?);
Ok(headers)
}
pub fn response_data(&self) -> S3Result<(Vec<u8>, u16)> {
let response_data = self.response_data_future().then(|result| match result {
Ok((response_data, status_code)) => Ok((response_data, status_code)),
Err(e) => Err(e),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(response_data)
}
pub fn response_data_to_writer<T: Write>(&self, writer: &mut T) -> S3Result<u16> {
let status_code_future =
self.response_data_to_writer_future(writer)
.then(|result| match result {
Ok(status_code) => Ok(status_code),
Err(_) => Err(S3Error::from("ReqwestFuture")),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(status_code_future)
}
pub fn response_future(&self) -> impl Future<Item = Response, Error = S3Error> {
let client = if cfg!(feature = "no-verify-ssl") {
async::Client::builder()
.danger_accept_invalid_certs(true)
.danger_accept_invalid_hostnames(true)
.build()
.expect("Could not build dangereous client!")
} else {
async::Client::new()
};
// Build headers
let headers = self.headers().expect("Could not get headers!");
// Get owned content to pass to reqwest
let content = if let Command::PutObject { content, .. } = self.command {
Vec::from(content)
} else if let Command::PutObjectTagging { tags } = self.command {
Vec::from(tags)
} else {
Vec::new()
};
let request = client
.request(self.command.http_verb(), self.url().as_str())
.headers(headers.to_owned())
.body(content.to_owned());
request.send().map_err(S3Error::from)
}
pub fn response_data_future(&self) -> impl Future<Item = (Vec<u8>, u16), Error = S3Error> {
self.response_future()
.and_then(|response| {
// println!("{:?}", response.headers());
let status_code = response.status().as_u16();
Ok((response.into_body().collect(), status_code))
})
.and_then(|(body_future, status_code)| {
body_future
.and_then(move |body| {
let mut entire_body = body
.iter()
.fold(vec![], |mut acc, slice| {acc.extend_from_slice(slice); acc});
entire_body.shrink_to_fit();
Ok((entire_body, status_code))
})
.map_err(S3Error::from)
})
}
pub fn response_data_to_writer_future<'b, T: Write>(
&self,
writer: &'b mut T,
) -> impl Future<Item = u16> + 'b {
let future_response = self.response_data_future();
future_response.and_then(move |(body, status_code)| {
writer
.write_all(body.as_slice())
.expect("Could not write to writer");
Ok(status_code)
})
}
}
#[cfg(test)]
mod tests {
use bucket::Bucket;
use command::Command;
use credentials::Credentials;
use error::S3Result;
use request::Request;
// Fake keys - otherwise using Credentials::default will use actual user
// credentials if they exist.
fn fake_credentials() -> Credentials {
const ACCESS_KEY: &'static str = "AKIAIOSFODNN7EXAMPLE";
const SECRET_KEY: &'static str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
Credentials::new(Some(ACCESS_KEY.into()), Some(SECRET_KEY.into()), None, None)
}
#[test]
fn url_uses_https_by_default() -> S3Result<()> {
let region = "custom-region".parse()?;
let bucket = Bucket::new("my-first-bucket", region, fake_credentials())?;
let path = "/my-first/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "https");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
#[test]
fn url_uses_scheme_from_custom_region_if_defined() -> S3Result<()> {
let region = "http://custom-region".parse()?;
let bucket = Bucket::new("my-second-bucket", region, fake_credentials())?;
let path = "/my-second/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "http");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
} | self.command.http_verb().as_str(),
&self.url(),
headers,
&self.sha256(),
) | random_line_split |
request.rs | extern crate base64;
extern crate md5;
use std::collections::HashMap;
use std::io::{Read, Write};
use bucket::Bucket;
use chrono::{DateTime, Utc};
use command::Command;
use hmac::Mac;
use reqwest::async;
use reqwest::header::{self, HeaderMap, HeaderName, HeaderValue};
use sha2::{Digest, Sha256};
use url::Url;
use futures::prelude::*;
use tokio::runtime::current_thread::Runtime;
use signing;
use error::{S3Error, S3Result};
use reqwest::async::Response;
use EMPTY_PAYLOAD_SHA;
use LONG_DATE;
/// Collection of HTTP headers sent to S3 service, in key/value format.
pub type Headers = HashMap<String, String>;
/// Collection of HTTP query parameters sent to S3 service, in key/value
/// format.
pub type Query = HashMap<String, String>;
// Temporary structure for making a request
pub struct Request<'a> {
pub bucket: &'a Bucket,
pub path: &'a str,
pub command: Command<'a>,
pub datetime: DateTime<Utc>,
pub async: bool,
}
impl<'a> Request<'a> {
pub fn new<'b>(bucket: &'b Bucket, path: &'b str, command: Command<'b>) -> Request<'b> {
Request {
bucket,
path,
command,
datetime: Utc::now(),
async: false,
}
}
fn url(&self) -> Url {
let mut url_str = match self.command {
Command::GetBucketLocation => {
format!("{}://{}", self.bucket.scheme(), self.bucket.self_host())
}
_ => format!("{}://{}", self.bucket.scheme(), self.bucket.host()),
};
match self.command {
Command::GetBucketLocation => {}
_ => {
url_str.push_str("/");
url_str.push_str(&self.bucket.name());
}
}
if !self.path.starts_with('/') {
url_str.push_str("/");
}
match self.command {
Command::GetBucketLocation => url_str.push_str(self.path),
_ => url_str.push_str(&signing::uri_encode(self.path, false)),
};
// Since every part of this URL is either pre-encoded or statically
// generated, there's really no way this should fail.
let mut url = Url::parse(&url_str).expect("static URL parsing");
for (key, value) in &self.bucket.extra_query {
url.query_pairs_mut().append_pair(key, value);
}
if let Command::ListBucket {
prefix,
delimiter,
continuation_token,
} = self.command.clone()
{
let mut query_pairs = url.query_pairs_mut();
delimiter.map(|d| query_pairs.append_pair("delimiter", &d.clone()));
query_pairs.append_pair("prefix", &prefix);
query_pairs.append_pair("list-type", "2");
if let Some(token) = continuation_token {
query_pairs.append_pair("continuation-token", &token);
}
}
match self.command {
Command::PutObjectTagging { .. }
| Command::GetObjectTagging
| Command::DeleteObjectTagging => {
url.query_pairs_mut().append_pair("tagging", "");
}
_ => {}
}
// println!("{}", url);
url
}
fn content_length(&self) -> usize {
match self.command {
Command::PutObject { content, .. } => content.len(),
Command::PutObjectTagging { tags } => tags.len(),
_ => 0,
}
}
fn content_type(&self) -> String {
match self.command {
Command::PutObject { content_type, .. } => content_type.into(),
_ => "text/plain".into(),
}
}
fn sha256(&self) -> String {
match self.command {
Command::PutObject { content, .. } => {
let mut sha = Sha256::default();
sha.input(content);
hex::encode(sha.result().as_slice())
}
Command::PutObjectTagging { tags } => {
let mut sha = Sha256::default();
sha.input(tags.as_bytes());
hex::encode(sha.result().as_slice())
}
_ => EMPTY_PAYLOAD_SHA.into(),
}
}
fn long_date(&self) -> String {
self.datetime.format(LONG_DATE).to_string()
}
fn canonical_request(&self, headers: &HeaderMap) -> String {
signing::canonical_request(
self.command.http_verb().as_str(),
&self.url(),
headers,
&self.sha256(),
)
}
fn string_to_sign(&self, request: &str) -> String {
signing::string_to_sign(&self.datetime, &self.bucket.region(), request)
}
fn signing_key(&self) -> S3Result<Vec<u8>> {
Ok(signing::signing_key(
&self.datetime,
&self.bucket.secret_key(),
&self.bucket.region(),
"s3",
)?)
}
fn authorization(&self, headers: &HeaderMap) -> S3Result<String> {
let canonical_request = self.canonical_request(headers);
let string_to_sign = self.string_to_sign(&canonical_request);
let mut hmac = signing::HmacSha256::new_varkey(&self.signing_key()?)?;
hmac.input(string_to_sign.as_bytes());
let signature = hex::encode(hmac.result().code());
let signed_header = signing::signed_header_string(headers);
Ok(signing::authorization_header(
&self.bucket.access_key(),
&self.datetime,
&self.bucket.region(),
&signed_header,
&signature,
))
}
fn headers(&self) -> S3Result<HeaderMap> {
// Generate this once, but it's used in more than one place.
let sha256 = self.sha256();
// Start with extra_headers, that way our headers replace anything with
// the same name.
let mut headers = self
.bucket
.extra_headers
.iter()
.map(|(k, v)| Ok((k.parse::<HeaderName>()?, v.parse::<HeaderValue>()?)))
.collect::<Result<HeaderMap, S3Error>>()?;
match self.command {
Command::GetBucketLocation => {
headers.insert(header::HOST, self.bucket.self_host().parse()?)
}
_ => headers.insert(header::HOST, self.bucket.host().parse()?),
};
headers.insert(
header::CONTENT_LENGTH,
self.content_length().to_string().parse()?,
);
headers.insert(header::CONTENT_TYPE, self.content_type().parse()?);
headers.insert("X-Amz-Content-Sha256", sha256.parse()?);
headers.insert("X-Amz-Date", self.long_date().parse()?);
if let Some(token) = self.bucket.credentials().token.as_ref() {
headers.insert("X-Amz-Security-Token", token.parse()?);
}
if let Command::PutObjectTagging { tags } = self.command {
let digest = md5::compute(tags);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::PutObject { content, .. } = self.command {
let digest = md5::compute(content);
let hash = base64::encode(digest.as_ref());
headers.insert("Content-MD5", hash.parse()?);
} else if let Command::GetObject {} = self.command {
headers.insert(
header::ACCEPT,
HeaderValue::from_str("application/octet-stream")?,
);
// headers.insert(header::ACCEPT_CHARSET, HeaderValue::from_str("UTF-8")?);
}
// This must be last, as it signs the other headers
let authorization = self.authorization(&headers)?;
headers.insert(header::AUTHORIZATION, authorization.parse()?);
// The format of RFC2822 is somewhat malleable, so including it in
// signed headers can cause signature mismatches. We do include the
// X-Amz-Date header, so requests are still properly limited to a date
// range and can't be used again e.g. reply attacks. Adding this header
// after the generation of the Authorization header leaves it out of
// the signed headers.
headers.insert(header::DATE, self.datetime.to_rfc2822().parse()?);
Ok(headers)
}
pub fn | (&self) -> S3Result<(Vec<u8>, u16)> {
let response_data = self.response_data_future().then(|result| match result {
Ok((response_data, status_code)) => Ok((response_data, status_code)),
Err(e) => Err(e),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(response_data)
}
pub fn response_data_to_writer<T: Write>(&self, writer: &mut T) -> S3Result<u16> {
let status_code_future =
self.response_data_to_writer_future(writer)
.then(|result| match result {
Ok(status_code) => Ok(status_code),
Err(_) => Err(S3Error::from("ReqwestFuture")),
});
let mut runtime = Runtime::new().unwrap();
runtime.block_on(status_code_future)
}
pub fn response_future(&self) -> impl Future<Item = Response, Error = S3Error> {
let client = if cfg!(feature = "no-verify-ssl") {
async::Client::builder()
.danger_accept_invalid_certs(true)
.danger_accept_invalid_hostnames(true)
.build()
.expect("Could not build dangereous client!")
} else {
async::Client::new()
};
// Build headers
let headers = self.headers().expect("Could not get headers!");
// Get owned content to pass to reqwest
let content = if let Command::PutObject { content, .. } = self.command {
Vec::from(content)
} else if let Command::PutObjectTagging { tags } = self.command {
Vec::from(tags)
} else {
Vec::new()
};
let request = client
.request(self.command.http_verb(), self.url().as_str())
.headers(headers.to_owned())
.body(content.to_owned());
request.send().map_err(S3Error::from)
}
pub fn response_data_future(&self) -> impl Future<Item = (Vec<u8>, u16), Error = S3Error> {
self.response_future()
.and_then(|response| {
// println!("{:?}", response.headers());
let status_code = response.status().as_u16();
Ok((response.into_body().collect(), status_code))
})
.and_then(|(body_future, status_code)| {
body_future
.and_then(move |body| {
let mut entire_body = body
.iter()
.fold(vec![], |mut acc, slice| {acc.extend_from_slice(slice); acc});
entire_body.shrink_to_fit();
Ok((entire_body, status_code))
})
.map_err(S3Error::from)
})
}
pub fn response_data_to_writer_future<'b, T: Write>(
&self,
writer: &'b mut T,
) -> impl Future<Item = u16> + 'b {
let future_response = self.response_data_future();
future_response.and_then(move |(body, status_code)| {
writer
.write_all(body.as_slice())
.expect("Could not write to writer");
Ok(status_code)
})
}
}
#[cfg(test)]
mod tests {
use bucket::Bucket;
use command::Command;
use credentials::Credentials;
use error::S3Result;
use request::Request;
// Fake keys - otherwise using Credentials::default will use actual user
// credentials if they exist.
fn fake_credentials() -> Credentials {
const ACCESS_KEY: &'static str = "AKIAIOSFODNN7EXAMPLE";
const SECRET_KEY: &'static str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
Credentials::new(Some(ACCESS_KEY.into()), Some(SECRET_KEY.into()), None, None)
}
#[test]
fn url_uses_https_by_default() -> S3Result<()> {
let region = "custom-region".parse()?;
let bucket = Bucket::new("my-first-bucket", region, fake_credentials())?;
let path = "/my-first/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "https");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
#[test]
fn url_uses_scheme_from_custom_region_if_defined() -> S3Result<()> {
let region = "http://custom-region".parse()?;
let bucket = Bucket::new("my-second-bucket", region, fake_credentials())?;
let path = "/my-second/path";
let request = Request::new(&bucket, path, Command::GetObject);
assert_eq!(request.url().scheme(), "http");
let headers = request.headers().unwrap();
let host = headers.get("Host").unwrap();
assert_eq!(*host, "custom-region".to_string());
Ok(())
}
}
| response_data | identifier_name |
request.rs | extern crate base64;
extern crate md5;
use std::collections::HashMap;
use std::io::{Read, Write};
use bucket::Bucket;
use chrono::{DateTime, Utc};
use command::Command;
use hmac::Mac;
use reqwest::async;
use reqwest::header::{self, HeaderMap, HeaderName, HeaderValue};
use sha2::{Digest, Sha256};
use url::Url;
use futures::prelude::*;
use tokio::runtime::current_thread::Runtime;
use signing;
use error::{S3Error, S3Result};
use reqwest::async::Response;
use EMPTY_PAYLOAD_SHA;
use LONG_DATE;
/// Collection of HTTP headers sent to S3 service, in key/value format.
pub type Headers = HashMap<String, String>;
/// Collection of HTTP query parameters sent to S3 service, in key/value
/// format.
pub type Query = HashMap<String, String>;
// Temporary structure for making a request
pub struct Request<'a> {
pub bucket: &'a Bucket,
pub path: &'a str,
pub command: Command<'a>,
pub datetime: DateTime<Utc>,
pub async: bool,
}
impl<'a> Request<'a> {
pub fn new<'b>(bucket: &'b Bucket, path: &'b str, command: Command<'b>) -> Request<'b> {
Request {
bucket,
path,
command,
datetime: Utc::now(),
async: false,
}
}
/// Build the full request URL for this command.
///
/// `GetBucketLocation` is special-cased: it targets the bucket's own host,
/// omits the bucket name from the path, and does not URI-encode the path.
/// Every other command targets `{scheme}://{host}/{bucket}{path}` with the
/// path URI-encoded. Bucket-level `extra_query` pairs are always appended;
/// `ListBucket` adds its pagination parameters and the tagging commands add
/// the bare `tagging` query flag.
fn url(&self) -> Url {
    let mut url_str = match self.command {
        Command::GetBucketLocation => {
            format!("{}://{}", self.bucket.scheme(), self.bucket.self_host())
        }
        _ => format!("{}://{}", self.bucket.scheme(), self.bucket.host()),
    };
    match self.command {
        Command::GetBucketLocation => {}
        _ => {
            url_str.push_str("/");
            url_str.push_str(&self.bucket.name());
        }
    }
    // Guarantee a separator between the bucket name and the object key.
    if !self.path.starts_with('/') {
        url_str.push_str("/");
    }
    match self.command {
        Command::GetBucketLocation => url_str.push_str(self.path),
        _ => url_str.push_str(&signing::uri_encode(self.path, false)),
    };
    // Since every part of this URL is either pre-encoded or statically
    // generated, there's really no way this should fail.
    let mut url = Url::parse(&url_str).expect("static URL parsing");
    for (key, value) in &self.bucket.extra_query {
        url.query_pairs_mut().append_pair(key, value);
    }
    if let Command::ListBucket {
        prefix,
        delimiter,
        continuation_token,
    } = self.command.clone()
    {
        let mut query_pairs = url.query_pairs_mut();
        delimiter.map(|d| query_pairs.append_pair("delimiter", &d.clone()));
        query_pairs.append_pair("prefix", &prefix);
        // list-type=2 selects the ListObjectsV2 API.
        query_pairs.append_pair("list-type", "2");
        if let Some(token) = continuation_token {
            query_pairs.append_pair("continuation-token", &token);
        }
    }
    match self.command {
        Command::PutObjectTagging { .. }
        | Command::GetObjectTagging
        | Command::DeleteObjectTagging => {
            // S3 tagging operations are selected via a bare `?tagging`
            // query parameter with an empty value.
            url.query_pairs_mut().append_pair("tagging", "");
        }
        _ => {}
    }
    url
}
/// Size in bytes of the request body (0 for body-less commands);
/// used to populate the Content-Length header.
fn content_length(&self) -> usize {
    match self.command {
        Command::PutObject { content, .. } => content.len(),
        Command::PutObjectTagging { tags } => tags.len(),
        _ => 0,
    }
}
/// MIME type for the request body: the caller-supplied type for object
/// uploads, "text/plain" for everything else.
fn content_type(&self) -> String {
    match self.command {
        Command::PutObject { content_type, .. } => content_type.into(),
        _ => "text/plain".into(),
    }
}
/// Hex-encoded SHA-256 of the request payload, used for the
/// X-Amz-Content-Sha256 header. Body-less commands use the precomputed
/// hash of the empty payload.
fn sha256(&self) -> String {
    match self.command {
        Command::PutObject { content, .. } => {
            let mut sha = Sha256::default();
            sha.input(content);
            hex::encode(sha.result().as_slice())
        }
        Command::PutObjectTagging { tags } => {
            let mut sha = Sha256::default();
            sha.input(tags.as_bytes());
            hex::encode(sha.result().as_slice())
        }
        _ => EMPTY_PAYLOAD_SHA.into(),
    }
}
/// Request timestamp formatted with the AWS "long date" pattern
/// (the `LONG_DATE` format string) for the X-Amz-Date header.
fn long_date(&self) -> String {
    self.datetime.format(LONG_DATE).to_string()
}
/// Build the SigV4 canonical request string from the HTTP verb, the full
/// URL, the headers to be signed, and the payload hash.
fn canonical_request(&self, headers: &HeaderMap) -> String {
    signing::canonical_request(
        self.command.http_verb().as_str(),
        &self.url(),
        headers,
        &self.sha256(),
    )
}
/// Derive the SigV4 "string to sign" from the canonical request,
/// the request timestamp and the bucket's region.
fn string_to_sign(&self, request: &str) -> String {
    signing::string_to_sign(&self.datetime, &self.bucket.region(), request)
}
fn signing_key(&self) -> S3Result<Vec<u8>> |
/// Compute the full SigV4 Authorization header value over `headers`:
/// canonical request -> string to sign -> HMAC-SHA256 with the derived
/// signing key -> hex signature, then assemble the header string.
fn authorization(&self, headers: &HeaderMap) -> S3Result<String> {
    let canonical_request = self.canonical_request(headers);
    let string_to_sign = self.string_to_sign(&canonical_request);
    let mut hmac = signing::HmacSha256::new_varkey(&self.signing_key()?)?;
    hmac.input(string_to_sign.as_bytes());
    let signature = hex::encode(hmac.result().code());
    let signed_header = signing::signed_header_string(headers);
    Ok(signing::authorization_header(
        &self.bucket.access_key(),
        &self.datetime,
        &self.bucket.region(),
        &signed_header,
        &signature,
    ))
}
/// Assemble all request headers: bucket `extra_headers` first (so the
/// fixed headers below override same-named entries), then Host,
/// Content-Length, Content-Type, the SigV4 payload hash and date, an
/// optional STS security token, Content-MD5 for uploads, and finally the
/// Authorization header computed over everything inserted so far.
fn headers(&self) -> S3Result<HeaderMap> {
    // Generate this once, but it's used in more than one place.
    let sha256 = self.sha256();
    // Start with extra_headers, that way our headers replace anything with
    // the same name.
    let mut headers = self
        .bucket
        .extra_headers
        .iter()
        .map(|(k, v)| Ok((k.parse::<HeaderName>()?, v.parse::<HeaderValue>()?)))
        .collect::<Result<HeaderMap, S3Error>>()?;
    match self.command {
        Command::GetBucketLocation => {
            headers.insert(header::HOST, self.bucket.self_host().parse()?)
        }
        _ => headers.insert(header::HOST, self.bucket.host().parse()?),
    };
    headers.insert(
        header::CONTENT_LENGTH,
        self.content_length().to_string().parse()?,
    );
    headers.insert(header::CONTENT_TYPE, self.content_type().parse()?);
    headers.insert("X-Amz-Content-Sha256", sha256.parse()?);
    headers.insert("X-Amz-Date", self.long_date().parse()?);
    if let Some(token) = self.bucket.credentials().token.as_ref() {
        headers.insert("X-Amz-Security-Token", token.parse()?);
    }
    if let Command::PutObjectTagging { tags } = self.command {
        let digest = md5::compute(tags);
        let hash = base64::encode(digest.as_ref());
        headers.insert("Content-MD5", hash.parse()?);
    } else if let Command::PutObject { content, .. } = self.command {
        let digest = md5::compute(content);
        let hash = base64::encode(digest.as_ref());
        headers.insert("Content-MD5", hash.parse()?);
    } else if let Command::GetObject {} = self.command {
        headers.insert(
            header::ACCEPT,
            HeaderValue::from_str("application/octet-stream")?,
        );
    }
    // This must be last, as it signs the other headers
    let authorization = self.authorization(&headers)?;
    headers.insert(header::AUTHORIZATION, authorization.parse()?);
    // The format of RFC2822 is somewhat malleable, so including it in
    // signed headers can cause signature mismatches. We do include the
    // X-Amz-Date header, so requests are still properly limited to a date
    // range and can't be reused, e.g. in replay attacks. Adding this header
    // after the generation of the Authorization header leaves it out of
    // the signed headers.
    headers.insert(header::DATE, self.datetime.to_rfc2822().parse()?);
    Ok(headers)
}
/// Execute the request synchronously and return `(body, status_code)`.
///
/// Drives `response_data_future` to completion on a throwaway
/// single-threaded tokio runtime. The previous `.then(...)` adapter
/// mapped `Ok` to `Ok` and `Err` to `Err` — a no-op — so it is removed;
/// behavior is unchanged.
pub fn response_data(&self) -> S3Result<(Vec<u8>, u16)> {
    let mut runtime = Runtime::new().unwrap();
    runtime.block_on(self.response_data_future())
}
/// Execute the request synchronously, write the whole response body into
/// `writer`, and return the HTTP status code.
///
/// NOTE(review): the `.then` adapter collapses every failure into the
/// opaque `S3Error::from("ReqwestFuture")`, discarding the underlying
/// cause — consider propagating the original error instead.
pub fn response_data_to_writer<T: Write>(&self, writer: &mut T) -> S3Result<u16> {
    let status_code_future =
        self.response_data_to_writer_future(writer)
            .then(|result| match result {
                Ok(status_code) => Ok(status_code),
                Err(_) => Err(S3Error::from("ReqwestFuture")),
            });
    let mut runtime = Runtime::new().unwrap();
    runtime.block_on(status_code_future)
}
/// Send the request asynchronously and resolve to the raw reqwest
/// `Response`.
///
/// Builds a fresh client for every call; when the crate is compiled with
/// the `no-verify-ssl` feature, certificate and hostname verification are
/// deliberately disabled.
/// NOTE(review): header construction panics via `expect` inside this
/// non-fallible signature — confirm that is acceptable at this layer.
pub fn response_future(&self) -> impl Future<Item = Response, Error = S3Error> {
    let client = if cfg!(feature = "no-verify-ssl") {
        async::Client::builder()
            .danger_accept_invalid_certs(true)
            .danger_accept_invalid_hostnames(true)
            .build()
            .expect("Could not build dangereous client!")
    } else {
        async::Client::new()
    };
    // Build headers
    let headers = self.headers().expect("Could not get headers!");
    // Get owned content to pass to reqwest (borrowed slices can't outlive
    // the request builder).
    let content = if let Command::PutObject { content, .. } = self.command {
        Vec::from(content)
    } else if let Command::PutObjectTagging { tags } = self.command {
        Vec::from(tags)
    } else {
        Vec::new()
    };
    let request = client
        .request(self.command.http_verb(), self.url().as_str())
        .headers(headers.to_owned())
        .body(content.to_owned());
    request.send().map_err(S3Error::from)
}
/// Resolve to the fully-buffered response body plus HTTP status code.
///
/// The body stream is collected chunk by chunk and concatenated into a
/// single `Vec<u8>` before the future completes.
pub fn response_data_future(&self) -> impl Future<Item = (Vec<u8>, u16), Error = S3Error> {
    self.response_future()
        .and_then(|response| {
            // Capture the status before consuming the response into its
            // body stream.
            let status_code = response.status().as_u16();
            Ok((response.into_body().collect(), status_code))
        })
        .and_then(|(body_future, status_code)| {
            body_future
                .and_then(move |body| {
                    // Concatenate all received chunks into one buffer.
                    let mut entire_body = body
                        .iter()
                        .fold(vec![], |mut acc, slice| {acc.extend_from_slice(slice); acc});
                    entire_body.shrink_to_fit();
                    Ok((entire_body, status_code))
                })
                .map_err(S3Error::from)
        })
}
/// Resolve to the HTTP status code after writing the entire response body
/// into `writer`.
///
/// NOTE(review): write failures panic via `expect` instead of being
/// surfaced through the future's error type — confirm intended.
pub fn response_data_to_writer_future<'b, T: Write>(
    &self,
    writer: &'b mut T,
) -> impl Future<Item = u16> + 'b {
    let future_response = self.response_data_future();
    future_response.and_then(move |(body, status_code)| {
        writer
            .write_all(body.as_slice())
            .expect("Could not write to writer");
        Ok(status_code)
    })
}
}
#[cfg(test)]
mod tests {
    use bucket::Bucket;
    use command::Command;
    use credentials::Credentials;
    use error::S3Result;
    use request::Request;

    // Fake keys - otherwise using Credentials::default will use actual user
    // credentials if they exist.
    fn fake_credentials() -> Credentials {
        const ACCESS_KEY: &'static str = "AKIAIOSFODNN7EXAMPLE";
        const SECRET_KEY: &'static str = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY";
        Credentials::new(Some(ACCESS_KEY.into()), Some(SECRET_KEY.into()), None, None)
    }

    // A region string without a scheme defaults to https.
    #[test]
    fn url_uses_https_by_default() -> S3Result<()> {
        let region = "custom-region".parse()?;
        let bucket = Bucket::new("my-first-bucket", region, fake_credentials())?;
        let path = "/my-first/path";
        let request = Request::new(&bucket, path, Command::GetObject);
        assert_eq!(request.url().scheme(), "https");
        let headers = request.headers().unwrap();
        let host = headers.get("Host").unwrap();
        assert_eq!(*host, "custom-region".to_string());
        Ok(())
    }

    // An explicit scheme in the custom region overrides the https default;
    // the Host header still carries only the bare host name.
    #[test]
    fn url_uses_scheme_from_custom_region_if_defined() -> S3Result<()> {
        let region = "http://custom-region".parse()?;
        let bucket = Bucket::new("my-second-bucket", region, fake_credentials())?;
        let path = "/my-second/path";
        let request = Request::new(&bucket, path, Command::GetObject);
        assert_eq!(request.url().scheme(), "http");
        let headers = request.headers().unwrap();
        let host = headers.get("Host").unwrap();
        assert_eq!(*host, "custom-region".to_string());
        Ok(())
    }
}
| {
Ok(signing::signing_key(
&self.datetime,
&self.bucket.secret_key(),
&self.bucket.region(),
"s3",
)?)
} | identifier_body |
__init__.py | """
Defines autosub's main functionality.
"""
from __future__ import absolute_import, print_function, unicode_literals
from fuzzywuzzy import fuzz
import time
import argparse
import cv2
import os
import sys
import html
from datetime import datetime
import numpy as np
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from autosub_v2.constants import (
LANGUAGE_CODES, GOOGLE_SPEECH_API_KEY, GOOGLE_SPEECH_API_URL,
)
from autosub_v2.formatters import FORMATTERS
from paddleocr import PaddleOCR
def nothing(x):
pass
# cv2.namedWindow("Trackbars")
DEFAULT_SUBTITLE_FORMAT = 'srt'
DEFAULT_CONCURRENCY = 10
DEFAULT_SRC_LANGUAGE = 'en'
DEFAULT_DST_LANGUAGE = 'vi'
import six
from google.oauth2 import service_account
from google.cloud import translate_v2 as translate
from google.cloud import vision
credentials = service_account.Credentials.from_service_account_file(r"C:\autosub_models\key.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
translate_client = translate.Client(credentials=credentials)
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def detect_texts_google_cloud(content):
    """Run Google Cloud Vision text detection on raw image bytes.

    Uses the module-level Vision ``client`` with a Chinese language hint.
    Returns the full detected text (the first annotation covers the whole
    image) stripped and with '-' removed, or "" when nothing was detected.

    The original body returned from inside a ``for`` loop on the first
    iteration and had an unreachable duplicate ``return`` after the loop;
    this rewrite expresses the same behavior directly.
    """
    image = vision.Image(content=content)
    response = client.text_detection(image=image, image_context={"language_hints": ["zh"]})
    texts = response.text_annotations
    if not texts:
        return ""
    # The first annotation is the entire detected text block.
    return texts[0].description.strip().replace("-", "")
# def detect_texts(img_path, ocr):
# """Detects text in the file."""
# result = ocr.ocr(img_path, det=False, rec=True, cls=False)
# for line in result:
# # print(line[0])
# if line[1] > 0.7:
# return line[0]
# return ""
def translate_text_google_cloud(target, text):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
if isinstance(text, six.binary_type):
text = text.decode("utf-8")
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.translate(text, target_language=target)
result = html.unescape(result["translatedText"])
# print(u"Text: {}".format(result["input"]))
print("Translation: {}".format(result))
# print(u"Detected source language: {}".format(result["detectedSourceLanguage"]))
return result
def translate_text(target, text):
|
def generate_subtitles(
source_path,
output=None,
dst_language=DEFAULT_DST_LANGUAGE,
debug=False,
cloud=False,
disable_time=False,
min_height=80,
max_height=100,
l_v=240
):
"""
Given an input audio/video file, generate subtitles in the specified language and format.
"""
# Opens the Video file
print(f"starting: using cloud {cloud}, source_path {source_path}")
if not cloud:
ocr = PaddleOCR(lang='ch', use_gpu=False,
rec_model_dir=r"C:\autosub_models\rec",
cls_model_dir=r"C:\autosub_models\cls",
det_model_dir=r"C:\autosub_models\det",
use_angle_cls=True,
rec_char_type='ch',
drop_score=0.8,
det_db_box_thresh=0.3,
cls=True)
cap = cv2.VideoCapture(source_path)
# cap.set(3, 1280)
# cap.set(4, 720)
# cv2.createTrackbar("L - V", "Trackbars", 0, 100, nothing)
# cv2.createTrackbar("Min height", "Trackbars", 80, 100, nothing)
# cv2.createTrackbar("Max Height", "Trackbars", 100, 100, nothing)
fps = cap.get(cv2.CAP_PROP_FPS)
print(f"fps {fps}")
time_per_frame = 1 / fps
i = 0
div_frame = 6 # 5 frame /s
sub_idx = 1
list_srt = []
old_des = ""
prev_time = 0
current_time = 0
file_name = os.path.basename(source_path)
extenstion = ".srt" if not disable_time else ".txt"
filesub = f"{os.path.splitext(file_name)[0]}{extenstion}"
if os.path.isfile(filesub):
os.remove(filesub)
while (cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
# min_height = cv2.getTrackbarPos("Min height", "Trackbars")
# max_height = cv2.getTrackbarPos("Max Height", "Trackbars")
# if max_height < min_height:
# max_height = min_height + 10
# l_v = cv2.getTrackbarPos("L - V", "Trackbars")
if i % div_frame == 0:
prev_time_ts = datetime.utcfromtimestamp(prev_time).strftime('%H:%M:%S,%f')[:-4]
current_time_ts = datetime.utcfromtimestamp(current_time).strftime('%H:%M:%S,%f')[:-4]
h, w, c = frame.shape
crop_img = frame[int(h * min_height/100):int(h * max_height/100), 0:w]
hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
# define range of white color in HSV
# change it according to your need !
lower_white = np.array([0, 0, 246], dtype=np.uint8)
upper_white = np.array([157, 21, 255], dtype=np.uint8)
# Threshold the HSV image to get only white colors
mask = cv2.inRange(hsv, lower_white, upper_white)
# Bitwise-AND mask and original image
crop_img = cv2.bitwise_and(crop_img, crop_img, mask=mask)
# crop_img = cv2.cvtColor(crop_img, cv2.COLOR_HSV2RGB)
# crop_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2GRAY)
description = ""
if cloud:
success, encoded_image = cv2.imencode('.jpg', crop_img)
description = detect_texts_google_cloud(encoded_image.tobytes())
else:
# dst = cv2.fastNlMeansDenoisingColored(crop_img,None,10,10,7,21)
# stacked = np.hstack((dst, crop_img))
if debug:
# cv2.imshow('dst', dst)
cv2.imshow('crop_img', crop_img)
cv2.imshow('frame', frame)
cv2.waitKey(1)
result = ocr.ocr(crop_img, det=False, rec=True, cls=False)
for line in result:
# print(current_time_ts, line)
if line[1] > 0.7:
description = html.unescape(line[0].strip().replace(',', '').replace('、', '').replace('.', ''))
break
description = "" if len(description) < 6 else description
prev_des = ""
ratio = fuzz.ratio(description.lower(), old_des.lower())
if len(list_srt) > 0:
prev_des = list_srt[-1]['description']
print(current_time_ts, description, ratio)
if (old_des != "" or description == "") and (ratio < 70) and current_time - prev_time > 0.5:
list_srt.append({
"description": old_des,
"translate": translate_text(dst_language, old_des),
"first_time": prev_time_ts,
"last_time": current_time_ts,
"sub_idx": sub_idx
})
# with open(f"{os.path.splitext(file_name)[0]}_raw.srt", "a", encoding="utf-8") as myfile:
# myfile.write(f"{list_srt[-1]['sub_idx']}\n")
# myfile.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
# myfile.write(f"{list_srt[-1]['description']}\n")
# myfile.write('\n')
# myfile.close()
with open(filesub, "a", encoding="utf-8") as myfile_vi:
if not disable_time:
myfile_vi.write(f"{list_srt[-1]['sub_idx']}\n")
myfile_vi.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
myfile_vi.write(f"{list_srt[-1]['translate']}\n")
myfile_vi.write('\n')
myfile_vi.close()
print(f"{list_srt[-1]['sub_idx']}\n")
print(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
print(f"{list_srt[-1]['description']}\n")
print(f"{list_srt[-1]['translate']}\n")
print(f"Similarity{ratio}\n")
print('\n')
sub_idx += 1
prev_time = current_time
if description == "":
prev_time = current_time
old_des = description
current_time += time_per_frame * div_frame
i += 1
cap.release()
return output
def validate(args):
"""
Check that the CLI arguments passed to autosub are valid.
"""
if args.format not in FORMATTERS:
print(
"Subtitle format not supported. "
"Run with --list-formats to see all supported formats."
)
return False
if args.src_language not in LANGUAGE_CODES.keys():
print(
"Source language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if args.dst_language not in LANGUAGE_CODES.keys():
print(
"Destination language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if not args.source_path:
print("Error: You need to specify a source path.")
return False
return True
def main():
"""
Run autosub as a command-line program.
"""
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
nargs='?')
parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
type=int, default=DEFAULT_CONCURRENCY)
parser.add_argument('-o', '--output',
help="Output path for subtitles (by default, subtitles are saved in \
the same directory and name as the source path)")
parser.add_argument('-F', '--format', help="Destination subtitle format",
default=DEFAULT_SUBTITLE_FORMAT)
parser.add_argument('-S', '--src-language', help="Language spoken in source file",
default=DEFAULT_SRC_LANGUAGE)
parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
default=DEFAULT_DST_LANGUAGE)
parser.add_argument('-K', '--api-key',
help="The Google Translate API key to be used. \
(Required for subtitle translation)")
parser.add_argument('--list-formats', help="List all available subtitle formats",
action='store_true')
parser.add_argument('--list-languages', help="List all available source/destination languages",
action='store_true')
parser.add_argument('--min_height', help="minimum height from 0 - 100%", type=float, default=93)
parser.add_argument('--max_height', help="maximum height from 0 - 100%", type=float, default=99)
parser.add_argument('--l_v', help="Light sensitive", type=float, default=210)
parser.add_argument('--debug', help="Allows to show cropped image on the desktop", action='store_true', default=True)
parser.add_argument('--cloud', help="Use google cloud compute to extract text", action='store_true', default=False)
parser.add_argument('--disable_time', help="Parse time function", action='store_true')
parser.add_argument('--all', help="Render all files", action='store_true')
args = parser.parse_args()
if args.list_formats:
print("List of formats:")
for subtitle_format in FORMATTERS:
print("{format}".format(format=subtitle_format))
return 0
if args.list_languages:
print("List of all languages:")
for code, language in sorted(LANGUAGE_CODES.items()):
print("{code}\t{language}".format(code=code, language=language))
return 0
if not validate(args):
return 1
try:
if args.all:
for file in os.listdir():
# *.avi *.flv *.mkv *.mpg *.mp4 *.webm
if file.endswith('.avi') or file.endswith('.flv') or file.endswith('.mkv') or file.endswith('.mpg') or file.endswith('.mp4') or file.endswith(".webm"):
st = time.time()
subtitle_file_path = generate_subtitles(
source_path=file,
dst_language=args.dst_language,
output=args.output,
debug=args.debug,
cloud=args.cloud,
disable_time=args.disable_time,
min_height=args.min_height,
max_height=args.max_height,
l_v=args.l_v,
)
print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
else:
st = time.time()
subtitle_file_path = generate_subtitles(
source_path=args.source_path,
dst_language=args.dst_language,
output=args.output,
debug=args.debug,
cloud=args.cloud,
disable_time=args.disable_time,
min_height=args.min_height,
max_height=args.max_height,
l_v=args.l_v,
)
print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
except KeyboardInterrupt:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| """Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
return text | identifier_body |
__init__.py | """
Defines autosub's main functionality.
"""
from __future__ import absolute_import, print_function, unicode_literals
from fuzzywuzzy import fuzz
import time
import argparse
import cv2
import os
import sys
import html
from datetime import datetime
import numpy as np
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from autosub_v2.constants import (
LANGUAGE_CODES, GOOGLE_SPEECH_API_KEY, GOOGLE_SPEECH_API_URL,
)
from autosub_v2.formatters import FORMATTERS
from paddleocr import PaddleOCR
def nothing(x):
    # No-op callback; cv2.createTrackbar requires a callback even when the
    # value is only polled (the trackbar code is currently commented out).
    pass
# cv2.namedWindow("Trackbars")
# CLI defaults.
DEFAULT_SUBTITLE_FORMAT = 'srt'
DEFAULT_CONCURRENCY = 10
DEFAULT_SRC_LANGUAGE = 'en'
DEFAULT_DST_LANGUAGE = 'vi'
import six
from google.oauth2 import service_account
from google.cloud import translate_v2 as translate
from google.cloud import vision
# Module-level Google Cloud clients, created eagerly at import time from a
# hard-coded service-account key path — importing this module fails if the
# key file is missing.
credentials = service_account.Credentials.from_service_account_file(r"C:\autosub_models\key.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
translate_client = translate.Client(credentials=credentials)
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def | (content):
"""Detects text in the file."""
image = vision.Image(content=content)
response = client.text_detection(image=image, image_context={"language_hints": ["zh"]})
texts = response.text_annotations
predict_des = ""
for text in texts:
predict_des = text.description
predict_des = predict_des.strip().replace("-", "")
return predict_des
return predict_des
# def detect_texts(img_path, ocr):
# """Detects text in the file."""
# result = ocr.ocr(img_path, det=False, rec=True, cls=False)
# for line in result:
# # print(line[0])
# if line[1] > 0.7:
# return line[0]
# return ""
def translate_text_google_cloud(target, text):
    """Translate ``text`` into the ``target`` language via Google Cloud.

    ``target`` must be an ISO 639-1 language code; see
    https://g.co/cloud/translate/v2/translate-reference#supported_languages
    Byte strings are decoded as UTF-8 before being sent. Returns the
    HTML-unescaped translated string.
    """
    # The API expects text, not bytes.
    if isinstance(text, six.binary_type):
        text = text.decode("utf-8")
    response = translate_client.translate(text, target_language=target)
    translated = html.unescape(response["translatedText"])
    print("Translation: {}".format(translated))
    return translated
def translate_text(target, text):
    """Pass-through stub: return ``text`` unchanged (translation disabled).

    The signature mirrors ``translate_text_google_cloud`` so callers can
    swap implementations; ``target`` is accepted but ignored. The previous
    docstring claimed the text was translated, which was misleading.
    """
    return text
def generate_subtitles(
        source_path,
        output=None,
        dst_language=DEFAULT_DST_LANGUAGE,
        debug=False,
        cloud=False,
        disable_time=False,
        min_height=80,
        max_height=100,
        l_v=240
):
    """
    Given an input audio/video file, generate subtitles in the specified language and format.

    Hard-subbed text is OCR'd from the bottom strip of every sampled frame
    (one in ``div_frame``), de-duplicated with fuzzy matching, and appended
    incrementally to a .srt (or .txt when ``disable_time`` is set) file
    named after ``source_path`` in the working directory.

    source_path   -- video file to read frames from
    output        -- returned unchanged; the subtitle file name is derived
                     from source_path, not from this argument
    dst_language  -- target language code passed to translate_text
    debug         -- show the cropped/filtered frames in OpenCV windows
    cloud         -- use Google Cloud Vision instead of local PaddleOCR
    disable_time  -- write bare text lines instead of timed SRT cues
    min_height/max_height -- vertical crop band, percent of frame height
    l_v           -- NOTE(review): accepted but never used in this body
                     (only by the commented-out trackbar code) — confirm
    """
    print(f"starting: using cloud {cloud}, source_path {source_path}")
    if not cloud:
        # Local OCR path: Chinese recognition-only PaddleOCR models.
        ocr = PaddleOCR(lang='ch', use_gpu=False,
                        rec_model_dir=r"C:\autosub_models\rec",
                        cls_model_dir=r"C:\autosub_models\cls",
                        det_model_dir=r"C:\autosub_models\det",
                        use_angle_cls=True,
                        rec_char_type='ch',
                        drop_score=0.8,
                        det_db_box_thresh=0.3,
                        cls=True)
    cap = cv2.VideoCapture(source_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    print(f"fps {fps}")
    time_per_frame = 1 / fps
    i = 0
    div_frame = 6  # sample every 6th frame
    sub_idx = 1  # running SRT cue number
    list_srt = []  # cues emitted so far
    old_des = ""  # text seen on the previous sampled frame
    prev_time = 0  # start time of the subtitle currently on screen
    current_time = 0  # timestamp of the current sampled frame
    file_name = os.path.basename(source_path)
    extenstion = ".srt" if not disable_time else ".txt"
    filesub = f"{os.path.splitext(file_name)[0]}{extenstion}"
    if os.path.isfile(filesub):
        # Start fresh; cues are appended one at a time below.
        os.remove(filesub)
    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret == False:
            break
        if i % div_frame == 0:
            prev_time_ts = datetime.utcfromtimestamp(prev_time).strftime('%H:%M:%S,%f')[:-4]
            current_time_ts = datetime.utcfromtimestamp(current_time).strftime('%H:%M:%S,%f')[:-4]
            h, w, c = frame.shape
            # Crop the horizontal band where the hard subs are rendered.
            crop_img = frame[int(h * min_height/100):int(h * max_height/100), 0:w]
            hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
            # Keep only near-white pixels (typical subtitle color); tune to
            # the source material if subs use a different color.
            lower_white = np.array([0, 0, 246], dtype=np.uint8)
            upper_white = np.array([157, 21, 255], dtype=np.uint8)
            mask = cv2.inRange(hsv, lower_white, upper_white)
            crop_img = cv2.bitwise_and(crop_img, crop_img, mask=mask)
            description = ""
            if cloud:
                success, encoded_image = cv2.imencode('.jpg', crop_img)
                description = detect_texts_google_cloud(encoded_image.tobytes())
            else:
                if debug:
                    cv2.imshow('crop_img', crop_img)
                    cv2.imshow('frame', frame)
                    cv2.waitKey(1)
                result = ocr.ocr(crop_img, det=False, rec=True, cls=False)
                for line in result:
                    # Each line is (text, confidence); keep the first
                    # confident hit, stripped of punctuation.
                    if line[1] > 0.7:
                        description = html.unescape(line[0].strip().replace(',', '').replace('、', '').replace('.', ''))
                        break
            # Very short strings are treated as OCR noise.
            description = "" if len(description) < 6 else description
            prev_des = ""
            ratio = fuzz.ratio(description.lower(), old_des.lower())
            if len(list_srt) > 0:
                prev_des = list_srt[-1]['description']
            print(current_time_ts, description, ratio)
            # Flush the previous subtitle when the on-screen text changed
            # (fuzzy similarity < 70) and it stayed visible long enough.
            if (old_des != "" or description == "") and (ratio < 70) and current_time - prev_time > 0.5:
                list_srt.append({
                    "description": old_des,
                    "translate": translate_text(dst_language, old_des),
                    "first_time": prev_time_ts,
                    "last_time": current_time_ts,
                    "sub_idx": sub_idx
                })
                with open(filesub, "a", encoding="utf-8") as myfile_vi:
                    if not disable_time:
                        myfile_vi.write(f"{list_srt[-1]['sub_idx']}\n")
                        myfile_vi.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
                    myfile_vi.write(f"{list_srt[-1]['translate']}\n")
                    myfile_vi.write('\n')
                    # NOTE(review): redundant inside `with`; kept as-is.
                    myfile_vi.close()
                print(f"{list_srt[-1]['sub_idx']}\n")
                print(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
                print(f"{list_srt[-1]['description']}\n")
                print(f"{list_srt[-1]['translate']}\n")
                print(f"Similarity{ratio}\n")
                print('\n')
                sub_idx += 1
                prev_time = current_time
            if description == "":
                # No text on screen: keep sliding the start of the next cue.
                prev_time = current_time
            old_des = description
            current_time += time_per_frame * div_frame
        i += 1
    cap.release()
    return output
def validate(args):
    """
    Check that the CLI arguments passed to autosub are valid.

    Prints a message for the first failing check and returns False;
    returns True when everything is usable.
    """
    checks = [
        (args.format in FORMATTERS,
         "Subtitle format not supported. "
         "Run with --list-formats to see all supported formats."),
        (args.src_language in LANGUAGE_CODES.keys(),
         "Source language not supported. "
         "Run with --list-languages to see all supported languages."),
        (args.dst_language in LANGUAGE_CODES.keys(),
         "Destination language not supported. "
         "Run with --list-languages to see all supported languages."),
        (bool(args.source_path),
         "Error: You need to specify a source path."),
    ]
    for ok, message in checks:
        if not ok:
            print(message)
            return False
    return True
def main():
    """
    Run autosub as a command-line program.

    Parses CLI arguments, handles the --list-formats / --list-languages
    shortcuts, validates the remaining options, then runs
    generate_subtitles either on one source file or (with --all) on every
    video file in the current directory.

    Returns a process exit code: 0 on success, 1 on invalid arguments or
    keyboard interrupt.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
                        nargs='?')
    parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
                        type=int, default=DEFAULT_CONCURRENCY)
    parser.add_argument('-o', '--output',
                        help="Output path for subtitles (by default, subtitles are saved in \
                        the same directory and name as the source path)")
    parser.add_argument('-F', '--format', help="Destination subtitle format",
                        default=DEFAULT_SUBTITLE_FORMAT)
    parser.add_argument('-S', '--src-language', help="Language spoken in source file",
                        default=DEFAULT_SRC_LANGUAGE)
    parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
                        default=DEFAULT_DST_LANGUAGE)
    parser.add_argument('-K', '--api-key',
                        help="The Google Translate API key to be used. \
                        (Required for subtitle translation)")
    parser.add_argument('--list-formats', help="List all available subtitle formats",
                        action='store_true')
    parser.add_argument('--list-languages', help="List all available source/destination languages",
                        action='store_true')
    # BUG FIX: argparse %-expands help strings when rendering --help, so a
    # bare "%" raised ValueError; it must be escaped as "%%".
    parser.add_argument('--min_height', help="minimum height from 0 - 100%%", type=float, default=93)
    parser.add_argument('--max_height', help="maximum height from 0 - 100%%", type=float, default=99)
    parser.add_argument('--l_v', help="Light sensitive", type=float, default=210)
    # NOTE(review): with action='store_true' and default=True this flag is
    # always True and cannot be turned off from the CLI — confirm intent.
    parser.add_argument('--debug', help="Allows to show cropped image on the desktop", action='store_true', default=True)
    parser.add_argument('--cloud', help="Use google cloud compute to extract text", action='store_true', default=False)
    parser.add_argument('--disable_time', help="Parse time function", action='store_true')
    parser.add_argument('--all', help="Render all files", action='store_true')
    args = parser.parse_args()
    if args.list_formats:
        print("List of formats:")
        for subtitle_format in FORMATTERS:
            print("{format}".format(format=subtitle_format))
        return 0
    if args.list_languages:
        print("List of all languages:")
        for code, language in sorted(LANGUAGE_CODES.items()):
            print("{code}\t{language}".format(code=code, language=language))
        return 0
    if not validate(args):
        return 1
    try:
        if args.all:
            # Video extensions handled when rendering a whole directory;
            # str.endswith accepts a tuple, replacing the old `or` chain.
            video_exts = ('.avi', '.flv', '.mkv', '.mpg', '.mp4', '.webm')
            for file in os.listdir():
                if file.endswith(video_exts):
                    st = time.time()
                    subtitle_file_path = generate_subtitles(
                        source_path=file,
                        dst_language=args.dst_language,
                        output=args.output,
                        debug=args.debug,
                        cloud=args.cloud,
                        disable_time=args.disable_time,
                        min_height=args.min_height,
                        max_height=args.max_height,
                        l_v=args.l_v,
                    )
                    print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
        else:
            st = time.time()
            subtitle_file_path = generate_subtitles(
                source_path=args.source_path,
                dst_language=args.dst_language,
                output=args.output,
                debug=args.debug,
                cloud=args.cloud,
                disable_time=args.disable_time,
                min_height=args.min_height,
                max_height=args.max_height,
                l_v=args.l_v,
            )
            print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
    except KeyboardInterrupt:
        return 1
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| detect_texts_google_cloud | identifier_name |
__init__.py | """
Defines autosub's main functionality.
"""
from __future__ import absolute_import, print_function, unicode_literals
from fuzzywuzzy import fuzz
import time
import argparse
import cv2
import os
import sys
import html
from datetime import datetime
import numpy as np
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from autosub_v2.constants import (
LANGUAGE_CODES, GOOGLE_SPEECH_API_KEY, GOOGLE_SPEECH_API_URL,
)
from autosub_v2.formatters import FORMATTERS
from paddleocr import PaddleOCR
def nothing(x):
pass
# cv2.namedWindow("Trackbars")
DEFAULT_SUBTITLE_FORMAT = 'srt'
DEFAULT_CONCURRENCY = 10
DEFAULT_SRC_LANGUAGE = 'en'
DEFAULT_DST_LANGUAGE = 'vi'
import six
from google.oauth2 import service_account
from google.cloud import translate_v2 as translate
from google.cloud import vision
credentials = service_account.Credentials.from_service_account_file(r"C:\autosub_models\key.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
translate_client = translate.Client(credentials=credentials)
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def detect_texts_google_cloud(content):
"""Detects text in the file."""
image = vision.Image(content=content)
response = client.text_detection(image=image, image_context={"language_hints": ["zh"]})
texts = response.text_annotations
predict_des = ""
for text in texts:
predict_des = text.description
predict_des = predict_des.strip().replace("-", "")
return predict_des
return predict_des
# def detect_texts(img_path, ocr):
# """Detects text in the file."""
# result = ocr.ocr(img_path, det=False, rec=True, cls=False)
# for line in result:
# # print(line[0])
# if line[1] > 0.7:
# return line[0]
# return ""
def translate_text_google_cloud(target, text):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
if isinstance(text, six.binary_type):
text = text.decode("utf-8")
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.translate(text, target_language=target)
result = html.unescape(result["translatedText"])
# print(u"Text: {}".format(result["input"]))
print("Translation: {}".format(result))
# print(u"Detected source language: {}".format(result["detectedSourceLanguage"]))
return result
def translate_text(target, text):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
return text
def generate_subtitles(
source_path,
output=None,
dst_language=DEFAULT_DST_LANGUAGE,
debug=False,
cloud=False,
disable_time=False,
min_height=80,
max_height=100,
l_v=240
):
"""
Given an input audio/video file, generate subtitles in the specified language and format.
"""
# Opens the Video file
print(f"starting: using cloud {cloud}, source_path {source_path}")
if not cloud:
ocr = PaddleOCR(lang='ch', use_gpu=False,
rec_model_dir=r"C:\autosub_models\rec",
cls_model_dir=r"C:\autosub_models\cls",
det_model_dir=r"C:\autosub_models\det",
use_angle_cls=True,
rec_char_type='ch',
drop_score=0.8,
det_db_box_thresh=0.3,
cls=True)
cap = cv2.VideoCapture(source_path)
# cap.set(3, 1280)
# cap.set(4, 720)
# cv2.createTrackbar("L - V", "Trackbars", 0, 100, nothing)
# cv2.createTrackbar("Min height", "Trackbars", 80, 100, nothing)
# cv2.createTrackbar("Max Height", "Trackbars", 100, 100, nothing)
fps = cap.get(cv2.CAP_PROP_FPS)
print(f"fps {fps}")
time_per_frame = 1 / fps
i = 0
div_frame = 6 # 5 frame /s
sub_idx = 1
list_srt = []
old_des = ""
prev_time = 0
current_time = 0
file_name = os.path.basename(source_path)
extenstion = ".srt" if not disable_time else ".txt"
filesub = f"{os.path.splitext(file_name)[0]}{extenstion}"
if os.path.isfile(filesub):
os.remove(filesub)
while (cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
# min_height = cv2.getTrackbarPos("Min height", "Trackbars")
# max_height = cv2.getTrackbarPos("Max Height", "Trackbars")
# if max_height < min_height:
# max_height = min_height + 10
# l_v = cv2.getTrackbarPos("L - V", "Trackbars")
if i % div_frame == 0:
prev_time_ts = datetime.utcfromtimestamp(prev_time).strftime('%H:%M:%S,%f')[:-4]
current_time_ts = datetime.utcfromtimestamp(current_time).strftime('%H:%M:%S,%f')[:-4]
h, w, c = frame.shape
crop_img = frame[int(h * min_height/100):int(h * max_height/100), 0:w]
hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
# define range of white color in HSV
# change it according to your need !
lower_white = np.array([0, 0, 246], dtype=np.uint8)
upper_white = np.array([157, 21, 255], dtype=np.uint8)
# Threshold the HSV image to get only white colors
mask = cv2.inRange(hsv, lower_white, upper_white)
# Bitwise-AND mask and original image
crop_img = cv2.bitwise_and(crop_img, crop_img, mask=mask)
# crop_img = cv2.cvtColor(crop_img, cv2.COLOR_HSV2RGB)
# crop_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2GRAY)
description = ""
if cloud:
success, encoded_image = cv2.imencode('.jpg', crop_img)
description = detect_texts_google_cloud(encoded_image.tobytes())
else:
# dst = cv2.fastNlMeansDenoisingColored(crop_img,None,10,10,7,21)
# stacked = np.hstack((dst, crop_img))
if debug:
# cv2.imshow('dst', dst)
cv2.imshow('crop_img', crop_img)
cv2.imshow('frame', frame)
cv2.waitKey(1)
result = ocr.ocr(crop_img, det=False, rec=True, cls=False)
for line in result:
# print(current_time_ts, line)
if line[1] > 0.7:
description = html.unescape(line[0].strip().replace(',', '').replace('、', '').replace('.', ''))
break
description = "" if len(description) < 6 else description
prev_des = ""
ratio = fuzz.ratio(description.lower(), old_des.lower())
if len(list_srt) > 0:
prev_des = list_srt[-1]['description']
print(current_time_ts, description, ratio)
if (old_des != "" or description == "") and (ratio < 70) and current_time - prev_time > 0.5:
list_srt.append({
"description": old_des,
"translate": translate_text(dst_language, old_des),
"first_time": prev_time_ts,
"last_time": current_time_ts,
"sub_idx": sub_idx
})
# with open(f"{os.path.splitext(file_name)[0]}_raw.srt", "a", encoding="utf-8") as myfile:
# myfile.write(f"{list_srt[-1]['sub_idx']}\n")
# myfile.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
# myfile.write(f"{list_srt[-1]['description']}\n")
# myfile.write('\n')
# myfile.close()
with open(filesub, "a", encoding="utf-8") as myfile_vi:
if not disable_time:
myfile_vi.write(f"{list_srt[-1]['sub_idx']}\n")
myfile_vi.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
myfile_vi.write(f"{list_srt[-1]['translate']}\n")
myfile_vi.write('\n')
myfile_vi.close()
print(f"{list_srt[-1]['sub_idx']}\n")
print(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
print(f"{list_srt[-1]['description']}\n")
print(f"{list_srt[-1]['translate']}\n")
print(f"Similarity{ratio}\n")
print('\n')
sub_idx += 1
prev_time = current_time
if description == "":
prev_time = current_time
old_des = description
current_time += time_per_frame * div_frame
i += 1
cap.release()
return output
def validate(args):
"""
Check that the CLI arguments passed to autosub are valid.
"""
if args.format not in FORMATTERS:
print(
"Subtitle format not supported. "
"Run with --list-formats to see all supported formats."
)
return False
if args.src_language not in LANGUAGE_CODES.keys():
print(
"Source language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if args.dst_language not in LANGUAGE_CODES.keys():
print(
"Destination language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if not args.source_path:
print("Error: You need to specify a source path.")
return False
return True
def main():
"""
Run autosub as a command-line program.
"""
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
nargs='?')
parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
type=int, default=DEFAULT_CONCURRENCY)
parser.add_argument('-o', '--output',
help="Output path for subtitles (by default, subtitles are saved in \
the same directory and name as the source path)")
parser.add_argument('-F', '--format', help="Destination subtitle format",
default=DEFAULT_SUBTITLE_FORMAT)
parser.add_argument('-S', '--src-language', help="Language spoken in source file",
default=DEFAULT_SRC_LANGUAGE) | parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
default=DEFAULT_DST_LANGUAGE)
parser.add_argument('-K', '--api-key',
help="The Google Translate API key to be used. \
(Required for subtitle translation)")
parser.add_argument('--list-formats', help="List all available subtitle formats",
action='store_true')
parser.add_argument('--list-languages', help="List all available source/destination languages",
action='store_true')
parser.add_argument('--min_height', help="minimum height from 0 - 100%", type=float, default=93)
parser.add_argument('--max_height', help="maximum height from 0 - 100%", type=float, default=99)
parser.add_argument('--l_v', help="Light sensitive", type=float, default=210)
parser.add_argument('--debug', help="Allows to show cropped image on the desktop", action='store_true', default=True)
parser.add_argument('--cloud', help="Use google cloud compute to extract text", action='store_true', default=False)
parser.add_argument('--disable_time', help="Parse time function", action='store_true')
parser.add_argument('--all', help="Render all files", action='store_true')
args = parser.parse_args()
if args.list_formats:
print("List of formats:")
for subtitle_format in FORMATTERS:
print("{format}".format(format=subtitle_format))
return 0
if args.list_languages:
print("List of all languages:")
for code, language in sorted(LANGUAGE_CODES.items()):
print("{code}\t{language}".format(code=code, language=language))
return 0
if not validate(args):
return 1
try:
if args.all:
for file in os.listdir():
# *.avi *.flv *.mkv *.mpg *.mp4 *.webm
if file.endswith('.avi') or file.endswith('.flv') or file.endswith('.mkv') or file.endswith('.mpg') or file.endswith('.mp4') or file.endswith(".webm"):
st = time.time()
subtitle_file_path = generate_subtitles(
source_path=file,
dst_language=args.dst_language,
output=args.output,
debug=args.debug,
cloud=args.cloud,
disable_time=args.disable_time,
min_height=args.min_height,
max_height=args.max_height,
l_v=args.l_v,
)
print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
else:
st = time.time()
subtitle_file_path = generate_subtitles(
source_path=args.source_path,
dst_language=args.dst_language,
output=args.output,
debug=args.debug,
cloud=args.cloud,
disable_time=args.disable_time,
min_height=args.min_height,
max_height=args.max_height,
l_v=args.l_v,
)
print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
except KeyboardInterrupt:
return 1
return 0
if __name__ == '__main__':
sys.exit(main()) | random_line_split | |
__init__.py | """
Defines autosub's main functionality.
"""
from __future__ import absolute_import, print_function, unicode_literals
from fuzzywuzzy import fuzz
import time
import argparse
import cv2
import os
import sys
import html
from datetime import datetime
import numpy as np
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from autosub_v2.constants import (
LANGUAGE_CODES, GOOGLE_SPEECH_API_KEY, GOOGLE_SPEECH_API_URL,
)
from autosub_v2.formatters import FORMATTERS
from paddleocr import PaddleOCR
def nothing(x):
pass
# cv2.namedWindow("Trackbars")
DEFAULT_SUBTITLE_FORMAT = 'srt'
DEFAULT_CONCURRENCY = 10
DEFAULT_SRC_LANGUAGE = 'en'
DEFAULT_DST_LANGUAGE = 'vi'
import six
from google.oauth2 import service_account
from google.cloud import translate_v2 as translate
from google.cloud import vision
credentials = service_account.Credentials.from_service_account_file(r"C:\autosub_models\key.json")
client = vision.ImageAnnotatorClient(credentials=credentials)
translate_client = translate.Client(credentials=credentials)
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def detect_texts_google_cloud(content):
"""Detects text in the file."""
image = vision.Image(content=content)
response = client.text_detection(image=image, image_context={"language_hints": ["zh"]})
texts = response.text_annotations
predict_des = ""
for text in texts:
predict_des = text.description
predict_des = predict_des.strip().replace("-", "")
return predict_des
return predict_des
# def detect_texts(img_path, ocr):
# """Detects text in the file."""
# result = ocr.ocr(img_path, det=False, rec=True, cls=False)
# for line in result:
# # print(line[0])
# if line[1] > 0.7:
# return line[0]
# return ""
def translate_text_google_cloud(target, text):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
if isinstance(text, six.binary_type):
text = text.decode("utf-8")
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.translate(text, target_language=target)
result = html.unescape(result["translatedText"])
# print(u"Text: {}".format(result["input"]))
print("Translation: {}".format(result))
# print(u"Detected source language: {}".format(result["detectedSourceLanguage"]))
return result
def translate_text(target, text):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
return text
def generate_subtitles(
source_path,
output=None,
dst_language=DEFAULT_DST_LANGUAGE,
debug=False,
cloud=False,
disable_time=False,
min_height=80,
max_height=100,
l_v=240
):
"""
Given an input audio/video file, generate subtitles in the specified language and format.
"""
# Opens the Video file
print(f"starting: using cloud {cloud}, source_path {source_path}")
if not cloud:
ocr = PaddleOCR(lang='ch', use_gpu=False,
rec_model_dir=r"C:\autosub_models\rec",
cls_model_dir=r"C:\autosub_models\cls",
det_model_dir=r"C:\autosub_models\det",
use_angle_cls=True,
rec_char_type='ch',
drop_score=0.8,
det_db_box_thresh=0.3,
cls=True)
cap = cv2.VideoCapture(source_path)
# cap.set(3, 1280)
# cap.set(4, 720)
# cv2.createTrackbar("L - V", "Trackbars", 0, 100, nothing)
# cv2.createTrackbar("Min height", "Trackbars", 80, 100, nothing)
# cv2.createTrackbar("Max Height", "Trackbars", 100, 100, nothing)
fps = cap.get(cv2.CAP_PROP_FPS)
print(f"fps {fps}")
time_per_frame = 1 / fps
i = 0
div_frame = 6 # 5 frame /s
sub_idx = 1
list_srt = []
old_des = ""
prev_time = 0
current_time = 0
file_name = os.path.basename(source_path)
extenstion = ".srt" if not disable_time else ".txt"
filesub = f"{os.path.splitext(file_name)[0]}{extenstion}"
if os.path.isfile(filesub):
os.remove(filesub)
while (cap.isOpened()):
ret, frame = cap.read()
if ret == False:
break
# min_height = cv2.getTrackbarPos("Min height", "Trackbars")
# max_height = cv2.getTrackbarPos("Max Height", "Trackbars")
# if max_height < min_height:
# max_height = min_height + 10
# l_v = cv2.getTrackbarPos("L - V", "Trackbars")
if i % div_frame == 0:
prev_time_ts = datetime.utcfromtimestamp(prev_time).strftime('%H:%M:%S,%f')[:-4]
current_time_ts = datetime.utcfromtimestamp(current_time).strftime('%H:%M:%S,%f')[:-4]
h, w, c = frame.shape
crop_img = frame[int(h * min_height/100):int(h * max_height/100), 0:w]
hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
# define range of white color in HSV
# change it according to your need !
lower_white = np.array([0, 0, 246], dtype=np.uint8)
upper_white = np.array([157, 21, 255], dtype=np.uint8)
# Threshold the HSV image to get only white colors
mask = cv2.inRange(hsv, lower_white, upper_white)
# Bitwise-AND mask and original image
crop_img = cv2.bitwise_and(crop_img, crop_img, mask=mask)
# crop_img = cv2.cvtColor(crop_img, cv2.COLOR_HSV2RGB)
# crop_img = cv2.cvtColor(crop_img, cv2.COLOR_RGB2GRAY)
description = ""
if cloud:
success, encoded_image = cv2.imencode('.jpg', crop_img)
description = detect_texts_google_cloud(encoded_image.tobytes())
else:
# dst = cv2.fastNlMeansDenoisingColored(crop_img,None,10,10,7,21)
# stacked = np.hstack((dst, crop_img))
if debug:
# cv2.imshow('dst', dst)
cv2.imshow('crop_img', crop_img)
cv2.imshow('frame', frame)
cv2.waitKey(1)
result = ocr.ocr(crop_img, det=False, rec=True, cls=False)
for line in result:
# print(current_time_ts, line)
if line[1] > 0.7:
description = html.unescape(line[0].strip().replace(',', '').replace('、', '').replace('.', ''))
break
description = "" if len(description) < 6 else description
prev_des = ""
ratio = fuzz.ratio(description.lower(), old_des.lower())
if len(list_srt) > 0:
prev_des = list_srt[-1]['description']
print(current_time_ts, description, ratio)
if (old_des != "" or description == "") and (ratio < 70) and current_time - prev_time > 0.5:
list_srt.append({
"description": old_des,
"translate": translate_text(dst_language, old_des),
"first_time": prev_time_ts,
"last_time": current_time_ts,
"sub_idx": sub_idx
})
# with open(f"{os.path.splitext(file_name)[0]}_raw.srt", "a", encoding="utf-8") as myfile:
# myfile.write(f"{list_srt[-1]['sub_idx']}\n")
# myfile.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
# myfile.write(f"{list_srt[-1]['description']}\n")
# myfile.write('\n')
# myfile.close()
with open(filesub, "a", encoding="utf-8") as myfile_vi:
if not disable_time:
myfile_vi.write(f"{list_srt[-1]['sub_idx']}\n")
myfile_vi.write(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
myfile_vi.write(f"{list_srt[-1]['translate']}\n")
myfile_vi.write('\n')
myfile_vi.close()
print(f"{list_srt[-1]['sub_idx']}\n")
print(f"{list_srt[-1]['first_time']} --> {list_srt[-1]['last_time']}\n")
print(f"{list_srt[-1]['description']}\n")
print(f"{list_srt[-1]['translate']}\n")
print(f"Similarity{ratio}\n")
print('\n')
sub_idx += 1
prev_time = current_time
if description == "":
prev_time = current_time
old_des = description
current_time += time_per_frame * div_frame
i += 1
cap.release()
return output
def validate(args):
"""
Check that the CLI arguments passed to autosub are valid.
"""
if args.format not in FORMATTERS:
print(
"Subtitle format not supported. "
"Run with --list-formats to see all supported formats."
)
return False
if args.src_language not in LANGUAGE_CODES.keys():
print(
"Source language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if args.dst_language not in LANGUAGE_CODES.keys():
print(
"Destination language not supported. "
"Run with --list-languages to see all supported languages."
)
return False
if not args.source_path:
print("Error: You need to specify a source path.")
return False
return True
def main():
"""
Run autosub as a command-line program.
"""
parser = argparse.ArgumentParser()
parser.add_argument('source_path', help="Path to the video or audio file to subtitle",
nargs='?')
parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
type=int, default=DEFAULT_CONCURRENCY)
parser.add_argument('-o', '--output',
help="Output path for subtitles (by default, subtitles are saved in \
the same directory and name as the source path)")
parser.add_argument('-F', '--format', help="Destination subtitle format",
default=DEFAULT_SUBTITLE_FORMAT)
parser.add_argument('-S', '--src-language', help="Language spoken in source file",
default=DEFAULT_SRC_LANGUAGE)
parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles",
default=DEFAULT_DST_LANGUAGE)
parser.add_argument('-K', '--api-key',
help="The Google Translate API key to be used. \
(Required for subtitle translation)")
parser.add_argument('--list-formats', help="List all available subtitle formats",
action='store_true')
parser.add_argument('--list-languages', help="List all available source/destination languages",
action='store_true')
parser.add_argument('--min_height', help="minimum height from 0 - 100%", type=float, default=93)
parser.add_argument('--max_height', help="maximum height from 0 - 100%", type=float, default=99)
parser.add_argument('--l_v', help="Light sensitive", type=float, default=210)
parser.add_argument('--debug', help="Allows to show cropped image on the desktop", action='store_true', default=True)
parser.add_argument('--cloud', help="Use google cloud compute to extract text", action='store_true', default=False)
parser.add_argument('--disable_time', help="Parse time function", action='store_true')
parser.add_argument('--all', help="Render all files", action='store_true')
args = parser.parse_args()
if args.list_formats:
print( | if args.list_languages:
print("List of all languages:")
for code, language in sorted(LANGUAGE_CODES.items()):
print("{code}\t{language}".format(code=code, language=language))
return 0
if not validate(args):
return 1
try:
if args.all:
for file in os.listdir():
# *.avi *.flv *.mkv *.mpg *.mp4 *.webm
if file.endswith('.avi') or file.endswith('.flv') or file.endswith('.mkv') or file.endswith('.mpg') or file.endswith('.mp4') or file.endswith(".webm"):
st = time.time()
subtitle_file_path = generate_subtitles(
source_path=file,
dst_language=args.dst_language,
output=args.output,
debug=args.debug,
cloud=args.cloud,
disable_time=args.disable_time,
min_height=args.min_height,
max_height=args.max_height,
l_v=args.l_v,
)
print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
else:
st = time.time()
subtitle_file_path = generate_subtitles(
source_path=args.source_path,
dst_language=args.dst_language,
output=args.output,
debug=args.debug,
cloud=args.cloud,
disable_time=args.disable_time,
min_height=args.min_height,
max_height=args.max_height,
l_v=args.l_v,
)
print("Subtitles file created at {} time consumer: {}".format(subtitle_file_path, time.time() - st))
except KeyboardInterrupt:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| "List of formats:")
for subtitle_format in FORMATTERS:
print("{format}".format(format=subtitle_format))
return 0
| conditional_block |
socketclient.go | // Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package socketclient
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
"go.fd.io/govpp/adapter"
"go.fd.io/govpp/binapi/memclnt"
"go.fd.io/govpp/codec"
)
const (
// DefaultSocketName is default VPP API socket file path.
DefaultSocketName = "/run/vpp/api.sock"
// DefaultClientName is used for identifying client in socket registration
DefaultClientName = "govppsock"
)
var (
// DefaultConnectTimeout is default timeout for connecting
DefaultConnectTimeout = time.Second * 3
// DefaultDisconnectTimeout is default timeout for discconnecting
DefaultDisconnectTimeout = time.Millisecond * 100
// MaxWaitReady defines maximum duration of waiting for socket file
MaxWaitReady = time.Second * 3
)
var (
debug = strings.Contains(os.Getenv("DEBUG_GOVPP"), "socketclient")
debugMsgIds = strings.Contains(os.Getenv("DEBUG_GOVPP"), "msgtable")
log logrus.FieldLogger
)
// SetLogger sets global logger.
func SetLogger(logger logrus.FieldLogger) {
log = logger
}
func init() {
logger := logrus.New()
if debug {
logger.Level = logrus.DebugLevel
logger.Debug("govpp: debug level enabled for socketclient")
}
log = logger.WithField("logger", "govpp/socketclient")
}
type Client struct {
socketPath string
clientName string
conn *net.UnixConn
reader *bufio.Reader
writer *bufio.Writer
connectTimeout time.Duration
disconnectTimeout time.Duration
msgCallback adapter.MsgCallback
clientIndex uint32
msgTable map[string]uint16
msgTableMu sync.RWMutex
sockDelMsgId uint16
writeMu sync.Mutex
headerPool *sync.Pool
quit chan struct{}
wg sync.WaitGroup
}
// NewVppClient returns a new Client using socket.
// If socket is empty string DefaultSocketName is used.
func NewVppClient(socket string) *Client {
if socket == "" {
socket = DefaultSocketName
}
return &Client{
socketPath: socket,
clientName: DefaultClientName,
connectTimeout: DefaultConnectTimeout,
disconnectTimeout: DefaultDisconnectTimeout,
headerPool: &sync.Pool{New: func() interface{} {
x := make([]byte, 16)
return &x
}},
msgCallback: func(msgID uint16, data []byte) {
log.Debugf("no callback set, dropping message: ID=%v len=%d", msgID, len(data))
},
}
}
// SetClientName sets a client name used for identification.
func (c *Client) SetClientName(name string) {
c.clientName = name
}
// SetConnectTimeout sets timeout used during connecting.
func (c *Client) SetConnectTimeout(t time.Duration) {
c.connectTimeout = t
}
// SetDisconnectTimeout sets timeout used during disconnecting.
func (c *Client) SetDisconnectTimeout(t time.Duration) {
c.disconnectTimeout = t
}
// SetMsgCallback sets the callback for incoming messages.
func (c *Client) SetMsgCallback(cb adapter.MsgCallback) {
log.Debug("SetMsgCallback")
c.msgCallback = cb
}
// WaitReady checks if the socket file exists and if it does not exist waits for
// it for the duration defined by MaxWaitReady.
func (c *Client) WaitReady() error {
socketDir, _ := filepath.Split(c.socketPath)
dirChain := strings.Split(filepath.ToSlash(filepath.Clean(socketDir)), "/")
dir := "/"
for _, dirElem := range dirChain {
dir = filepath.Join(dir, dirElem)
if err := waitForDir(dir); err != nil {
return err
}
log.Debugf("dir ready: %v", dir)
}
// check if socket already exists
if _, err := os.Stat(c.socketPath); err == nil {
return nil // socket exists, we are ready
} else if !errors.Is(err, fs.ErrNotExist) {
log.Debugf("error is: %+v", err)
return err // some other error occurred
}
log.Debugf("waiting for file: %v", c.socketPath)
// socket does not exist, watch for it
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() {
if err := watcher.Close(); err != nil {
log.Debugf("failed to close file watcher: %v", err)
}
}()
// start directory watcher
d := filepath.Dir(c.socketPath)
if err := watcher.Add(d); err != nil {
log.Debugf("watcher add(%v) error: %v", d, err)
return err
}
timeout := time.NewTimer(MaxWaitReady)
defer timeout.Stop()
for {
select {
case <-timeout.C:
log.Debugf("watcher timeout after: %v", MaxWaitReady)
return fmt.Errorf("timeout waiting (%s) for socket file: %s", MaxWaitReady, c.socketPath)
case e := <-watcher.Errors:
log.Debugf("watcher error: %+v", e)
return e
case ev := <-watcher.Events:
log.Debugf("watcher event: %+v", ev)
if ev.Name == c.socketPath && (ev.Op&fsnotify.Create) == fsnotify.Create {
// socket created, we are ready
return nil
}
}
}
}
func waitForDir(dir string) error {
// check if dir already exists
if _, err := os.Stat(dir); err == nil {
return nil // dir exists, we are ready
} else if !errors.Is(err, fs.ErrNotExist) {
log.Debugf("error is: %+v", err)
return err // some other error occurred
}
log.Debugf("waiting for dir: %v", dir)
// dir does not exist, watch for it
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() {
if err := watcher.Close(); err != nil {
log.Debugf("failed to close file watcher: %v", err)
}
}()
// start watching directory
d := filepath.Dir(dir)
if err := watcher.Add(d); err != nil {
log.Debugf("watcher add (%v) error: %v", d, err)
return err
}
timeout := time.NewTimer(MaxWaitReady)
defer timeout.Stop()
for {
select {
case <-timeout.C:
log.Debugf("watcher timeout after: %v", MaxWaitReady)
return fmt.Errorf("timeout waiting (%s) for directory: %s", MaxWaitReady, dir)
case e := <-watcher.Errors:
log.Debugf("watcher error: %+v", e)
return e
case ev := <-watcher.Events:
log.Debugf("watcher event: %+v", ev)
if ev.Name == dir && (ev.Op&fsnotify.Create) == fsnotify.Create {
// socket created, we are ready
return nil
}
}
}
}
func (c *Client) Connect() error {
// check if socket exists
if _, err := os.Stat(c.socketPath); os.IsNotExist(err) {
return fmt.Errorf("VPP API socket file %s does not exist", c.socketPath)
} else if err != nil {
return fmt.Errorf("VPP API socket error: %v", err)
}
if err := c.connect(c.socketPath); err != nil {
return err
}
if err := c.open(); err != nil {
_ = c.disconnect()
return err
}
c.quit = make(chan struct{})
c.wg.Add(1)
go c.readerLoop()
return nil
}
func (c *Client) Disconnect() error {
if c.conn == nil {
return nil
}
log.Debugf("Disconnecting..")
close(c.quit)
if err := c.conn.CloseRead(); err != nil {
log.Debugf("closing readMsg failed: %v", err)
}
// wait for readerLoop to return
c.wg.Wait()
// Don't bother sending a vl_api_sockclnt_delete_t message,
// just close the socket.
if err := c.disconnect(); err != nil {
return err
}
return nil
}
const defaultBufferSize = 4096
func (c *Client) connect(sockAddr string) error {
addr := &net.UnixAddr{Name: sockAddr, Net: "unix"}
log.Debugf("Connecting to: %v", c.socketPath)
conn, err := net.DialUnix("unix", nil, addr)
if err != nil {
// we try different type of socket for backwards compatbility with VPP<=19.04
if strings.Contains(err.Error(), "wrong type for socket") {
addr.Net = "unixpacket"
log.Debugf("%s, retrying connect with type unixpacket", err)
conn, err = net.DialUnix("unixpacket", nil, addr)
}
if err != nil {
log.Debugf("Connecting to socket %s failed: %s", addr, err)
return err
}
}
c.conn = conn
log.Debugf("Connected to socket (local addr: %v)", c.conn.LocalAddr().(*net.UnixAddr))
c.reader = bufio.NewReaderSize(c.conn, defaultBufferSize)
c.writer = bufio.NewWriterSize(c.conn, defaultBufferSize)
return nil
}
func (c *Client) disconnect() error {
log.Debugf("Closing socket")
// cleanup msg table
c.setMsgTable(make(map[string]uint16), 0)
if err := c.conn.Close(); err != nil {
log.Debugln("Closing socket failed:", err)
return err
}
return nil
}
const (
sockCreateMsgId = 15 // hard-coded sockclnt_create message ID
createMsgContext = byte(123)
deleteMsgContext = byte(124)
)
func (c *Client) open() error {
var msgCodec = codec.DefaultCodec
// Request socket client create
req := &memclnt.SockclntCreate{
Name: c.clientName,
}
msg, err := msgCodec.EncodeMsg(req, sockCreateMsgId)
if err != nil {
log.Debugln("Encode error:", err)
return err
}
// set non-0 context
msg[5] = createMsgContext
if err := c.writeMsg(msg); err != nil {
log.Debugln("Write error: ", err)
return err
}
msgReply, err := c.readMsgTimeout(nil, c.connectTimeout)
if err != nil {
log.Println("Read error:", err)
return err
}
reply := new(memclnt.SockclntCreateReply)
if err := msgCodec.DecodeMsg(msgReply, reply); err != nil {
log.Println("Decoding sockclnt_create_reply failed:", err)
return err
} else if reply.Response != 0 {
return fmt.Errorf("sockclnt_create_reply: response error (%d)", reply.Response)
}
log.Debugf("SockclntCreateReply: Response=%v Index=%v Count=%v",
reply.Response, reply.Index, reply.Count)
c.clientIndex = reply.Index
msgTable := make(map[string]uint16, reply.Count)
var sockDelMsgId uint16
for _, x := range reply.MessageTable {
msgName := strings.Split(x.Name, "\x00")[0]
name := strings.TrimSuffix(msgName, "\x13")
msgTable[name] = x.Index
if strings.HasPrefix(name, "sockclnt_delete_") {
sockDelMsgId = x.Index
}
if debugMsgIds {
log.Debugf(" - %4d: %q", x.Index, name)
}
}
c.setMsgTable(msgTable, sockDelMsgId)
return nil
}
func (c *Client) setMsgTable(msgTable map[string]uint16, sockDelMsgId uint16) {
c.msgTableMu.Lock()
defer c.msgTableMu.Unlock()
c.msgTable = msgTable
c.sockDelMsgId = sockDelMsgId
}
func (c *Client) GetMsgID(msgName string, msgCrc string) (uint16, error) {
c.msgTableMu.RLock()
defer c.msgTableMu.RUnlock()
if msgID, ok := c.msgTable[msgName+"_"+msgCrc]; ok {
return msgID, nil
}
return 0, &adapter.UnknownMsgError{
MsgName: msgName,
MsgCrc: msgCrc,
}
}
func (c *Client) SendMsg(context uint32, data []byte) error {
if len(data) < 10 {
return fmt.Errorf("invalid message data, length must be at least 10 bytes")
}
setMsgRequestHeader(data, c.clientIndex, context)
if debug {
log.Debugf("sendMsg (%d) context=%v client=%d: % 02X", len(data), context, c.clientIndex, data)
}
if err := c.writeMsg(data); err != nil {
log.Debugln("writeMsg error: ", err)
return err
}
return nil
}
// setMsgRequestHeader sets client index and context in the message request header
//
// Message request has following structure:
//
// type msgRequestHeader struct {
// MsgID uint16
// ClientIndex uint32
// Context uint32
// }
func setMsgRequestHeader(data []byte, clientIndex, context uint32) {
// message ID is already set
binary.BigEndian.PutUint32(data[2:6], clientIndex)
binary.BigEndian.PutUint32(data[6:10], context)
}
// writeMsg writes the 16-byte transport header followed by the message
// payload and flushes the buffered writer.
//
// Fix: the pooled header buffer was only returned to the pool on the
// success path; a header-write error leaked the pool entry. The Put is
// now deferred so it runs on every return path.
func (c *Client) writeMsg(msg []byte) error {
	// we lock to prevent mixing multiple message writes
	c.writeMu.Lock()
	defer c.writeMu.Unlock()
	header, ok := c.headerPool.Get().(*[]byte)
	if !ok {
		return fmt.Errorf("failed to get header from pool")
	}
	defer c.headerPool.Put(header)
	if err := writeMsgHeader(c.writer, *header, len(msg)); err != nil {
		return err
	}
	if err := writeMsgData(c.writer, msg, c.writer.Size()); err != nil {
		return err
	}
	if err := c.writer.Flush(); err != nil {
		return err
	}
	log.Debugf(" -- writeMsg done")
	return nil
}
func writeMsgHeader(w io.Writer, header []byte, dataLen int) error {
binary.BigEndian.PutUint32(header[8:12], uint32(dataLen))
n, err := w.Write(header)
if err != nil {
return err
}
if debug {
log.Debugf(" - header sent (%d/%d): % 0X", n, len(header), header)
}
return nil
}
// writeMsgData writes msg to w in chunks of at most writerSize bytes.
//
// Fixes: the previous loop condition (i <= len/writerSize) issued an extra
// zero-length Write whenever len(msg) was an exact multiple of writerSize
// (including len(msg)==0), and it panicked with a division by zero when
// writerSize was 0. Iterating by offset avoids both.
func writeMsgData(w io.Writer, msg []byte, writerSize int) error {
	if writerSize <= 0 {
		// degenerate chunk size: write the whole message in one call
		_, err := w.Write(msg)
		return err
	}
	for off := 0; off < len(msg); off += writerSize {
		end := off + writerSize
		if end > len(msg) {
			end = len(msg)
		}
		n, err := w.Write(msg[off:end])
		if err != nil {
			return err
		}
		if debug {
			log.Debugf(" - data sent end=%d (%d/%d): % 0X", end, n, len(msg), msg)
		}
	}
	return nil
}
func (c *Client) readerLoop() {
defer c.wg.Done()
defer log.Debugf("reader loop done")
var buf [8192]byte
for {
select {
case <-c.quit:
return
default:
}
msg, err := c.readMsg(buf[:])
if err != nil {
if isClosedError(err) {
return
}
log.Debugf("readMsg error: %v", err)
continue
}
msgID, context := getMsgReplyHeader(msg)
if debug {
log.Debugf("recvMsg (%d) msgID=%d context=%v", len(msg), msgID, context)
}
c.msgCallback(msgID, msg)
}
}
// getMsgReplyHeader gets message ID and context from the message reply header
//
// Message reply has the following structure:
//
// type msgReplyHeader struct {
// MsgID uint16
// Context uint32
// }
func getMsgReplyHeader(msg []byte) (msgID uint16, context uint32) {
msgID = binary.BigEndian.Uint16(msg[0:2])
context = binary.BigEndian.Uint32(msg[2:6])
return
}
func (c *Client) readMsgTimeout(buf []byte, timeout time.Duration) ([]byte, error) {
// set read deadline
readDeadline := time.Now().Add(timeout)
if err := c.conn.SetReadDeadline(readDeadline); err != nil {
return nil, err
}
// read message
msgReply, err := c.readMsg(buf)
if err != nil {
return nil, err
}
// reset read deadline
if err := c.conn.SetReadDeadline(time.Time{}); err != nil {
return nil, err
}
return msgReply, nil
}
func (c *Client) readMsg(buf []byte) ([]byte, error) {
log.Debug("reading msg..")
header, ok := c.headerPool.Get().(*[]byte)
if !ok {
return nil, fmt.Errorf("failed to get header from pool")
}
msgLen, err := readMsgHeader(c.reader, *header)
if err != nil {
return nil, err
}
c.headerPool.Put(header)
msg, err := readMsgData(c.reader, buf, msgLen)
if err != nil {
return nil, err
}
log.Debugf(" -- readMsg done (buffered: %d)", c.reader.Buffered())
return msg, nil
}
func readMsgHeader(r io.Reader, header []byte) (int, error) |
// readMsgData reads exactly dataLen bytes of message payload from r,
// reusing buf when it is large enough and allocating otherwise.
//
// Improvement: the hand-rolled short-read loop (first Read, then a
// remain/view bookkeeping loop with an explicit zero-progress check) is
// replaced by io.ReadFull, which implements exactly that contract and
// fails with io.ErrUnexpectedEOF when the stream ends early.
func readMsgData(r io.Reader, buf []byte, dataLen int) ([]byte, error) {
	var msg []byte
	if buf == nil || len(buf) < dataLen {
		msg = make([]byte, dataLen)
	} else {
		msg = buf[0:dataLen]
	}
	n, err := io.ReadFull(r, msg)
	if err != nil {
		return nil, err
	}
	if debug {
		log.Debugf(" - read data (%d bytes): % 0X", n, msg[:n])
	}
	return msg, nil
}
func isClosedError(err error) bool {
if errors.Is(err, io.EOF) {
return true
}
return strings.HasSuffix(err.Error(), "use of closed network connection")
}
| {
n, err := io.ReadAtLeast(r, header, 16)
if err != nil {
return 0, err
}
if n == 0 {
log.Debugln("zero bytes header")
return 0, nil
} else if n != 16 {
log.Debugf("invalid header (%d bytes): % 0X", n, header[:n])
return 0, fmt.Errorf("invalid header (expected 16 bytes, got %d)", n)
}
dataLen := binary.BigEndian.Uint32(header[8:12])
return int(dataLen), nil
} | identifier_body |
socketclient.go | // Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package socketclient
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
"go.fd.io/govpp/adapter"
"go.fd.io/govpp/binapi/memclnt"
"go.fd.io/govpp/codec"
)
const (
// DefaultSocketName is default VPP API socket file path.
DefaultSocketName = "/run/vpp/api.sock"
// DefaultClientName is used for identifying client in socket registration
DefaultClientName = "govppsock"
)
var (
// DefaultConnectTimeout is default timeout for connecting
DefaultConnectTimeout = time.Second * 3
// DefaultDisconnectTimeout is default timeout for discconnecting
DefaultDisconnectTimeout = time.Millisecond * 100
// MaxWaitReady defines maximum duration of waiting for socket file
MaxWaitReady = time.Second * 3
)
var (
debug = strings.Contains(os.Getenv("DEBUG_GOVPP"), "socketclient")
debugMsgIds = strings.Contains(os.Getenv("DEBUG_GOVPP"), "msgtable")
log logrus.FieldLogger
)
// SetLogger sets global logger.
// NOTE(review): the package-level log variable is written without
// synchronization; call this before connecting any client -- confirm.
func SetLogger(logger logrus.FieldLogger) {
	log = logger
}
// init installs the default package logger; debug level is enabled when
// the DEBUG_GOVPP environment variable contains "socketclient".
func init() {
	logger := logrus.New()
	if debug {
		logger.Level = logrus.DebugLevel
		logger.Debug("govpp: debug level enabled for socketclient")
	}
	log = logger.WithField("logger", "govpp/socketclient")
}
// Client is a VPP API client communicating over the VPP unix-socket
// transport: it dials the socket, registers via sockclnt_create, frames
// outgoing requests and dispatches incoming replies from a reader loop.
type Client struct {
	socketPath string // path to the VPP API socket file
	clientName string // name sent in the sockclnt_create registration
	conn   *net.UnixConn
	reader *bufio.Reader // buffered reads from conn
	writer *bufio.Writer // buffered writes to conn
	connectTimeout    time.Duration // read deadline for the sockclnt_create reply
	disconnectTimeout time.Duration // NOTE(review): set but never read in this file -- confirm use
	msgCallback adapter.MsgCallback // invoked for every received message
	clientIndex uint32 // index assigned by VPP in sockclnt_create_reply
	msgTable map[string]uint16 // "<name>_<crc>" -> message ID
	msgTableMu sync.RWMutex // guards msgTable and sockDelMsgId
	sockDelMsgId uint16 // ID of the sockclnt_delete message, from the table
	writeMu sync.Mutex // serializes whole-message writes
	headerPool *sync.Pool // pool of 16-byte header buffers (*[]byte)
	quit chan struct{} // closed to stop readerLoop
	wg   sync.WaitGroup // waits for readerLoop on disconnect
}
// NewVppClient returns a new Client using socket.
// If socket is empty string DefaultSocketName is used.
func NewVppClient(socket string) *Client {
	path := socket
	if path == "" {
		path = DefaultSocketName
	}
	// default callback: log and drop until SetMsgCallback is called
	dropMsg := func(msgID uint16, data []byte) {
		log.Debugf("no callback set, dropping message: ID=%v len=%d", msgID, len(data))
	}
	// pool entries are pointers to fixed 16-byte header buffers
	newHeader := func() interface{} {
		buf := make([]byte, 16)
		return &buf
	}
	return &Client{
		socketPath:        path,
		clientName:        DefaultClientName,
		connectTimeout:    DefaultConnectTimeout,
		disconnectTimeout: DefaultDisconnectTimeout,
		headerPool:        &sync.Pool{New: newHeader},
		msgCallback:       dropMsg,
	}
}
// SetClientName sets a client name used for identification.
// It only affects connections opened after the call.
func (c *Client) SetClientName(name string) {
	c.clientName = name
}
// SetConnectTimeout sets timeout used during connecting.
// The value is applied as the read deadline while waiting for the
// sockclnt_create reply in open().
func (c *Client) SetConnectTimeout(t time.Duration) {
	c.connectTimeout = t
}
// SetDisconnectTimeout sets timeout used during disconnecting.
// NOTE(review): the stored value is not read anywhere in this file --
// confirm whether it is still used.
func (c *Client) SetDisconnectTimeout(t time.Duration) {
	c.disconnectTimeout = t
}
// SetMsgCallback sets the callback for incoming messages.
// NOTE(review): the field is written without synchronization; set the
// callback before Connect to avoid racing the reader loop -- confirm.
func (c *Client) SetMsgCallback(cb adapter.MsgCallback) {
	log.Debug("SetMsgCallback")
	c.msgCallback = cb
}
// WaitReady checks if the socket file exists and if it does not exist waits for
// it for the duration defined by MaxWaitReady.
func (c *Client) WaitReady() error {
	// Walk the directory chain from the root down and wait for each
	// component; fsnotify can only watch directories that already exist.
	socketDir, _ := filepath.Split(c.socketPath)
	dirChain := strings.Split(filepath.ToSlash(filepath.Clean(socketDir)), "/")
	dir := "/"
	for _, dirElem := range dirChain {
		dir = filepath.Join(dir, dirElem)
		if err := waitForDir(dir); err != nil {
			return err
		}
		log.Debugf("dir ready: %v", dir)
	}
	// check if socket already exists
	if _, err := os.Stat(c.socketPath); err == nil {
		return nil // socket exists, we are ready
	} else if !errors.Is(err, fs.ErrNotExist) {
		log.Debugf("error is: %+v", err)
		return err // some other error occurred
	}
	log.Debugf("waiting for file: %v", c.socketPath)
	// socket does not exist, watch for it
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer func() {
		if err := watcher.Close(); err != nil {
			log.Debugf("failed to close file watcher: %v", err)
		}
	}()
	// start directory watcher
	d := filepath.Dir(c.socketPath)
	if err := watcher.Add(d); err != nil {
		log.Debugf("watcher add(%v) error: %v", d, err)
		return err
	}
	// NOTE(review): the socket could be created between the Stat above and
	// the watcher.Add -- the Create event would be missed and this call
	// would only return via the timeout; confirm whether a re-Stat after
	// Add is wanted.
	timeout := time.NewTimer(MaxWaitReady)
	defer timeout.Stop()
	for {
		select {
		case <-timeout.C:
			log.Debugf("watcher timeout after: %v", MaxWaitReady)
			return fmt.Errorf("timeout waiting (%s) for socket file: %s", MaxWaitReady, c.socketPath)
		case e := <-watcher.Errors:
			log.Debugf("watcher error: %+v", e)
			return e
		case ev := <-watcher.Events:
			log.Debugf("watcher event: %+v", ev)
			if ev.Name == c.socketPath && (ev.Op&fsnotify.Create) == fsnotify.Create {
				// socket created, we are ready
				return nil
			}
		}
	}
}
// waitForDir blocks until dir exists, watching its parent directory with
// fsnotify, or fails after MaxWaitReady.
func waitForDir(dir string) error {
	// check if dir already exists
	if _, err := os.Stat(dir); err == nil {
		return nil // dir exists, we are ready
	} else if !errors.Is(err, fs.ErrNotExist) {
		log.Debugf("error is: %+v", err)
		return err // some other error occurred
	}
	log.Debugf("waiting for dir: %v", dir)
	// dir does not exist, watch for it
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer func() {
		if err := watcher.Close(); err != nil {
			log.Debugf("failed to close file watcher: %v", err)
		}
	}()
	// start watching the parent directory for the creation of dir
	d := filepath.Dir(dir)
	if err := watcher.Add(d); err != nil {
		log.Debugf("watcher add (%v) error: %v", d, err)
		return err
	}
	timeout := time.NewTimer(MaxWaitReady)
	defer timeout.Stop()
	for {
		select {
		case <-timeout.C:
			log.Debugf("watcher timeout after: %v", MaxWaitReady)
			return fmt.Errorf("timeout waiting (%s) for directory: %s", MaxWaitReady, dir)
		case e := <-watcher.Errors:
			log.Debugf("watcher error: %+v", e)
			return e
		case ev := <-watcher.Events:
			log.Debugf("watcher event: %+v", ev)
			if ev.Name == dir && (ev.Op&fsnotify.Create) == fsnotify.Create {
				// dir created, we are ready
				return nil
			}
		}
	}
}
// Connect dials the VPP API socket, registers the client via the
// sockclnt_create handshake and starts the background reader loop.
func (c *Client) Connect() error {
	// check if socket exists
	if _, err := os.Stat(c.socketPath); os.IsNotExist(err) {
		return fmt.Errorf("VPP API socket file %s does not exist", c.socketPath)
	} else if err != nil {
		return fmt.Errorf("VPP API socket error: %v", err)
	}
	if err := c.connect(c.socketPath); err != nil {
		return err
	}
	// register with VPP; tear the socket down again if that fails
	if err := c.open(); err != nil {
		_ = c.disconnect()
		return err
	}
	c.quit = make(chan struct{})
	c.wg.Add(1)
	go c.readerLoop()
	return nil
}
// Disconnect stops the reader loop and closes the socket. It is a no-op
// when the client was never connected.
func (c *Client) Disconnect() error {
	if c.conn == nil {
		return nil
	}
	log.Debugf("Disconnecting..")
	close(c.quit)
	// CloseRead unblocks the read currently pending in readerLoop
	if err := c.conn.CloseRead(); err != nil {
		log.Debugf("closing readMsg failed: %v", err)
	}
	// wait for readerLoop to return
	c.wg.Wait()
	// Don't bother sending a vl_api_sockclnt_delete_t message,
	// just close the socket.
	if err := c.disconnect(); err != nil {
		return err
	}
	return nil
}
const defaultBufferSize = 4096
// connect dials the unix socket at sockAddr and wraps the connection in
// buffered reader/writer of defaultBufferSize bytes.
func (c *Client) connect(sockAddr string) error {
	addr := &net.UnixAddr{Name: sockAddr, Net: "unix"}
	log.Debugf("Connecting to: %v", c.socketPath)
	conn, err := net.DialUnix("unix", nil, addr)
	if err != nil {
		// we try a different type of socket for backwards compatibility with VPP<=19.04
		if strings.Contains(err.Error(), "wrong type for socket") {
			addr.Net = "unixpacket"
			log.Debugf("%s, retrying connect with type unixpacket", err)
			conn, err = net.DialUnix("unixpacket", nil, addr)
		}
		if err != nil {
			log.Debugf("Connecting to socket %s failed: %s", addr, err)
			return err
		}
	}
	c.conn = conn
	log.Debugf("Connected to socket (local addr: %v)", c.conn.LocalAddr().(*net.UnixAddr))
	c.reader = bufio.NewReaderSize(c.conn, defaultBufferSize)
	c.writer = bufio.NewWriterSize(c.conn, defaultBufferSize)
	return nil
}
// disconnect clears the message table and closes the underlying socket,
// returning the close error if any.
func (c *Client) disconnect() error {
	log.Debugf("Closing socket")
	// cleanup msg table
	c.setMsgTable(make(map[string]uint16), 0)
	err := c.conn.Close()
	if err != nil {
		log.Debugln("Closing socket failed:", err)
	}
	return err
}
const (
sockCreateMsgId = 15 // hard-coded sockclnt_create message ID
createMsgContext = byte(123)
deleteMsgContext = byte(124) |
// open performs the application-level handshake: it sends sockclnt_create,
// waits for sockclnt_create_reply within connectTimeout, records the
// assigned client index and builds the message-ID table from the reply.
func (c *Client) open() error {
	var msgCodec = codec.DefaultCodec
	// Request socket client create
	req := &memclnt.SockclntCreate{
		Name: c.clientName,
	}
	msg, err := msgCodec.EncodeMsg(req, sockCreateMsgId)
	if err != nil {
		log.Debugln("Encode error:", err)
		return err
	}
	// set non-0 context -- byte 5 is part of the context field that
	// follows the 2-byte message ID in this message's encoding
	msg[5] = createMsgContext
	if err := c.writeMsg(msg); err != nil {
		log.Debugln("Write error: ", err)
		return err
	}
	// the reply must arrive within connectTimeout
	msgReply, err := c.readMsgTimeout(nil, c.connectTimeout)
	if err != nil {
		log.Println("Read error:", err)
		return err
	}
	reply := new(memclnt.SockclntCreateReply)
	if err := msgCodec.DecodeMsg(msgReply, reply); err != nil {
		log.Println("Decoding sockclnt_create_reply failed:", err)
		return err
	} else if reply.Response != 0 {
		return fmt.Errorf("sockclnt_create_reply: response error (%d)", reply.Response)
	}
	log.Debugf("SockclntCreateReply: Response=%v Index=%v Count=%v",
		reply.Response, reply.Index, reply.Count)
	c.clientIndex = reply.Index
	msgTable := make(map[string]uint16, reply.Count)
	var sockDelMsgId uint16
	for _, x := range reply.MessageTable {
		// names are NUL-padded; keep only the part before the first NUL
		msgName := strings.Split(x.Name, "\x00")[0]
		// NOTE(review): a trailing 0x13 byte is stripped here --
		// presumably an artifact of VPP's table encoding; confirm.
		name := strings.TrimSuffix(msgName, "\x13")
		msgTable[name] = x.Index
		// remember the sockclnt_delete ID for a potential clean shutdown
		if strings.HasPrefix(name, "sockclnt_delete_") {
			sockDelMsgId = x.Index
		}
		if debugMsgIds {
			log.Debugf(" - %4d: %q", x.Index, name)
		}
	}
	c.setMsgTable(msgTable, sockDelMsgId)
	return nil
}
// setMsgTable replaces the message-ID table and the cached sockclnt_delete
// message ID under the write lock (GetMsgID takes the read lock).
func (c *Client) setMsgTable(msgTable map[string]uint16, sockDelMsgId uint16) {
	c.msgTableMu.Lock()
	defer c.msgTableMu.Unlock()
	c.msgTable = msgTable
	c.sockDelMsgId = sockDelMsgId
}
// GetMsgID returns the message ID registered for msgName with msgCrc, or
// an adapter.UnknownMsgError if VPP did not announce such a message.
func (c *Client) GetMsgID(msgName string, msgCrc string) (uint16, error) {
	key := msgName + "_" + msgCrc
	c.msgTableMu.RLock()
	msgID, ok := c.msgTable[key]
	c.msgTableMu.RUnlock()
	if !ok {
		return 0, &adapter.UnknownMsgError{
			MsgName: msgName,
			MsgCrc:  msgCrc,
		}
	}
	return msgID, nil
}
// SendMsg writes one encoded API request to the socket. data must already
// carry its 2-byte message ID; the client index and context are stamped
// into bytes 2..10 before the message is framed and written.
func (c *Client) SendMsg(context uint32, data []byte) error {
	// minimum request header: msgID(2) + clientIndex(4) + context(4)
	if len(data) < 10 {
		return fmt.Errorf("invalid message data, length must be at least 10 bytes")
	}
	setMsgRequestHeader(data, c.clientIndex, context)
	if debug {
		log.Debugf("sendMsg (%d) context=%v client=%d: % 02X", len(data), context, c.clientIndex, data)
	}
	if err := c.writeMsg(data); err != nil {
		log.Debugln("writeMsg error: ", err)
		return err
	}
	return nil
}
// setMsgRequestHeader sets client index and context in the message request header
//
// Message request has following structure:
//
// type msgRequestHeader struct {
// MsgID uint16
// ClientIndex uint32
// Context uint32
// }
func setMsgRequestHeader(data []byte, clientIndex, context uint32) {
// message ID is already set
binary.BigEndian.PutUint32(data[2:6], clientIndex)
binary.BigEndian.PutUint32(data[6:10], context)
}
// writeMsg writes the 16-byte transport header followed by the message
// payload and flushes the buffered writer.
//
// Fix: the pooled header buffer was only returned to the pool on the
// success path; a header-write error leaked the pool entry. The Put is
// now deferred so it runs on every return path.
func (c *Client) writeMsg(msg []byte) error {
	// we lock to prevent mixing multiple message writes
	c.writeMu.Lock()
	defer c.writeMu.Unlock()
	header, ok := c.headerPool.Get().(*[]byte)
	if !ok {
		return fmt.Errorf("failed to get header from pool")
	}
	defer c.headerPool.Put(header)
	if err := writeMsgHeader(c.writer, *header, len(msg)); err != nil {
		return err
	}
	if err := writeMsgData(c.writer, msg, c.writer.Size()); err != nil {
		return err
	}
	if err := c.writer.Flush(); err != nil {
		return err
	}
	log.Debugf(" -- writeMsg done")
	return nil
}
// writeMsgHeader writes the 16-byte transport header. Only the payload
// length in bytes 8..12 (big endian) is filled in here; the remaining
// bytes keep whatever the buffer holds -- pooled buffers start zeroed and
// are never written elsewhere, so they stay zero.
// NOTE(review): confirm VPP ignores the other header fields.
func writeMsgHeader(w io.Writer, header []byte, dataLen int) error {
	binary.BigEndian.PutUint32(header[8:12], uint32(dataLen))
	n, err := w.Write(header)
	if err != nil {
		return err
	}
	if debug {
		log.Debugf(" - header sent (%d/%d): % 0X", n, len(header), header)
	}
	return nil
}
// writeMsgData writes msg to w in chunks of at most writerSize bytes.
//
// Fixes: the previous loop condition (i <= len/writerSize) issued an extra
// zero-length Write whenever len(msg) was an exact multiple of writerSize
// (including len(msg)==0), and it panicked with a division by zero when
// writerSize was 0. Iterating by offset avoids both.
func writeMsgData(w io.Writer, msg []byte, writerSize int) error {
	if writerSize <= 0 {
		// degenerate chunk size: write the whole message in one call
		_, err := w.Write(msg)
		return err
	}
	for off := 0; off < len(msg); off += writerSize {
		end := off + writerSize
		if end > len(msg) {
			end = len(msg)
		}
		n, err := w.Write(msg[off:end])
		if err != nil {
			return err
		}
		if debug {
			log.Debugf(" - data sent end=%d (%d/%d): % 0X", end, n, len(msg), msg)
		}
	}
	return nil
}
// readerLoop receives messages until quit is closed or the connection is
// torn down, dispatching each message to msgCallback.
func (c *Client) readerLoop() {
	defer c.wg.Done()
	defer log.Debugf("reader loop done")
	// reusable read buffer; larger messages are allocated inside readMsg
	var buf [8192]byte
	for {
		// non-blocking shutdown check before the next blocking read
		select {
		case <-c.quit:
			return
		default:
		}
		msg, err := c.readMsg(buf[:])
		if err != nil {
			// EOF / closed connection ends the loop; other errors are
			// logged and the loop keeps reading
			if isClosedError(err) {
				return
			}
			log.Debugf("readMsg error: %v", err)
			continue
		}
		msgID, context := getMsgReplyHeader(msg)
		if debug {
			log.Debugf("recvMsg (%d) msgID=%d context=%v", len(msg), msgID, context)
		}
		// NOTE(review): msg may alias buf and gets overwritten by the next
		// read -- the callback must copy data it wants to keep; confirm.
		c.msgCallback(msgID, msg)
	}
}
// getMsgReplyHeader gets message ID and context from the message reply header
//
// Message reply has the following structure:
//
//	type msgReplyHeader struct {
//		MsgID uint16
//		Context uint32
//	}
func getMsgReplyHeader(msg []byte) (msgID uint16, context uint32) {
	return binary.BigEndian.Uint16(msg[0:2]), binary.BigEndian.Uint32(msg[2:6])
}
// readMsgTimeout reads one message, applying timeout as the connection's
// read deadline for the duration of the call and clearing it afterwards.
func (c *Client) readMsgTimeout(buf []byte, timeout time.Duration) ([]byte, error) {
	// arm the read deadline
	if err := c.conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
		return nil, err
	}
	msgReply, err := c.readMsg(buf)
	if err != nil {
		return nil, err
	}
	// clear the deadline so subsequent reads block indefinitely again
	if err := c.conn.SetReadDeadline(time.Time{}); err != nil {
		return nil, err
	}
	return msgReply, nil
}
// readMsg reads one framed message: the 16-byte header first (for the
// payload length), then the payload into buf or a fresh allocation.
//
// Fix: the pooled header buffer was only returned to the pool on the
// success path; a header-read error leaked the pool entry. The Put is
// now deferred so it runs on every return path.
func (c *Client) readMsg(buf []byte) ([]byte, error) {
	log.Debug("reading msg..")
	header, ok := c.headerPool.Get().(*[]byte)
	if !ok {
		return nil, fmt.Errorf("failed to get header from pool")
	}
	defer c.headerPool.Put(header)
	msgLen, err := readMsgHeader(c.reader, *header)
	if err != nil {
		return nil, err
	}
	msg, err := readMsgData(c.reader, buf, msgLen)
	if err != nil {
		return nil, err
	}
	log.Debugf(" -- readMsg done (buffered: %d)", c.reader.Buffered())
	return msg, nil
}
func readMsgHeader(r io.Reader, header []byte) (int, error) {
n, err := io.ReadAtLeast(r, header, 16)
if err != nil {
return 0, err
}
if n == 0 {
log.Debugln("zero bytes header")
return 0, nil
} else if n != 16 {
log.Debugf("invalid header (%d bytes): % 0X", n, header[:n])
return 0, fmt.Errorf("invalid header (expected 16 bytes, got %d)", n)
}
dataLen := binary.BigEndian.Uint32(header[8:12])
return int(dataLen), nil
}
// readMsgData reads exactly dataLen bytes of message payload from r,
// reusing buf when it is large enough and allocating otherwise.
//
// Improvement: the hand-rolled short-read loop (first Read, then a
// remain/view bookkeeping loop with an explicit zero-progress check) is
// replaced by io.ReadFull, which implements exactly that contract and
// fails with io.ErrUnexpectedEOF when the stream ends early.
func readMsgData(r io.Reader, buf []byte, dataLen int) ([]byte, error) {
	var msg []byte
	if buf == nil || len(buf) < dataLen {
		msg = make([]byte, dataLen)
	} else {
		msg = buf[0:dataLen]
	}
	n, err := io.ReadFull(r, msg)
	if err != nil {
		return nil, err
	}
	if debug {
		log.Debugf(" - read data (%d bytes): % 0X", n, msg[:n])
	}
	return msg, nil
}
func isClosedError(err error) bool {
if errors.Is(err, io.EOF) {
return true
}
return strings.HasSuffix(err.Error(), "use of closed network connection")
} | ) | random_line_split |
socketclient.go | // Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package socketclient
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
"go.fd.io/govpp/adapter"
"go.fd.io/govpp/binapi/memclnt"
"go.fd.io/govpp/codec"
)
const (
// DefaultSocketName is default VPP API socket file path.
DefaultSocketName = "/run/vpp/api.sock"
// DefaultClientName is used for identifying client in socket registration
DefaultClientName = "govppsock"
)
var (
// DefaultConnectTimeout is default timeout for connecting
DefaultConnectTimeout = time.Second * 3
// DefaultDisconnectTimeout is default timeout for discconnecting
DefaultDisconnectTimeout = time.Millisecond * 100
// MaxWaitReady defines maximum duration of waiting for socket file
MaxWaitReady = time.Second * 3
)
var (
debug = strings.Contains(os.Getenv("DEBUG_GOVPP"), "socketclient")
debugMsgIds = strings.Contains(os.Getenv("DEBUG_GOVPP"), "msgtable")
log logrus.FieldLogger
)
// SetLogger sets global logger.
func | (logger logrus.FieldLogger) {
log = logger
}
func init() {
logger := logrus.New()
if debug {
logger.Level = logrus.DebugLevel
logger.Debug("govpp: debug level enabled for socketclient")
}
log = logger.WithField("logger", "govpp/socketclient")
}
type Client struct {
socketPath string
clientName string
conn *net.UnixConn
reader *bufio.Reader
writer *bufio.Writer
connectTimeout time.Duration
disconnectTimeout time.Duration
msgCallback adapter.MsgCallback
clientIndex uint32
msgTable map[string]uint16
msgTableMu sync.RWMutex
sockDelMsgId uint16
writeMu sync.Mutex
headerPool *sync.Pool
quit chan struct{}
wg sync.WaitGroup
}
// NewVppClient returns a new Client using socket.
// If socket is empty string DefaultSocketName is used.
func NewVppClient(socket string) *Client {
if socket == "" {
socket = DefaultSocketName
}
return &Client{
socketPath: socket,
clientName: DefaultClientName,
connectTimeout: DefaultConnectTimeout,
disconnectTimeout: DefaultDisconnectTimeout,
headerPool: &sync.Pool{New: func() interface{} {
x := make([]byte, 16)
return &x
}},
msgCallback: func(msgID uint16, data []byte) {
log.Debugf("no callback set, dropping message: ID=%v len=%d", msgID, len(data))
},
}
}
// SetClientName sets a client name used for identification.
func (c *Client) SetClientName(name string) {
c.clientName = name
}
// SetConnectTimeout sets timeout used during connecting.
func (c *Client) SetConnectTimeout(t time.Duration) {
c.connectTimeout = t
}
// SetDisconnectTimeout sets timeout used during disconnecting.
func (c *Client) SetDisconnectTimeout(t time.Duration) {
c.disconnectTimeout = t
}
// SetMsgCallback sets the callback for incoming messages.
func (c *Client) SetMsgCallback(cb adapter.MsgCallback) {
log.Debug("SetMsgCallback")
c.msgCallback = cb
}
// WaitReady checks if the socket file exists and if it does not exist waits for
// it for the duration defined by MaxWaitReady.
func (c *Client) WaitReady() error {
socketDir, _ := filepath.Split(c.socketPath)
dirChain := strings.Split(filepath.ToSlash(filepath.Clean(socketDir)), "/")
dir := "/"
for _, dirElem := range dirChain {
dir = filepath.Join(dir, dirElem)
if err := waitForDir(dir); err != nil {
return err
}
log.Debugf("dir ready: %v", dir)
}
// check if socket already exists
if _, err := os.Stat(c.socketPath); err == nil {
return nil // socket exists, we are ready
} else if !errors.Is(err, fs.ErrNotExist) {
log.Debugf("error is: %+v", err)
return err // some other error occurred
}
log.Debugf("waiting for file: %v", c.socketPath)
// socket does not exist, watch for it
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() {
if err := watcher.Close(); err != nil {
log.Debugf("failed to close file watcher: %v", err)
}
}()
// start directory watcher
d := filepath.Dir(c.socketPath)
if err := watcher.Add(d); err != nil {
log.Debugf("watcher add(%v) error: %v", d, err)
return err
}
timeout := time.NewTimer(MaxWaitReady)
defer timeout.Stop()
for {
select {
case <-timeout.C:
log.Debugf("watcher timeout after: %v", MaxWaitReady)
return fmt.Errorf("timeout waiting (%s) for socket file: %s", MaxWaitReady, c.socketPath)
case e := <-watcher.Errors:
log.Debugf("watcher error: %+v", e)
return e
case ev := <-watcher.Events:
log.Debugf("watcher event: %+v", ev)
if ev.Name == c.socketPath && (ev.Op&fsnotify.Create) == fsnotify.Create {
// socket created, we are ready
return nil
}
}
}
}
func waitForDir(dir string) error {
// check if dir already exists
if _, err := os.Stat(dir); err == nil {
return nil // dir exists, we are ready
} else if !errors.Is(err, fs.ErrNotExist) {
log.Debugf("error is: %+v", err)
return err // some other error occurred
}
log.Debugf("waiting for dir: %v", dir)
// dir does not exist, watch for it
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() {
if err := watcher.Close(); err != nil {
log.Debugf("failed to close file watcher: %v", err)
}
}()
// start watching directory
d := filepath.Dir(dir)
if err := watcher.Add(d); err != nil {
log.Debugf("watcher add (%v) error: %v", d, err)
return err
}
timeout := time.NewTimer(MaxWaitReady)
defer timeout.Stop()
for {
select {
case <-timeout.C:
log.Debugf("watcher timeout after: %v", MaxWaitReady)
return fmt.Errorf("timeout waiting (%s) for directory: %s", MaxWaitReady, dir)
case e := <-watcher.Errors:
log.Debugf("watcher error: %+v", e)
return e
case ev := <-watcher.Events:
log.Debugf("watcher event: %+v", ev)
if ev.Name == dir && (ev.Op&fsnotify.Create) == fsnotify.Create {
// socket created, we are ready
return nil
}
}
}
}
func (c *Client) Connect() error {
// check if socket exists
if _, err := os.Stat(c.socketPath); os.IsNotExist(err) {
return fmt.Errorf("VPP API socket file %s does not exist", c.socketPath)
} else if err != nil {
return fmt.Errorf("VPP API socket error: %v", err)
}
if err := c.connect(c.socketPath); err != nil {
return err
}
if err := c.open(); err != nil {
_ = c.disconnect()
return err
}
c.quit = make(chan struct{})
c.wg.Add(1)
go c.readerLoop()
return nil
}
func (c *Client) Disconnect() error {
if c.conn == nil {
return nil
}
log.Debugf("Disconnecting..")
close(c.quit)
if err := c.conn.CloseRead(); err != nil {
log.Debugf("closing readMsg failed: %v", err)
}
// wait for readerLoop to return
c.wg.Wait()
// Don't bother sending a vl_api_sockclnt_delete_t message,
// just close the socket.
if err := c.disconnect(); err != nil {
return err
}
return nil
}
const defaultBufferSize = 4096
func (c *Client) connect(sockAddr string) error {
addr := &net.UnixAddr{Name: sockAddr, Net: "unix"}
log.Debugf("Connecting to: %v", c.socketPath)
conn, err := net.DialUnix("unix", nil, addr)
if err != nil {
// we try different type of socket for backwards compatbility with VPP<=19.04
if strings.Contains(err.Error(), "wrong type for socket") {
addr.Net = "unixpacket"
log.Debugf("%s, retrying connect with type unixpacket", err)
conn, err = net.DialUnix("unixpacket", nil, addr)
}
if err != nil {
log.Debugf("Connecting to socket %s failed: %s", addr, err)
return err
}
}
c.conn = conn
log.Debugf("Connected to socket (local addr: %v)", c.conn.LocalAddr().(*net.UnixAddr))
c.reader = bufio.NewReaderSize(c.conn, defaultBufferSize)
c.writer = bufio.NewWriterSize(c.conn, defaultBufferSize)
return nil
}
func (c *Client) disconnect() error {
log.Debugf("Closing socket")
// cleanup msg table
c.setMsgTable(make(map[string]uint16), 0)
if err := c.conn.Close(); err != nil {
log.Debugln("Closing socket failed:", err)
return err
}
return nil
}
const (
sockCreateMsgId = 15 // hard-coded sockclnt_create message ID
createMsgContext = byte(123)
deleteMsgContext = byte(124)
)
func (c *Client) open() error {
var msgCodec = codec.DefaultCodec
// Request socket client create
req := &memclnt.SockclntCreate{
Name: c.clientName,
}
msg, err := msgCodec.EncodeMsg(req, sockCreateMsgId)
if err != nil {
log.Debugln("Encode error:", err)
return err
}
// set non-0 context
msg[5] = createMsgContext
if err := c.writeMsg(msg); err != nil {
log.Debugln("Write error: ", err)
return err
}
msgReply, err := c.readMsgTimeout(nil, c.connectTimeout)
if err != nil {
log.Println("Read error:", err)
return err
}
reply := new(memclnt.SockclntCreateReply)
if err := msgCodec.DecodeMsg(msgReply, reply); err != nil {
log.Println("Decoding sockclnt_create_reply failed:", err)
return err
} else if reply.Response != 0 {
return fmt.Errorf("sockclnt_create_reply: response error (%d)", reply.Response)
}
log.Debugf("SockclntCreateReply: Response=%v Index=%v Count=%v",
reply.Response, reply.Index, reply.Count)
c.clientIndex = reply.Index
msgTable := make(map[string]uint16, reply.Count)
var sockDelMsgId uint16
for _, x := range reply.MessageTable {
msgName := strings.Split(x.Name, "\x00")[0]
name := strings.TrimSuffix(msgName, "\x13")
msgTable[name] = x.Index
if strings.HasPrefix(name, "sockclnt_delete_") {
sockDelMsgId = x.Index
}
if debugMsgIds {
log.Debugf(" - %4d: %q", x.Index, name)
}
}
c.setMsgTable(msgTable, sockDelMsgId)
return nil
}
func (c *Client) setMsgTable(msgTable map[string]uint16, sockDelMsgId uint16) {
c.msgTableMu.Lock()
defer c.msgTableMu.Unlock()
c.msgTable = msgTable
c.sockDelMsgId = sockDelMsgId
}
func (c *Client) GetMsgID(msgName string, msgCrc string) (uint16, error) {
c.msgTableMu.RLock()
defer c.msgTableMu.RUnlock()
if msgID, ok := c.msgTable[msgName+"_"+msgCrc]; ok {
return msgID, nil
}
return 0, &adapter.UnknownMsgError{
MsgName: msgName,
MsgCrc: msgCrc,
}
}
func (c *Client) SendMsg(context uint32, data []byte) error {
if len(data) < 10 {
return fmt.Errorf("invalid message data, length must be at least 10 bytes")
}
setMsgRequestHeader(data, c.clientIndex, context)
if debug {
log.Debugf("sendMsg (%d) context=%v client=%d: % 02X", len(data), context, c.clientIndex, data)
}
if err := c.writeMsg(data); err != nil {
log.Debugln("writeMsg error: ", err)
return err
}
return nil
}
// setMsgRequestHeader sets client index and context in the message request header
//
// Message request has following structure:
//
// type msgRequestHeader struct {
// MsgID uint16
// ClientIndex uint32
// Context uint32
// }
func setMsgRequestHeader(data []byte, clientIndex, context uint32) {
// message ID is already set
binary.BigEndian.PutUint32(data[2:6], clientIndex)
binary.BigEndian.PutUint32(data[6:10], context)
}
func (c *Client) writeMsg(msg []byte) error {
// we lock to prevent mixing multiple message writes
c.writeMu.Lock()
defer c.writeMu.Unlock()
header, ok := c.headerPool.Get().(*[]byte)
if !ok {
return fmt.Errorf("failed to get header from pool")
}
err := writeMsgHeader(c.writer, *header, len(msg))
if err != nil {
return err
}
c.headerPool.Put(header)
if err := writeMsgData(c.writer, msg, c.writer.Size()); err != nil {
return err
}
if err := c.writer.Flush(); err != nil {
return err
}
log.Debugf(" -- writeMsg done")
return nil
}
func writeMsgHeader(w io.Writer, header []byte, dataLen int) error {
binary.BigEndian.PutUint32(header[8:12], uint32(dataLen))
n, err := w.Write(header)
if err != nil {
return err
}
if debug {
log.Debugf(" - header sent (%d/%d): % 0X", n, len(header), header)
}
return nil
}
func writeMsgData(w io.Writer, msg []byte, writerSize int) error {
for i := 0; i <= len(msg)/writerSize; i++ {
x := i*writerSize + writerSize
if x > len(msg) {
x = len(msg)
}
if debug {
log.Debugf(" - x=%v i=%v len=%v mod=%v", x, i, len(msg), len(msg)/writerSize)
}
n, err := w.Write(msg[i*writerSize : x])
if err != nil {
return err
}
if debug {
log.Debugf(" - data sent x=%d (%d/%d): % 0X", x, n, len(msg), msg)
}
}
return nil
}
func (c *Client) readerLoop() {
defer c.wg.Done()
defer log.Debugf("reader loop done")
var buf [8192]byte
for {
select {
case <-c.quit:
return
default:
}
msg, err := c.readMsg(buf[:])
if err != nil {
if isClosedError(err) {
return
}
log.Debugf("readMsg error: %v", err)
continue
}
msgID, context := getMsgReplyHeader(msg)
if debug {
log.Debugf("recvMsg (%d) msgID=%d context=%v", len(msg), msgID, context)
}
c.msgCallback(msgID, msg)
}
}
// getMsgReplyHeader gets message ID and context from the message reply header
//
// Message reply has the following structure:
//
// type msgReplyHeader struct {
// MsgID uint16
// Context uint32
// }
func getMsgReplyHeader(msg []byte) (msgID uint16, context uint32) {
msgID = binary.BigEndian.Uint16(msg[0:2])
context = binary.BigEndian.Uint32(msg[2:6])
return
}
func (c *Client) readMsgTimeout(buf []byte, timeout time.Duration) ([]byte, error) {
// set read deadline
readDeadline := time.Now().Add(timeout)
if err := c.conn.SetReadDeadline(readDeadline); err != nil {
return nil, err
}
// read message
msgReply, err := c.readMsg(buf)
if err != nil {
return nil, err
}
// reset read deadline
if err := c.conn.SetReadDeadline(time.Time{}); err != nil {
return nil, err
}
return msgReply, nil
}
func (c *Client) readMsg(buf []byte) ([]byte, error) {
log.Debug("reading msg..")
header, ok := c.headerPool.Get().(*[]byte)
if !ok {
return nil, fmt.Errorf("failed to get header from pool")
}
msgLen, err := readMsgHeader(c.reader, *header)
if err != nil {
return nil, err
}
c.headerPool.Put(header)
msg, err := readMsgData(c.reader, buf, msgLen)
if err != nil {
return nil, err
}
log.Debugf(" -- readMsg done (buffered: %d)", c.reader.Buffered())
return msg, nil
}
func readMsgHeader(r io.Reader, header []byte) (int, error) {
n, err := io.ReadAtLeast(r, header, 16)
if err != nil {
return 0, err
}
if n == 0 {
log.Debugln("zero bytes header")
return 0, nil
} else if n != 16 {
log.Debugf("invalid header (%d bytes): % 0X", n, header[:n])
return 0, fmt.Errorf("invalid header (expected 16 bytes, got %d)", n)
}
dataLen := binary.BigEndian.Uint32(header[8:12])
return int(dataLen), nil
}
func readMsgData(r io.Reader, buf []byte, dataLen int) ([]byte, error) {
var msg []byte
if buf == nil || len(buf) < dataLen {
msg = make([]byte, dataLen)
} else {
msg = buf[0:dataLen]
}
n, err := r.Read(msg)
if err != nil {
return nil, err
}
if debug {
log.Debugf(" - read data (%d bytes): % 0X", n, msg[:n])
}
if dataLen > n {
remain := dataLen - n
log.Debugf("continue reading remaining %d bytes", remain)
view := msg[n:]
for remain > 0 {
nbytes, err := r.Read(view)
if err != nil {
return nil, err
} else if nbytes == 0 {
return nil, fmt.Errorf("zero nbytes")
}
remain -= nbytes
log.Debugf("another data received: %d bytes (remain: %d)", nbytes, remain)
view = view[nbytes:]
}
}
return msg, nil
}
func isClosedError(err error) bool {
if errors.Is(err, io.EOF) {
return true
}
return strings.HasSuffix(err.Error(), "use of closed network connection")
}
| SetLogger | identifier_name |
socketclient.go | // Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package socketclient
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/sirupsen/logrus"
"go.fd.io/govpp/adapter"
"go.fd.io/govpp/binapi/memclnt"
"go.fd.io/govpp/codec"
)
const (
// DefaultSocketName is default VPP API socket file path.
DefaultSocketName = "/run/vpp/api.sock"
// DefaultClientName is used for identifying client in socket registration
DefaultClientName = "govppsock"
)
var (
// DefaultConnectTimeout is default timeout for connecting
DefaultConnectTimeout = time.Second * 3
// DefaultDisconnectTimeout is default timeout for discconnecting
DefaultDisconnectTimeout = time.Millisecond * 100
// MaxWaitReady defines maximum duration of waiting for socket file
MaxWaitReady = time.Second * 3
)
var (
debug = strings.Contains(os.Getenv("DEBUG_GOVPP"), "socketclient")
debugMsgIds = strings.Contains(os.Getenv("DEBUG_GOVPP"), "msgtable")
log logrus.FieldLogger
)
// SetLogger sets global logger.
func SetLogger(logger logrus.FieldLogger) {
log = logger
}
func init() {
logger := logrus.New()
if debug {
logger.Level = logrus.DebugLevel
logger.Debug("govpp: debug level enabled for socketclient")
}
log = logger.WithField("logger", "govpp/socketclient")
}
type Client struct {
socketPath string
clientName string
conn *net.UnixConn
reader *bufio.Reader
writer *bufio.Writer
connectTimeout time.Duration
disconnectTimeout time.Duration
msgCallback adapter.MsgCallback
clientIndex uint32
msgTable map[string]uint16
msgTableMu sync.RWMutex
sockDelMsgId uint16
writeMu sync.Mutex
headerPool *sync.Pool
quit chan struct{}
wg sync.WaitGroup
}
// NewVppClient returns a new Client using socket.
// If socket is empty string DefaultSocketName is used.
func NewVppClient(socket string) *Client {
if socket == "" {
socket = DefaultSocketName
}
return &Client{
socketPath: socket,
clientName: DefaultClientName,
connectTimeout: DefaultConnectTimeout,
disconnectTimeout: DefaultDisconnectTimeout,
headerPool: &sync.Pool{New: func() interface{} {
x := make([]byte, 16)
return &x
}},
msgCallback: func(msgID uint16, data []byte) {
log.Debugf("no callback set, dropping message: ID=%v len=%d", msgID, len(data))
},
}
}
// SetClientName sets a client name used for identification.
func (c *Client) SetClientName(name string) {
c.clientName = name
}
// SetConnectTimeout sets timeout used during connecting.
func (c *Client) SetConnectTimeout(t time.Duration) {
c.connectTimeout = t
}
// SetDisconnectTimeout sets timeout used during disconnecting.
func (c *Client) SetDisconnectTimeout(t time.Duration) {
c.disconnectTimeout = t
}
// SetMsgCallback sets the callback for incoming messages.
func (c *Client) SetMsgCallback(cb adapter.MsgCallback) {
log.Debug("SetMsgCallback")
c.msgCallback = cb
}
// WaitReady checks if the socket file exists and if it does not exist waits for
// it for the duration defined by MaxWaitReady.
func (c *Client) WaitReady() error {
socketDir, _ := filepath.Split(c.socketPath)
dirChain := strings.Split(filepath.ToSlash(filepath.Clean(socketDir)), "/")
dir := "/"
for _, dirElem := range dirChain {
dir = filepath.Join(dir, dirElem)
if err := waitForDir(dir); err != nil {
return err
}
log.Debugf("dir ready: %v", dir)
}
// check if socket already exists
if _, err := os.Stat(c.socketPath); err == nil {
return nil // socket exists, we are ready
} else if !errors.Is(err, fs.ErrNotExist) {
log.Debugf("error is: %+v", err)
return err // some other error occurred
}
log.Debugf("waiting for file: %v", c.socketPath)
// socket does not exist, watch for it
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() {
if err := watcher.Close(); err != nil {
log.Debugf("failed to close file watcher: %v", err)
}
}()
// start directory watcher
d := filepath.Dir(c.socketPath)
if err := watcher.Add(d); err != nil {
log.Debugf("watcher add(%v) error: %v", d, err)
return err
}
timeout := time.NewTimer(MaxWaitReady)
defer timeout.Stop()
for {
select {
case <-timeout.C:
log.Debugf("watcher timeout after: %v", MaxWaitReady)
return fmt.Errorf("timeout waiting (%s) for socket file: %s", MaxWaitReady, c.socketPath)
case e := <-watcher.Errors:
log.Debugf("watcher error: %+v", e)
return e
case ev := <-watcher.Events:
log.Debugf("watcher event: %+v", ev)
if ev.Name == c.socketPath && (ev.Op&fsnotify.Create) == fsnotify.Create {
// socket created, we are ready
return nil
}
}
}
}
func waitForDir(dir string) error {
// check if dir already exists
if _, err := os.Stat(dir); err == nil {
return nil // dir exists, we are ready
} else if !errors.Is(err, fs.ErrNotExist) {
log.Debugf("error is: %+v", err)
return err // some other error occurred
}
log.Debugf("waiting for dir: %v", dir)
// dir does not exist, watch for it
watcher, err := fsnotify.NewWatcher()
if err != nil {
return err
}
defer func() {
if err := watcher.Close(); err != nil {
log.Debugf("failed to close file watcher: %v", err)
}
}()
// start watching directory
d := filepath.Dir(dir)
if err := watcher.Add(d); err != nil {
log.Debugf("watcher add (%v) error: %v", d, err)
return err
}
timeout := time.NewTimer(MaxWaitReady)
defer timeout.Stop()
for {
select {
case <-timeout.C:
log.Debugf("watcher timeout after: %v", MaxWaitReady)
return fmt.Errorf("timeout waiting (%s) for directory: %s", MaxWaitReady, dir)
case e := <-watcher.Errors:
log.Debugf("watcher error: %+v", e)
return e
case ev := <-watcher.Events:
log.Debugf("watcher event: %+v", ev)
if ev.Name == dir && (ev.Op&fsnotify.Create) == fsnotify.Create {
// socket created, we are ready
return nil
}
}
}
}
func (c *Client) Connect() error {
// check if socket exists
if _, err := os.Stat(c.socketPath); os.IsNotExist(err) {
return fmt.Errorf("VPP API socket file %s does not exist", c.socketPath)
} else if err != nil {
return fmt.Errorf("VPP API socket error: %v", err)
}
if err := c.connect(c.socketPath); err != nil {
return err
}
if err := c.open(); err != nil {
_ = c.disconnect()
return err
}
c.quit = make(chan struct{})
c.wg.Add(1)
go c.readerLoop()
return nil
}
func (c *Client) Disconnect() error {
if c.conn == nil {
return nil
}
log.Debugf("Disconnecting..")
close(c.quit)
if err := c.conn.CloseRead(); err != nil {
log.Debugf("closing readMsg failed: %v", err)
}
// wait for readerLoop to return
c.wg.Wait()
// Don't bother sending a vl_api_sockclnt_delete_t message,
// just close the socket.
if err := c.disconnect(); err != nil {
return err
}
return nil
}
const defaultBufferSize = 4096
func (c *Client) connect(sockAddr string) error {
addr := &net.UnixAddr{Name: sockAddr, Net: "unix"}
log.Debugf("Connecting to: %v", c.socketPath)
conn, err := net.DialUnix("unix", nil, addr)
if err != nil {
// we try different type of socket for backwards compatbility with VPP<=19.04
if strings.Contains(err.Error(), "wrong type for socket") {
addr.Net = "unixpacket"
log.Debugf("%s, retrying connect with type unixpacket", err)
conn, err = net.DialUnix("unixpacket", nil, addr)
}
if err != nil {
log.Debugf("Connecting to socket %s failed: %s", addr, err)
return err
}
}
c.conn = conn
log.Debugf("Connected to socket (local addr: %v)", c.conn.LocalAddr().(*net.UnixAddr))
c.reader = bufio.NewReaderSize(c.conn, defaultBufferSize)
c.writer = bufio.NewWriterSize(c.conn, defaultBufferSize)
return nil
}
func (c *Client) disconnect() error {
log.Debugf("Closing socket")
// cleanup msg table
c.setMsgTable(make(map[string]uint16), 0)
if err := c.conn.Close(); err != nil {
log.Debugln("Closing socket failed:", err)
return err
}
return nil
}
const (
sockCreateMsgId = 15 // hard-coded sockclnt_create message ID
createMsgContext = byte(123)
deleteMsgContext = byte(124)
)
func (c *Client) open() error {
var msgCodec = codec.DefaultCodec
// Request socket client create
req := &memclnt.SockclntCreate{
Name: c.clientName,
}
msg, err := msgCodec.EncodeMsg(req, sockCreateMsgId)
if err != nil {
log.Debugln("Encode error:", err)
return err
}
// set non-0 context
msg[5] = createMsgContext
if err := c.writeMsg(msg); err != nil {
log.Debugln("Write error: ", err)
return err
}
msgReply, err := c.readMsgTimeout(nil, c.connectTimeout)
if err != nil {
log.Println("Read error:", err)
return err
}
reply := new(memclnt.SockclntCreateReply)
if err := msgCodec.DecodeMsg(msgReply, reply); err != nil {
log.Println("Decoding sockclnt_create_reply failed:", err)
return err
} else if reply.Response != 0 |
log.Debugf("SockclntCreateReply: Response=%v Index=%v Count=%v",
reply.Response, reply.Index, reply.Count)
c.clientIndex = reply.Index
msgTable := make(map[string]uint16, reply.Count)
var sockDelMsgId uint16
for _, x := range reply.MessageTable {
msgName := strings.Split(x.Name, "\x00")[0]
name := strings.TrimSuffix(msgName, "\x13")
msgTable[name] = x.Index
if strings.HasPrefix(name, "sockclnt_delete_") {
sockDelMsgId = x.Index
}
if debugMsgIds {
log.Debugf(" - %4d: %q", x.Index, name)
}
}
c.setMsgTable(msgTable, sockDelMsgId)
return nil
}
func (c *Client) setMsgTable(msgTable map[string]uint16, sockDelMsgId uint16) {
c.msgTableMu.Lock()
defer c.msgTableMu.Unlock()
c.msgTable = msgTable
c.sockDelMsgId = sockDelMsgId
}
func (c *Client) GetMsgID(msgName string, msgCrc string) (uint16, error) {
c.msgTableMu.RLock()
defer c.msgTableMu.RUnlock()
if msgID, ok := c.msgTable[msgName+"_"+msgCrc]; ok {
return msgID, nil
}
return 0, &adapter.UnknownMsgError{
MsgName: msgName,
MsgCrc: msgCrc,
}
}
func (c *Client) SendMsg(context uint32, data []byte) error {
if len(data) < 10 {
return fmt.Errorf("invalid message data, length must be at least 10 bytes")
}
setMsgRequestHeader(data, c.clientIndex, context)
if debug {
log.Debugf("sendMsg (%d) context=%v client=%d: % 02X", len(data), context, c.clientIndex, data)
}
if err := c.writeMsg(data); err != nil {
log.Debugln("writeMsg error: ", err)
return err
}
return nil
}
// setMsgRequestHeader sets client index and context in the message request header
//
// Message request has following structure:
//
// type msgRequestHeader struct {
// MsgID uint16
// ClientIndex uint32
// Context uint32
// }
func setMsgRequestHeader(data []byte, clientIndex, context uint32) {
// message ID is already set
binary.BigEndian.PutUint32(data[2:6], clientIndex)
binary.BigEndian.PutUint32(data[6:10], context)
}
func (c *Client) writeMsg(msg []byte) error {
// we lock to prevent mixing multiple message writes
c.writeMu.Lock()
defer c.writeMu.Unlock()
header, ok := c.headerPool.Get().(*[]byte)
if !ok {
return fmt.Errorf("failed to get header from pool")
}
err := writeMsgHeader(c.writer, *header, len(msg))
if err != nil {
return err
}
c.headerPool.Put(header)
if err := writeMsgData(c.writer, msg, c.writer.Size()); err != nil {
return err
}
if err := c.writer.Flush(); err != nil {
return err
}
log.Debugf(" -- writeMsg done")
return nil
}
func writeMsgHeader(w io.Writer, header []byte, dataLen int) error {
binary.BigEndian.PutUint32(header[8:12], uint32(dataLen))
n, err := w.Write(header)
if err != nil {
return err
}
if debug {
log.Debugf(" - header sent (%d/%d): % 0X", n, len(header), header)
}
return nil
}
func writeMsgData(w io.Writer, msg []byte, writerSize int) error {
for i := 0; i <= len(msg)/writerSize; i++ {
x := i*writerSize + writerSize
if x > len(msg) {
x = len(msg)
}
if debug {
log.Debugf(" - x=%v i=%v len=%v mod=%v", x, i, len(msg), len(msg)/writerSize)
}
n, err := w.Write(msg[i*writerSize : x])
if err != nil {
return err
}
if debug {
log.Debugf(" - data sent x=%d (%d/%d): % 0X", x, n, len(msg), msg)
}
}
return nil
}
func (c *Client) readerLoop() {
defer c.wg.Done()
defer log.Debugf("reader loop done")
var buf [8192]byte
for {
select {
case <-c.quit:
return
default:
}
msg, err := c.readMsg(buf[:])
if err != nil {
if isClosedError(err) {
return
}
log.Debugf("readMsg error: %v", err)
continue
}
msgID, context := getMsgReplyHeader(msg)
if debug {
log.Debugf("recvMsg (%d) msgID=%d context=%v", len(msg), msgID, context)
}
c.msgCallback(msgID, msg)
}
}
// getMsgReplyHeader gets message ID and context from the message reply header
//
// Message reply has the following structure:
//
// type msgReplyHeader struct {
// MsgID uint16
// Context uint32
// }
func getMsgReplyHeader(msg []byte) (msgID uint16, context uint32) {
msgID = binary.BigEndian.Uint16(msg[0:2])
context = binary.BigEndian.Uint32(msg[2:6])
return
}
func (c *Client) readMsgTimeout(buf []byte, timeout time.Duration) ([]byte, error) {
// set read deadline
readDeadline := time.Now().Add(timeout)
if err := c.conn.SetReadDeadline(readDeadline); err != nil {
return nil, err
}
// read message
msgReply, err := c.readMsg(buf)
if err != nil {
return nil, err
}
// reset read deadline
if err := c.conn.SetReadDeadline(time.Time{}); err != nil {
return nil, err
}
return msgReply, nil
}
func (c *Client) readMsg(buf []byte) ([]byte, error) {
log.Debug("reading msg..")
header, ok := c.headerPool.Get().(*[]byte)
if !ok {
return nil, fmt.Errorf("failed to get header from pool")
}
msgLen, err := readMsgHeader(c.reader, *header)
if err != nil {
return nil, err
}
c.headerPool.Put(header)
msg, err := readMsgData(c.reader, buf, msgLen)
if err != nil {
return nil, err
}
log.Debugf(" -- readMsg done (buffered: %d)", c.reader.Buffered())
return msg, nil
}
func readMsgHeader(r io.Reader, header []byte) (int, error) {
n, err := io.ReadAtLeast(r, header, 16)
if err != nil {
return 0, err
}
if n == 0 {
log.Debugln("zero bytes header")
return 0, nil
} else if n != 16 {
log.Debugf("invalid header (%d bytes): % 0X", n, header[:n])
return 0, fmt.Errorf("invalid header (expected 16 bytes, got %d)", n)
}
dataLen := binary.BigEndian.Uint32(header[8:12])
return int(dataLen), nil
}
func readMsgData(r io.Reader, buf []byte, dataLen int) ([]byte, error) {
var msg []byte
if buf == nil || len(buf) < dataLen {
msg = make([]byte, dataLen)
} else {
msg = buf[0:dataLen]
}
n, err := r.Read(msg)
if err != nil {
return nil, err
}
if debug {
log.Debugf(" - read data (%d bytes): % 0X", n, msg[:n])
}
if dataLen > n {
remain := dataLen - n
log.Debugf("continue reading remaining %d bytes", remain)
view := msg[n:]
for remain > 0 {
nbytes, err := r.Read(view)
if err != nil {
return nil, err
} else if nbytes == 0 {
return nil, fmt.Errorf("zero nbytes")
}
remain -= nbytes
log.Debugf("another data received: %d bytes (remain: %d)", nbytes, remain)
view = view[nbytes:]
}
}
return msg, nil
}
func isClosedError(err error) bool {
if errors.Is(err, io.EOF) {
return true
}
return strings.HasSuffix(err.Error(), "use of closed network connection")
}
| {
return fmt.Errorf("sockclnt_create_reply: response error (%d)", reply.Response)
} | conditional_block |
tx.go | package tsdb
import (
"encoding/binary"
"fmt"
"math"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
)
// tx represents a transaction that spans multiple shard data stores.
// This transaction will open and close all data stores atomically.
type tx struct {
now time.Time
// used by DecodeFields and FieldIDs. Only used in a raw query, which won't let you select from more than one measurement
measurement *Measurement
meta metaStore
store localStore
}
type metaStore interface {
RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
}
type localStore interface {
Measurement(database, name string) *Measurement
ValidateAggregateFieldsInStatement(shardID uint64, measurementName string, stmt *influxql.SelectStatement) error
Shard(shardID uint64) *Shard
}
// newTx return a new initialized Tx.
func newTx(meta metaStore, store localStore) *tx {
return &tx{
meta: meta,
store: store,
now: time.Now(),
}
}
// SetNow sets the current time for the transaction.
func (tx *tx) | (now time.Time) { tx.now = now }
// CreateMappers will create a set of mappers that need to be run to execute the map phase of a MapReduceJob.
func (tx *tx) CreateMapReduceJobs(stmt *influxql.SelectStatement, tagKeys []string) ([]*influxql.MapReduceJob, error) {
jobs := []*influxql.MapReduceJob{}
for _, src := range stmt.Sources {
mm, ok := src.(*influxql.Measurement)
if !ok {
return nil, fmt.Errorf("invalid source type: %#v", src)
}
// get the index and the retention policy
rp, err := tx.meta.RetentionPolicy(mm.Database, mm.RetentionPolicy)
if err != nil {
return nil, err
}
m := tx.store.Measurement(mm.Database, mm.Name)
if m == nil {
return nil, ErrMeasurementNotFound(influxql.QuoteIdent([]string{mm.Database, "", mm.Name}...))
}
tx.measurement = m
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
var selectFields []string
var whereFields []string
var selectTags []string
for _, n := range stmt.NamesInSelect() {
if m.HasField(n) {
selectFields = append(selectFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in select clause: %s", n)
}
selectTags = append(selectTags, n)
tagKeys = append(tagKeys, n)
}
for _, n := range stmt.NamesInWhere() {
if n == "time" {
continue
}
if m.HasField(n) {
whereFields = append(whereFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in where clause: %s", n)
}
}
if len(selectFields) == 0 && len(stmt.FunctionCalls()) == 0 {
return nil, fmt.Errorf("select statement must include at least one field or function call")
}
// Validate that group by is not a field
for _, d := range stmt.Dimensions {
switch e := d.Expr.(type) {
case *influxql.VarRef:
if !m.HasTagKey(e.Val) {
return nil, fmt.Errorf("can not use field in group by clause: %s", e.Val)
}
}
}
// Grab time range from statement.
tmin, tmax := influxql.TimeRange(stmt.Condition)
if tmax.IsZero() {
tmax = tx.now
}
if tmin.IsZero() {
tmin = time.Unix(0, 0)
}
// Find shard groups within time range.
var shardGroups []*meta.ShardGroupInfo
for _, group := range rp.ShardGroups {
if group.Overlaps(tmin, tmax) {
g := group
shardGroups = append(shardGroups, &g)
}
}
if len(shardGroups) == 0 {
return nil, nil
}
// get the group by interval, if there is one
var interval int64
if d, err := stmt.GroupByInterval(); err != nil {
return nil, err
} else {
interval = d.Nanoseconds()
}
// get the sorted unique tag sets for this query.
tagSets, err := m.TagSets(stmt, tagKeys)
if err != nil {
return nil, err
}
for _, t := range tagSets {
// make a job for each tagset
job := &influxql.MapReduceJob{
MeasurementName: m.Name,
TagSet: t,
TMin: tmin.UnixNano(),
TMax: tmax.UnixNano(),
}
// make a mapper for each shard that must be hit. We may need to hit multiple shards within a shard group
var mappers []influxql.Mapper
// create mappers for each shard we need to hit
for _, sg := range shardGroups {
// TODO: implement distributed queries
if len(sg.Shards) != 1 {
return nil, fmt.Errorf("distributed queries aren't supported yet. You have a replication policy with RF < # of servers in cluster")
}
shard := tx.store.Shard(sg.Shards[0].ID)
if shard == nil {
// the store returned nil which means we haven't written any data into this shard yet, so ignore it
continue
}
// get the codec for this measuremnt. If this is nil it just means this measurement was
// never written into this shard, so we can skip it and continue.
codec := shard.FieldCodec(m.Name)
if codec == nil {
continue
}
var mapper influxql.Mapper
mapper = &LocalMapper{
seriesKeys: t.SeriesKeys,
shard: shard,
db: shard.DB(),
job: job,
decoder: codec,
filters: t.Filters,
whereFields: whereFields,
selectFields: selectFields,
selectTags: selectTags,
tmin: tmin.UnixNano(),
tmax: tmax.UnixNano(),
interval: interval,
// multiple mappers may need to be merged together to get the results
// for a raw query. So each mapper will have to read at least the
// limit plus the offset in data points to ensure we've hit our mark
limit: uint64(stmt.Limit) + uint64(stmt.Offset),
}
mappers = append(mappers, mapper)
}
job.Mappers = mappers
jobs = append(jobs, job)
}
}
// always return them in sorted order so the results from running the jobs are returned in a deterministic order
sort.Sort(influxql.MapReduceJobs(jobs))
return jobs, nil
}
// LocalMapper implements the influxql.Mapper interface for running map tasks over a shard that is local to this server
type LocalMapper struct {
cursorsEmpty bool // boolean that lets us know if the cursors are empty
decoder *FieldCodec // decoder for the raw data bytes
filters []influxql.Expr // filters for each series
cursors []*shardCursor // bolt cursors for each series id
seriesKeys []string // seriesKeys to be read from this shard
shard *Shard // original shard
db *bolt.DB // bolt store for the shard accessed by this mapper
txn *bolt.Tx // read transactions by shard id
job *influxql.MapReduceJob // the MRJob this mapper belongs to
mapFunc influxql.MapFunc // the map func
fieldID uint8 // the field ID associated with the mapFunc curently being run
fieldName string // the field name associated with the mapFunc currently being run
keyBuffer []int64 // the current timestamp key for each cursor
valueBuffer [][]byte // the current value for each cursor
tmin int64 // the min of the current group by interval being iterated over
tmax int64 // the max of the current group by interval being iterated over
additionalNames []string // additional field or tag names that might be requested from the map function
whereFields []string // field names that occur in the where clause
selectFields []string // field names that occur in the select clause
selectTags []string // tag keys that occur in the select clause
isRaw bool // if the query is a non-aggregate query
interval int64 // the group by interval of the query, if any
limit uint64 // used for raw queries for LIMIT
perIntervalLimit int // used for raw queries to determine how far into a chunk we are
chunkSize int // used for raw queries to determine how much data to read before flushing to client
}
// Open opens the LocalMapper.
func (l *LocalMapper) Open() error {
// Obtain shard lock to copy in-cache points.
l.shard.mu.Lock()
defer l.shard.mu.Unlock()
// Open the data store
txn, err := l.db.Begin(false)
if err != nil {
return err
}
l.txn = txn
// create a bolt cursor for each unique series id
l.cursors = make([]*shardCursor, len(l.seriesKeys))
for i, key := range l.seriesKeys {
// Retrieve key bucket.
b := l.txn.Bucket([]byte(key))
// Ignore if there is no bucket or points in the cache.
partitionID := WALPartition([]byte(key))
if b == nil && len(l.shard.cache[partitionID][key]) == 0 {
continue
}
// Retrieve a copy of the in-cache points for the key.
cache := make([][]byte, len(l.shard.cache[partitionID][key]))
copy(cache, l.shard.cache[partitionID][key])
// Build a cursor that merges the bucket and cache together.
cur := &shardCursor{cache: cache}
if b != nil {
cur.cursor = b.Cursor()
}
l.cursors[i] = cur
}
return nil
}
// Close closes the LocalMapper.
func (l *LocalMapper) Close() {
if l.txn != nil {
_ = l.txn.Rollback()
}
}
// Begin will set up the mapper to run the map function for a given aggregate call starting at the passed in time
func (l *LocalMapper) Begin(c *influxql.Call, startingTime int64, chunkSize int) error {
// set up the buffers. These ensure that we return data in time order
mapFunc, err := influxql.InitializeMapFunc(c)
if err != nil {
return err
}
l.mapFunc = mapFunc
l.keyBuffer = make([]int64, len(l.cursors))
l.valueBuffer = make([][]byte, len(l.cursors))
l.chunkSize = chunkSize
l.tmin = startingTime
var isCountDistinct bool
// determine if this is a raw data query with a single field, multiple fields, or an aggregate
var fieldName string
if c == nil { // its a raw data query
l.isRaw = true
if len(l.selectFields) == 1 {
fieldName = l.selectFields[0]
}
// if they haven't set a limit, just set it to the max int size
if l.limit == 0 {
l.limit = math.MaxUint64
}
} else {
// Check for calls like `derivative(mean(value), 1d)`
var nested *influxql.Call = c
if fn, ok := c.Args[0].(*influxql.Call); ok {
nested = fn
}
switch lit := nested.Args[0].(type) {
case *influxql.VarRef:
fieldName = lit.Val
case *influxql.Distinct:
if c.Name != "count" {
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = true
fieldName = lit.Val
default:
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = isCountDistinct || (c.Name == "count" && nested.Name == "distinct")
}
// set up the field info if a specific field was set for this mapper
if fieldName != "" {
fid, err := l.decoder.FieldIDByName(fieldName)
if err != nil {
switch {
case c != nil && c.Name == "distinct":
return fmt.Errorf(`%s isn't a field on measurement %s; to query the unique values for a tag use SHOW TAG VALUES FROM %[2]s WITH KEY = "%[1]s`, fieldName, l.job.MeasurementName)
case isCountDistinct:
return fmt.Errorf("%s isn't a field on measurement %s; count(distinct) on tags isn't yet supported", fieldName, l.job.MeasurementName)
}
}
l.fieldID = fid
l.fieldName = fieldName
}
// seek the bolt cursors and fill the buffers
for i, c := range l.cursors {
// this series may have never been written in this shard group (time range) so the cursor would be nil
if c == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
k, v := c.Seek(u64tob(uint64(l.job.TMin)))
if k == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
l.cursorsEmpty = false
t := int64(btou64(k))
l.keyBuffer[i] = t
l.valueBuffer[i] = v
}
return nil
}
// NextInterval will get the time ordered next interval of the given interval size from the mapper. This is a
// forward only operation from the start time passed into Begin. Will return nil when there is no more data to be read.
// If this is a raw query, interval should be the max time to hit in the query
func (l *LocalMapper) NextInterval() (interface{}, error) {
if l.cursorsEmpty || l.tmin > l.job.TMax {
return nil, nil
}
// after we call to the mapper, this will be the tmin for the next interval.
nextMin := l.tmin + l.interval
// Set the upper bound of the interval.
if l.isRaw {
l.perIntervalLimit = l.chunkSize
} else if l.interval > 0 {
// Set tmax to ensure that the interval lands on the boundary of the interval
if l.tmin%l.interval != 0 {
// the first interval in a query with a group by may be smaller than the others. This happens when they have a
// where time > clause that is in the middle of the bucket that the group by time creates. That will be the
// case on the first interval when the tmin % the interval isn't equal to zero
nextMin = l.tmin/l.interval*l.interval + l.interval
}
l.tmax = nextMin - 1
}
// Execute the map function. This local mapper acts as the iterator
val := l.mapFunc(l)
// see if all the cursors are empty
l.cursorsEmpty = true
for _, k := range l.keyBuffer {
if k != 0 {
l.cursorsEmpty = false
break
}
}
// Move the interval forward if it's not a raw query. For raw queries we use the limit to advance intervals.
if !l.isRaw {
l.tmin = nextMin
}
return val, nil
}
// Next returns the next matching timestamped value for the LocalMapper.
func (l *LocalMapper) Next() (seriesKey string, timestamp int64, value interface{}) {
for {
// if it's a raw query and we've hit the limit of the number of points to read in
// for either this chunk or for the absolute query, bail
if l.isRaw && (l.limit == 0 || l.perIntervalLimit == 0) {
return "", int64(0), nil
}
// find the minimum timestamp
min := -1
minKey := int64(math.MaxInt64)
for i, k := range l.keyBuffer {
if k != 0 && k <= l.tmax && k < minKey && k >= l.tmin {
min = i
minKey = k
}
}
// return if there is no more data in this group by interval
if min == -1 {
return "", 0, nil
}
// set the current timestamp and seriesID
timestamp = l.keyBuffer[min]
seriesKey = l.seriesKeys[min]
// decode either the value, or values we need. Also filter if necessary
var value interface{}
var err error
if l.isRaw && len(l.selectFields) > 1 {
if fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min]); err == nil {
value = fieldsWithNames
// if there's a where clause, make sure we don't need to filter this value
if l.filters[min] != nil {
if !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
} else {
value, err = l.decoder.DecodeByID(l.fieldID, l.valueBuffer[min])
// if there's a where clase, see if we need to filter
if l.filters[min] != nil {
// see if the where is only on this field or on one or more other fields. if the latter, we'll have to decode everything
if len(l.whereFields) == 1 && l.whereFields[0] == l.fieldName {
if !matchesWhere(l.filters[min], map[string]interface{}{l.fieldName: value}) {
value = nil
}
} else { // decode everything
fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min])
if err != nil || !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
}
// advance the cursor
nextKey, nextVal := l.cursors[min].Next()
if nextKey == nil {
l.keyBuffer[min] = 0
} else {
l.keyBuffer[min] = int64(btou64(nextKey))
}
l.valueBuffer[min] = nextVal
// if the value didn't match our filter or if we didn't find the field keep iterating
if err != nil || value == nil {
continue
}
// if it's a raw query, we always limit the amount we read in
if l.isRaw {
l.limit--
l.perIntervalLimit--
}
return seriesKey, timestamp, value
}
}
// IsEmpty returns true if either all cursors are nil or all cursors are past the passed in max time
func (l *LocalMapper) IsEmpty(tmax int64) bool {
if l.cursorsEmpty || l.limit == 0 {
return true
}
// look at the next time for each cursor
for _, t := range l.keyBuffer {
// if the time is less than the max, we haven't emptied this mapper yet
if t != 0 && t <= tmax {
return false
}
}
return true
}
// matchesFilter returns true if the value matches the where clause
func matchesWhere(f influxql.Expr, fields map[string]interface{}) bool {
if ok, _ := influxql.Eval(f, fields).(bool); !ok {
return false
}
return true
}
// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }
| SetNow | identifier_name |
tx.go | package tsdb
import (
"encoding/binary"
"fmt"
"math"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
)
// tx represents a transaction that spans multiple shard data stores.
// This transaction will open and close all data stores atomically.
type tx struct {
now time.Time
// used by DecodeFields and FieldIDs. Only used in a raw query, which won't let you select from more than one measurement
measurement *Measurement
meta metaStore
store localStore
}
type metaStore interface {
RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
}
type localStore interface {
Measurement(database, name string) *Measurement
ValidateAggregateFieldsInStatement(shardID uint64, measurementName string, stmt *influxql.SelectStatement) error
Shard(shardID uint64) *Shard
}
// newTx return a new initialized Tx.
func newTx(meta metaStore, store localStore) *tx {
return &tx{
meta: meta,
store: store,
now: time.Now(),
}
}
// SetNow sets the current time for the transaction.
func (tx *tx) SetNow(now time.Time) { tx.now = now }
// CreateMappers will create a set of mappers that need to be run to execute the map phase of a MapReduceJob.
func (tx *tx) CreateMapReduceJobs(stmt *influxql.SelectStatement, tagKeys []string) ([]*influxql.MapReduceJob, error) {
jobs := []*influxql.MapReduceJob{}
for _, src := range stmt.Sources {
mm, ok := src.(*influxql.Measurement)
if !ok {
return nil, fmt.Errorf("invalid source type: %#v", src)
}
// get the index and the retention policy
rp, err := tx.meta.RetentionPolicy(mm.Database, mm.RetentionPolicy)
if err != nil {
return nil, err
}
m := tx.store.Measurement(mm.Database, mm.Name)
if m == nil {
return nil, ErrMeasurementNotFound(influxql.QuoteIdent([]string{mm.Database, "", mm.Name}...))
}
tx.measurement = m
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
var selectFields []string
var whereFields []string
var selectTags []string
for _, n := range stmt.NamesInSelect() {
if m.HasField(n) {
selectFields = append(selectFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in select clause: %s", n)
}
selectTags = append(selectTags, n)
tagKeys = append(tagKeys, n)
}
for _, n := range stmt.NamesInWhere() {
if n == "time" {
continue
}
if m.HasField(n) {
whereFields = append(whereFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in where clause: %s", n)
}
}
if len(selectFields) == 0 && len(stmt.FunctionCalls()) == 0 {
return nil, fmt.Errorf("select statement must include at least one field or function call")
}
// Validate that group by is not a field
for _, d := range stmt.Dimensions {
switch e := d.Expr.(type) {
case *influxql.VarRef:
if !m.HasTagKey(e.Val) {
return nil, fmt.Errorf("can not use field in group by clause: %s", e.Val)
}
}
}
// Grab time range from statement.
tmin, tmax := influxql.TimeRange(stmt.Condition)
if tmax.IsZero() {
tmax = tx.now
}
if tmin.IsZero() {
tmin = time.Unix(0, 0)
}
// Find shard groups within time range.
var shardGroups []*meta.ShardGroupInfo
for _, group := range rp.ShardGroups {
if group.Overlaps(tmin, tmax) {
g := group
shardGroups = append(shardGroups, &g)
}
}
if len(shardGroups) == 0 {
return nil, nil
}
// get the group by interval, if there is one
var interval int64
if d, err := stmt.GroupByInterval(); err != nil {
return nil, err
} else {
interval = d.Nanoseconds()
}
// get the sorted unique tag sets for this query.
tagSets, err := m.TagSets(stmt, tagKeys)
if err != nil {
return nil, err
}
for _, t := range tagSets {
// make a job for each tagset
job := &influxql.MapReduceJob{
MeasurementName: m.Name,
TagSet: t,
TMin: tmin.UnixNano(),
TMax: tmax.UnixNano(),
}
// make a mapper for each shard that must be hit. We may need to hit multiple shards within a shard group
var mappers []influxql.Mapper
// create mappers for each shard we need to hit
for _, sg := range shardGroups {
// TODO: implement distributed queries
if len(sg.Shards) != 1 {
return nil, fmt.Errorf("distributed queries aren't supported yet. You have a replication policy with RF < # of servers in cluster")
}
shard := tx.store.Shard(sg.Shards[0].ID)
if shard == nil {
// the store returned nil which means we haven't written any data into this shard yet, so ignore it
continue
}
// get the codec for this measuremnt. If this is nil it just means this measurement was
// never written into this shard, so we can skip it and continue.
codec := shard.FieldCodec(m.Name)
if codec == nil {
continue
}
var mapper influxql.Mapper
mapper = &LocalMapper{
seriesKeys: t.SeriesKeys,
shard: shard,
db: shard.DB(),
job: job,
decoder: codec,
filters: t.Filters,
whereFields: whereFields,
selectFields: selectFields,
selectTags: selectTags,
tmin: tmin.UnixNano(),
tmax: tmax.UnixNano(),
interval: interval,
// multiple mappers may need to be merged together to get the results
// for a raw query. So each mapper will have to read at least the
// limit plus the offset in data points to ensure we've hit our mark
limit: uint64(stmt.Limit) + uint64(stmt.Offset),
}
mappers = append(mappers, mapper)
}
job.Mappers = mappers
jobs = append(jobs, job)
}
}
// always return them in sorted order so the results from running the jobs are returned in a deterministic order
sort.Sort(influxql.MapReduceJobs(jobs))
return jobs, nil
}
// LocalMapper implements the influxql.Mapper interface for running map tasks over a shard that is local to this server
type LocalMapper struct {
cursorsEmpty bool // boolean that lets us know if the cursors are empty
decoder *FieldCodec // decoder for the raw data bytes
filters []influxql.Expr // filters for each series
cursors []*shardCursor // bolt cursors for each series id
seriesKeys []string // seriesKeys to be read from this shard
shard *Shard // original shard
db *bolt.DB // bolt store for the shard accessed by this mapper
txn *bolt.Tx // read transactions by shard id
job *influxql.MapReduceJob // the MRJob this mapper belongs to
mapFunc influxql.MapFunc // the map func
fieldID uint8 // the field ID associated with the mapFunc curently being run
fieldName string // the field name associated with the mapFunc currently being run
keyBuffer []int64 // the current timestamp key for each cursor
valueBuffer [][]byte // the current value for each cursor
tmin int64 // the min of the current group by interval being iterated over
tmax int64 // the max of the current group by interval being iterated over
additionalNames []string // additional field or tag names that might be requested from the map function
whereFields []string // field names that occur in the where clause
selectFields []string // field names that occur in the select clause
selectTags []string // tag keys that occur in the select clause
isRaw bool // if the query is a non-aggregate query
interval int64 // the group by interval of the query, if any
limit uint64 // used for raw queries for LIMIT
perIntervalLimit int // used for raw queries to determine how far into a chunk we are
chunkSize int // used for raw queries to determine how much data to read before flushing to client
}
// Open opens the LocalMapper.
func (l *LocalMapper) Open() error {
// Obtain shard lock to copy in-cache points.
l.shard.mu.Lock()
defer l.shard.mu.Unlock()
// Open the data store
txn, err := l.db.Begin(false)
if err != nil {
return err
}
l.txn = txn
// create a bolt cursor for each unique series id
l.cursors = make([]*shardCursor, len(l.seriesKeys))
for i, key := range l.seriesKeys {
// Retrieve key bucket.
b := l.txn.Bucket([]byte(key))
// Ignore if there is no bucket or points in the cache.
partitionID := WALPartition([]byte(key))
if b == nil && len(l.shard.cache[partitionID][key]) == 0 {
continue
}
// Retrieve a copy of the in-cache points for the key.
cache := make([][]byte, len(l.shard.cache[partitionID][key]))
copy(cache, l.shard.cache[partitionID][key])
// Build a cursor that merges the bucket and cache together.
cur := &shardCursor{cache: cache}
if b != nil {
cur.cursor = b.Cursor()
}
l.cursors[i] = cur
}
return nil
}
// Close closes the LocalMapper.
func (l *LocalMapper) Close() {
if l.txn != nil {
_ = l.txn.Rollback()
}
}
// Begin will set up the mapper to run the map function for a given aggregate call starting at the passed in time
func (l *LocalMapper) Begin(c *influxql.Call, startingTime int64, chunkSize int) error {
// set up the buffers. These ensure that we return data in time order
mapFunc, err := influxql.InitializeMapFunc(c)
if err != nil {
return err
}
l.mapFunc = mapFunc
l.keyBuffer = make([]int64, len(l.cursors))
l.valueBuffer = make([][]byte, len(l.cursors))
l.chunkSize = chunkSize
l.tmin = startingTime
var isCountDistinct bool
// determine if this is a raw data query with a single field, multiple fields, or an aggregate
var fieldName string
if c == nil { // its a raw data query
l.isRaw = true
if len(l.selectFields) == 1 {
fieldName = l.selectFields[0]
}
// if they haven't set a limit, just set it to the max int size
if l.limit == 0 {
l.limit = math.MaxUint64
}
} else {
// Check for calls like `derivative(mean(value), 1d)`
var nested *influxql.Call = c
if fn, ok := c.Args[0].(*influxql.Call); ok {
nested = fn
}
switch lit := nested.Args[0].(type) {
case *influxql.VarRef:
fieldName = lit.Val
case *influxql.Distinct:
if c.Name != "count" {
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = true
fieldName = lit.Val
default:
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = isCountDistinct || (c.Name == "count" && nested.Name == "distinct")
}
// set up the field info if a specific field was set for this mapper
if fieldName != "" {
fid, err := l.decoder.FieldIDByName(fieldName)
if err != nil {
switch {
case c != nil && c.Name == "distinct":
return fmt.Errorf(`%s isn't a field on measurement %s; to query the unique values for a tag use SHOW TAG VALUES FROM %[2]s WITH KEY = "%[1]s`, fieldName, l.job.MeasurementName)
case isCountDistinct:
return fmt.Errorf("%s isn't a field on measurement %s; count(distinct) on tags isn't yet supported", fieldName, l.job.MeasurementName)
}
}
l.fieldID = fid
l.fieldName = fieldName
}
// seek the bolt cursors and fill the buffers
for i, c := range l.cursors {
// this series may have never been written in this shard group (time range) so the cursor would be nil
if c == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
k, v := c.Seek(u64tob(uint64(l.job.TMin)))
if k == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
l.cursorsEmpty = false
t := int64(btou64(k))
l.keyBuffer[i] = t
l.valueBuffer[i] = v
}
return nil
}
// NextInterval will get the time ordered next interval of the given interval size from the mapper. This is a
// forward only operation from the start time passed into Begin. Will return nil when there is no more data to be read.
// If this is a raw query, interval should be the max time to hit in the query
func (l *LocalMapper) NextInterval() (interface{}, error) {
if l.cursorsEmpty || l.tmin > l.job.TMax {
return nil, nil
}
// after we call to the mapper, this will be the tmin for the next interval.
nextMin := l.tmin + l.interval
// Set the upper bound of the interval.
if l.isRaw {
l.perIntervalLimit = l.chunkSize
} else if l.interval > 0 {
// Set tmax to ensure that the interval lands on the boundary of the interval
if l.tmin%l.interval != 0 {
// the first interval in a query with a group by may be smaller than the others. This happens when they have a
// where time > clause that is in the middle of the bucket that the group by time creates. That will be the
// case on the first interval when the tmin % the interval isn't equal to zero
nextMin = l.tmin/l.interval*l.interval + l.interval
}
l.tmax = nextMin - 1
}
// Execute the map function. This local mapper acts as the iterator
val := l.mapFunc(l)
// see if all the cursors are empty
l.cursorsEmpty = true
for _, k := range l.keyBuffer {
if k != 0 {
l.cursorsEmpty = false
break
}
}
// Move the interval forward if it's not a raw query. For raw queries we use the limit to advance intervals.
if !l.isRaw {
l.tmin = nextMin
}
return val, nil
}
// Next returns the next matching timestamped value for the LocalMapper.
func (l *LocalMapper) Next() (seriesKey string, timestamp int64, value interface{}) {
for {
// if it's a raw query and we've hit the limit of the number of points to read in
// for either this chunk or for the absolute query, bail
if l.isRaw && (l.limit == 0 || l.perIntervalLimit == 0) {
return "", int64(0), nil
}
// find the minimum timestamp
min := -1
minKey := int64(math.MaxInt64)
for i, k := range l.keyBuffer {
if k != 0 && k <= l.tmax && k < minKey && k >= l.tmin {
min = i
minKey = k | if min == -1 {
return "", 0, nil
}
// set the current timestamp and seriesID
timestamp = l.keyBuffer[min]
seriesKey = l.seriesKeys[min]
// decode either the value, or values we need. Also filter if necessary
var value interface{}
var err error
if l.isRaw && len(l.selectFields) > 1 {
if fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min]); err == nil {
value = fieldsWithNames
// if there's a where clause, make sure we don't need to filter this value
if l.filters[min] != nil {
if !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
} else {
value, err = l.decoder.DecodeByID(l.fieldID, l.valueBuffer[min])
// if there's a where clase, see if we need to filter
if l.filters[min] != nil {
// see if the where is only on this field or on one or more other fields. if the latter, we'll have to decode everything
if len(l.whereFields) == 1 && l.whereFields[0] == l.fieldName {
if !matchesWhere(l.filters[min], map[string]interface{}{l.fieldName: value}) {
value = nil
}
} else { // decode everything
fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min])
if err != nil || !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
}
// advance the cursor
nextKey, nextVal := l.cursors[min].Next()
if nextKey == nil {
l.keyBuffer[min] = 0
} else {
l.keyBuffer[min] = int64(btou64(nextKey))
}
l.valueBuffer[min] = nextVal
// if the value didn't match our filter or if we didn't find the field keep iterating
if err != nil || value == nil {
continue
}
// if it's a raw query, we always limit the amount we read in
if l.isRaw {
l.limit--
l.perIntervalLimit--
}
return seriesKey, timestamp, value
}
}
// IsEmpty returns true if either all cursors are nil or all cursors are past the passed in max time
func (l *LocalMapper) IsEmpty(tmax int64) bool {
if l.cursorsEmpty || l.limit == 0 {
return true
}
// look at the next time for each cursor
for _, t := range l.keyBuffer {
// if the time is less than the max, we haven't emptied this mapper yet
if t != 0 && t <= tmax {
return false
}
}
return true
}
// matchesFilter returns true if the value matches the where clause
func matchesWhere(f influxql.Expr, fields map[string]interface{}) bool {
if ok, _ := influxql.Eval(f, fields).(bool); !ok {
return false
}
return true
}
// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } | }
}
// return if there is no more data in this group by interval | random_line_split |
tx.go | package tsdb
import (
"encoding/binary"
"fmt"
"math"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
)
// tx represents a transaction that spans multiple shard data stores.
// This transaction will open and close all data stores atomically.
type tx struct {
now time.Time
// used by DecodeFields and FieldIDs. Only used in a raw query, which won't let you select from more than one measurement
measurement *Measurement
meta metaStore
store localStore
}
type metaStore interface {
RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
}
type localStore interface {
Measurement(database, name string) *Measurement
ValidateAggregateFieldsInStatement(shardID uint64, measurementName string, stmt *influxql.SelectStatement) error
Shard(shardID uint64) *Shard
}
// newTx return a new initialized Tx.
func newTx(meta metaStore, store localStore) *tx |
// SetNow sets the current time for the transaction.
func (tx *tx) SetNow(now time.Time) { tx.now = now }
// CreateMappers will create a set of mappers that need to be run to execute the map phase of a MapReduceJob.
func (tx *tx) CreateMapReduceJobs(stmt *influxql.SelectStatement, tagKeys []string) ([]*influxql.MapReduceJob, error) {
jobs := []*influxql.MapReduceJob{}
for _, src := range stmt.Sources {
mm, ok := src.(*influxql.Measurement)
if !ok {
return nil, fmt.Errorf("invalid source type: %#v", src)
}
// get the index and the retention policy
rp, err := tx.meta.RetentionPolicy(mm.Database, mm.RetentionPolicy)
if err != nil {
return nil, err
}
m := tx.store.Measurement(mm.Database, mm.Name)
if m == nil {
return nil, ErrMeasurementNotFound(influxql.QuoteIdent([]string{mm.Database, "", mm.Name}...))
}
tx.measurement = m
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
var selectFields []string
var whereFields []string
var selectTags []string
for _, n := range stmt.NamesInSelect() {
if m.HasField(n) {
selectFields = append(selectFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in select clause: %s", n)
}
selectTags = append(selectTags, n)
tagKeys = append(tagKeys, n)
}
for _, n := range stmt.NamesInWhere() {
if n == "time" {
continue
}
if m.HasField(n) {
whereFields = append(whereFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in where clause: %s", n)
}
}
if len(selectFields) == 0 && len(stmt.FunctionCalls()) == 0 {
return nil, fmt.Errorf("select statement must include at least one field or function call")
}
// Validate that group by is not a field
for _, d := range stmt.Dimensions {
switch e := d.Expr.(type) {
case *influxql.VarRef:
if !m.HasTagKey(e.Val) {
return nil, fmt.Errorf("can not use field in group by clause: %s", e.Val)
}
}
}
// Grab time range from statement.
tmin, tmax := influxql.TimeRange(stmt.Condition)
if tmax.IsZero() {
tmax = tx.now
}
if tmin.IsZero() {
tmin = time.Unix(0, 0)
}
// Find shard groups within time range.
var shardGroups []*meta.ShardGroupInfo
for _, group := range rp.ShardGroups {
if group.Overlaps(tmin, tmax) {
g := group
shardGroups = append(shardGroups, &g)
}
}
if len(shardGroups) == 0 {
return nil, nil
}
// get the group by interval, if there is one
var interval int64
if d, err := stmt.GroupByInterval(); err != nil {
return nil, err
} else {
interval = d.Nanoseconds()
}
// get the sorted unique tag sets for this query.
tagSets, err := m.TagSets(stmt, tagKeys)
if err != nil {
return nil, err
}
for _, t := range tagSets {
// make a job for each tagset
job := &influxql.MapReduceJob{
MeasurementName: m.Name,
TagSet: t,
TMin: tmin.UnixNano(),
TMax: tmax.UnixNano(),
}
// make a mapper for each shard that must be hit. We may need to hit multiple shards within a shard group
var mappers []influxql.Mapper
// create mappers for each shard we need to hit
for _, sg := range shardGroups {
// TODO: implement distributed queries
if len(sg.Shards) != 1 {
return nil, fmt.Errorf("distributed queries aren't supported yet. You have a replication policy with RF < # of servers in cluster")
}
shard := tx.store.Shard(sg.Shards[0].ID)
if shard == nil {
// the store returned nil which means we haven't written any data into this shard yet, so ignore it
continue
}
// get the codec for this measuremnt. If this is nil it just means this measurement was
// never written into this shard, so we can skip it and continue.
codec := shard.FieldCodec(m.Name)
if codec == nil {
continue
}
var mapper influxql.Mapper
mapper = &LocalMapper{
seriesKeys: t.SeriesKeys,
shard: shard,
db: shard.DB(),
job: job,
decoder: codec,
filters: t.Filters,
whereFields: whereFields,
selectFields: selectFields,
selectTags: selectTags,
tmin: tmin.UnixNano(),
tmax: tmax.UnixNano(),
interval: interval,
// multiple mappers may need to be merged together to get the results
// for a raw query. So each mapper will have to read at least the
// limit plus the offset in data points to ensure we've hit our mark
limit: uint64(stmt.Limit) + uint64(stmt.Offset),
}
mappers = append(mappers, mapper)
}
job.Mappers = mappers
jobs = append(jobs, job)
}
}
// always return them in sorted order so the results from running the jobs are returned in a deterministic order
sort.Sort(influxql.MapReduceJobs(jobs))
return jobs, nil
}
// LocalMapper implements the influxql.Mapper interface for running map tasks over a shard that is local to this server
type LocalMapper struct {
cursorsEmpty bool // boolean that lets us know if the cursors are empty
decoder *FieldCodec // decoder for the raw data bytes
filters []influxql.Expr // filters for each series
cursors []*shardCursor // bolt cursors for each series id
seriesKeys []string // seriesKeys to be read from this shard
shard *Shard // original shard
db *bolt.DB // bolt store for the shard accessed by this mapper
txn *bolt.Tx // read transactions by shard id
job *influxql.MapReduceJob // the MRJob this mapper belongs to
mapFunc influxql.MapFunc // the map func
fieldID uint8 // the field ID associated with the mapFunc curently being run
fieldName string // the field name associated with the mapFunc currently being run
keyBuffer []int64 // the current timestamp key for each cursor
valueBuffer [][]byte // the current value for each cursor
tmin int64 // the min of the current group by interval being iterated over
tmax int64 // the max of the current group by interval being iterated over
additionalNames []string // additional field or tag names that might be requested from the map function
whereFields []string // field names that occur in the where clause
selectFields []string // field names that occur in the select clause
selectTags []string // tag keys that occur in the select clause
isRaw bool // if the query is a non-aggregate query
interval int64 // the group by interval of the query, if any
limit uint64 // used for raw queries for LIMIT
perIntervalLimit int // used for raw queries to determine how far into a chunk we are
chunkSize int // used for raw queries to determine how much data to read before flushing to client
}
// Open opens the LocalMapper.
func (l *LocalMapper) Open() error {
// Obtain shard lock to copy in-cache points.
l.shard.mu.Lock()
defer l.shard.mu.Unlock()
// Open the data store
txn, err := l.db.Begin(false)
if err != nil {
return err
}
l.txn = txn
// create a bolt cursor for each unique series id
l.cursors = make([]*shardCursor, len(l.seriesKeys))
for i, key := range l.seriesKeys {
// Retrieve key bucket.
b := l.txn.Bucket([]byte(key))
// Ignore if there is no bucket or points in the cache.
partitionID := WALPartition([]byte(key))
if b == nil && len(l.shard.cache[partitionID][key]) == 0 {
continue
}
// Retrieve a copy of the in-cache points for the key.
cache := make([][]byte, len(l.shard.cache[partitionID][key]))
copy(cache, l.shard.cache[partitionID][key])
// Build a cursor that merges the bucket and cache together.
cur := &shardCursor{cache: cache}
if b != nil {
cur.cursor = b.Cursor()
}
l.cursors[i] = cur
}
return nil
}
// Close closes the LocalMapper.
func (l *LocalMapper) Close() {
if l.txn != nil {
_ = l.txn.Rollback()
}
}
// Begin will set up the mapper to run the map function for a given aggregate call starting at the passed in time
func (l *LocalMapper) Begin(c *influxql.Call, startingTime int64, chunkSize int) error {
// set up the buffers. These ensure that we return data in time order
mapFunc, err := influxql.InitializeMapFunc(c)
if err != nil {
return err
}
l.mapFunc = mapFunc
l.keyBuffer = make([]int64, len(l.cursors))
l.valueBuffer = make([][]byte, len(l.cursors))
l.chunkSize = chunkSize
l.tmin = startingTime
var isCountDistinct bool
// determine if this is a raw data query with a single field, multiple fields, or an aggregate
var fieldName string
if c == nil { // its a raw data query
l.isRaw = true
if len(l.selectFields) == 1 {
fieldName = l.selectFields[0]
}
// if they haven't set a limit, just set it to the max int size
if l.limit == 0 {
l.limit = math.MaxUint64
}
} else {
// Check for calls like `derivative(mean(value), 1d)`
var nested *influxql.Call = c
if fn, ok := c.Args[0].(*influxql.Call); ok {
nested = fn
}
switch lit := nested.Args[0].(type) {
case *influxql.VarRef:
fieldName = lit.Val
case *influxql.Distinct:
if c.Name != "count" {
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = true
fieldName = lit.Val
default:
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = isCountDistinct || (c.Name == "count" && nested.Name == "distinct")
}
// set up the field info if a specific field was set for this mapper
if fieldName != "" {
fid, err := l.decoder.FieldIDByName(fieldName)
if err != nil {
switch {
case c != nil && c.Name == "distinct":
return fmt.Errorf(`%s isn't a field on measurement %s; to query the unique values for a tag use SHOW TAG VALUES FROM %[2]s WITH KEY = "%[1]s`, fieldName, l.job.MeasurementName)
case isCountDistinct:
return fmt.Errorf("%s isn't a field on measurement %s; count(distinct) on tags isn't yet supported", fieldName, l.job.MeasurementName)
}
}
l.fieldID = fid
l.fieldName = fieldName
}
// seek the bolt cursors and fill the buffers
for i, c := range l.cursors {
// this series may have never been written in this shard group (time range) so the cursor would be nil
if c == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
k, v := c.Seek(u64tob(uint64(l.job.TMin)))
if k == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
l.cursorsEmpty = false
t := int64(btou64(k))
l.keyBuffer[i] = t
l.valueBuffer[i] = v
}
return nil
}
// NextInterval will get the time ordered next interval of the given interval size from the mapper. This is a
// forward only operation from the start time passed into Begin. Will return nil when there is no more data to be read.
// If this is a raw query, interval should be the max time to hit in the query
func (l *LocalMapper) NextInterval() (interface{}, error) {
if l.cursorsEmpty || l.tmin > l.job.TMax {
return nil, nil
}
// after we call to the mapper, this will be the tmin for the next interval.
nextMin := l.tmin + l.interval
// Set the upper bound of the interval.
if l.isRaw {
l.perIntervalLimit = l.chunkSize
} else if l.interval > 0 {
// Set tmax to ensure that the interval lands on the boundary of the interval
if l.tmin%l.interval != 0 {
// the first interval in a query with a group by may be smaller than the others. This happens when they have a
// where time > clause that is in the middle of the bucket that the group by time creates. That will be the
// case on the first interval when the tmin % the interval isn't equal to zero
nextMin = l.tmin/l.interval*l.interval + l.interval
}
l.tmax = nextMin - 1
}
// Execute the map function. This local mapper acts as the iterator
val := l.mapFunc(l)
// see if all the cursors are empty
l.cursorsEmpty = true
for _, k := range l.keyBuffer {
if k != 0 {
l.cursorsEmpty = false
break
}
}
// Move the interval forward if it's not a raw query. For raw queries we use the limit to advance intervals.
if !l.isRaw {
l.tmin = nextMin
}
return val, nil
}
// Next returns the next matching timestamped value for the LocalMapper.
func (l *LocalMapper) Next() (seriesKey string, timestamp int64, value interface{}) {
for {
// if it's a raw query and we've hit the limit of the number of points to read in
// for either this chunk or for the absolute query, bail
if l.isRaw && (l.limit == 0 || l.perIntervalLimit == 0) {
return "", int64(0), nil
}
// find the minimum timestamp
min := -1
minKey := int64(math.MaxInt64)
for i, k := range l.keyBuffer {
if k != 0 && k <= l.tmax && k < minKey && k >= l.tmin {
min = i
minKey = k
}
}
// return if there is no more data in this group by interval
if min == -1 {
return "", 0, nil
}
// set the current timestamp and seriesID
timestamp = l.keyBuffer[min]
seriesKey = l.seriesKeys[min]
// decode either the value, or values we need. Also filter if necessary
var value interface{}
var err error
if l.isRaw && len(l.selectFields) > 1 {
if fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min]); err == nil {
value = fieldsWithNames
// if there's a where clause, make sure we don't need to filter this value
if l.filters[min] != nil {
if !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
} else {
value, err = l.decoder.DecodeByID(l.fieldID, l.valueBuffer[min])
// if there's a where clase, see if we need to filter
if l.filters[min] != nil {
// see if the where is only on this field or on one or more other fields. if the latter, we'll have to decode everything
if len(l.whereFields) == 1 && l.whereFields[0] == l.fieldName {
if !matchesWhere(l.filters[min], map[string]interface{}{l.fieldName: value}) {
value = nil
}
} else { // decode everything
fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min])
if err != nil || !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
}
// advance the cursor
nextKey, nextVal := l.cursors[min].Next()
if nextKey == nil {
l.keyBuffer[min] = 0
} else {
l.keyBuffer[min] = int64(btou64(nextKey))
}
l.valueBuffer[min] = nextVal
// if the value didn't match our filter or if we didn't find the field keep iterating
if err != nil || value == nil {
continue
}
// if it's a raw query, we always limit the amount we read in
if l.isRaw {
l.limit--
l.perIntervalLimit--
}
return seriesKey, timestamp, value
}
}
// IsEmpty returns true if either all cursors are nil or all cursors are past the passed in max time
func (l *LocalMapper) IsEmpty(tmax int64) bool {
if l.cursorsEmpty || l.limit == 0 {
return true
}
// look at the next time for each cursor
for _, t := range l.keyBuffer {
// if the time is less than the max, we haven't emptied this mapper yet
if t != 0 && t <= tmax {
return false
}
}
return true
}
// matchesFilter returns true if the value matches the where clause
func matchesWhere(f influxql.Expr, fields map[string]interface{}) bool {
if ok, _ := influxql.Eval(f, fields).(bool); !ok {
return false
}
return true
}
// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }
| {
return &tx{
meta: meta,
store: store,
now: time.Now(),
}
} | identifier_body |
tx.go | package tsdb
import (
"encoding/binary"
"fmt"
"math"
"sort"
"time"
"github.com/boltdb/bolt"
"github.com/influxdb/influxdb/influxql"
"github.com/influxdb/influxdb/meta"
)
// tx represents a transaction that spans multiple shard data stores.
// This transaction will open and close all data stores atomically.
type tx struct {
now time.Time
// used by DecodeFields and FieldIDs. Only used in a raw query, which won't let you select from more than one measurement
measurement *Measurement
meta metaStore
store localStore
}
type metaStore interface {
RetentionPolicy(database, name string) (rpi *meta.RetentionPolicyInfo, err error)
}
type localStore interface {
Measurement(database, name string) *Measurement
ValidateAggregateFieldsInStatement(shardID uint64, measurementName string, stmt *influxql.SelectStatement) error
Shard(shardID uint64) *Shard
}
// newTx return a new initialized Tx.
func newTx(meta metaStore, store localStore) *tx {
return &tx{
meta: meta,
store: store,
now: time.Now(),
}
}
// SetNow sets the current time for the transaction.
func (tx *tx) SetNow(now time.Time) { tx.now = now }
// CreateMappers will create a set of mappers that need to be run to execute the map phase of a MapReduceJob.
func (tx *tx) CreateMapReduceJobs(stmt *influxql.SelectStatement, tagKeys []string) ([]*influxql.MapReduceJob, error) {
jobs := []*influxql.MapReduceJob{}
for _, src := range stmt.Sources {
mm, ok := src.(*influxql.Measurement)
if !ok {
return nil, fmt.Errorf("invalid source type: %#v", src)
}
// get the index and the retention policy
rp, err := tx.meta.RetentionPolicy(mm.Database, mm.RetentionPolicy)
if err != nil {
return nil, err
}
m := tx.store.Measurement(mm.Database, mm.Name)
if m == nil {
return nil, ErrMeasurementNotFound(influxql.QuoteIdent([]string{mm.Database, "", mm.Name}...))
}
tx.measurement = m
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
var selectFields []string
var whereFields []string
var selectTags []string
for _, n := range stmt.NamesInSelect() {
if m.HasField(n) |
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in select clause: %s", n)
}
selectTags = append(selectTags, n)
tagKeys = append(tagKeys, n)
}
for _, n := range stmt.NamesInWhere() {
if n == "time" {
continue
}
if m.HasField(n) {
whereFields = append(whereFields, n)
continue
}
if !m.HasTagKey(n) {
return nil, fmt.Errorf("unknown field or tag name in where clause: %s", n)
}
}
if len(selectFields) == 0 && len(stmt.FunctionCalls()) == 0 {
return nil, fmt.Errorf("select statement must include at least one field or function call")
}
// Validate that group by is not a field
for _, d := range stmt.Dimensions {
switch e := d.Expr.(type) {
case *influxql.VarRef:
if !m.HasTagKey(e.Val) {
return nil, fmt.Errorf("can not use field in group by clause: %s", e.Val)
}
}
}
// Grab time range from statement.
tmin, tmax := influxql.TimeRange(stmt.Condition)
if tmax.IsZero() {
tmax = tx.now
}
if tmin.IsZero() {
tmin = time.Unix(0, 0)
}
// Find shard groups within time range.
var shardGroups []*meta.ShardGroupInfo
for _, group := range rp.ShardGroups {
if group.Overlaps(tmin, tmax) {
g := group
shardGroups = append(shardGroups, &g)
}
}
if len(shardGroups) == 0 {
return nil, nil
}
// get the group by interval, if there is one
var interval int64
if d, err := stmt.GroupByInterval(); err != nil {
return nil, err
} else {
interval = d.Nanoseconds()
}
// get the sorted unique tag sets for this query.
tagSets, err := m.TagSets(stmt, tagKeys)
if err != nil {
return nil, err
}
for _, t := range tagSets {
// make a job for each tagset
job := &influxql.MapReduceJob{
MeasurementName: m.Name,
TagSet: t,
TMin: tmin.UnixNano(),
TMax: tmax.UnixNano(),
}
// make a mapper for each shard that must be hit. We may need to hit multiple shards within a shard group
var mappers []influxql.Mapper
// create mappers for each shard we need to hit
for _, sg := range shardGroups {
// TODO: implement distributed queries
if len(sg.Shards) != 1 {
return nil, fmt.Errorf("distributed queries aren't supported yet. You have a replication policy with RF < # of servers in cluster")
}
shard := tx.store.Shard(sg.Shards[0].ID)
if shard == nil {
// the store returned nil which means we haven't written any data into this shard yet, so ignore it
continue
}
// get the codec for this measuremnt. If this is nil it just means this measurement was
// never written into this shard, so we can skip it and continue.
codec := shard.FieldCodec(m.Name)
if codec == nil {
continue
}
var mapper influxql.Mapper
mapper = &LocalMapper{
seriesKeys: t.SeriesKeys,
shard: shard,
db: shard.DB(),
job: job,
decoder: codec,
filters: t.Filters,
whereFields: whereFields,
selectFields: selectFields,
selectTags: selectTags,
tmin: tmin.UnixNano(),
tmax: tmax.UnixNano(),
interval: interval,
// multiple mappers may need to be merged together to get the results
// for a raw query. So each mapper will have to read at least the
// limit plus the offset in data points to ensure we've hit our mark
limit: uint64(stmt.Limit) + uint64(stmt.Offset),
}
mappers = append(mappers, mapper)
}
job.Mappers = mappers
jobs = append(jobs, job)
}
}
// always return them in sorted order so the results from running the jobs are returned in a deterministic order
sort.Sort(influxql.MapReduceJobs(jobs))
return jobs, nil
}
// LocalMapper implements the influxql.Mapper interface for running map tasks over a shard that is local to this server
type LocalMapper struct {
cursorsEmpty bool // boolean that lets us know if the cursors are empty
decoder *FieldCodec // decoder for the raw data bytes
filters []influxql.Expr // filters for each series
cursors []*shardCursor // bolt cursors for each series id
seriesKeys []string // seriesKeys to be read from this shard
shard *Shard // original shard
db *bolt.DB // bolt store for the shard accessed by this mapper
txn *bolt.Tx // read transactions by shard id
job *influxql.MapReduceJob // the MRJob this mapper belongs to
mapFunc influxql.MapFunc // the map func
fieldID uint8 // the field ID associated with the mapFunc curently being run
fieldName string // the field name associated with the mapFunc currently being run
keyBuffer []int64 // the current timestamp key for each cursor
valueBuffer [][]byte // the current value for each cursor
tmin int64 // the min of the current group by interval being iterated over
tmax int64 // the max of the current group by interval being iterated over
additionalNames []string // additional field or tag names that might be requested from the map function
whereFields []string // field names that occur in the where clause
selectFields []string // field names that occur in the select clause
selectTags []string // tag keys that occur in the select clause
isRaw bool // if the query is a non-aggregate query
interval int64 // the group by interval of the query, if any
limit uint64 // used for raw queries for LIMIT
perIntervalLimit int // used for raw queries to determine how far into a chunk we are
chunkSize int // used for raw queries to determine how much data to read before flushing to client
}
// Open opens the LocalMapper.
func (l *LocalMapper) Open() error {
// Obtain shard lock to copy in-cache points.
l.shard.mu.Lock()
defer l.shard.mu.Unlock()
// Open the data store
txn, err := l.db.Begin(false)
if err != nil {
return err
}
l.txn = txn
// create a bolt cursor for each unique series id
l.cursors = make([]*shardCursor, len(l.seriesKeys))
for i, key := range l.seriesKeys {
// Retrieve key bucket.
b := l.txn.Bucket([]byte(key))
// Ignore if there is no bucket or points in the cache.
partitionID := WALPartition([]byte(key))
if b == nil && len(l.shard.cache[partitionID][key]) == 0 {
continue
}
// Retrieve a copy of the in-cache points for the key.
cache := make([][]byte, len(l.shard.cache[partitionID][key]))
copy(cache, l.shard.cache[partitionID][key])
// Build a cursor that merges the bucket and cache together.
cur := &shardCursor{cache: cache}
if b != nil {
cur.cursor = b.Cursor()
}
l.cursors[i] = cur
}
return nil
}
// Close closes the LocalMapper.
func (l *LocalMapper) Close() {
if l.txn != nil {
_ = l.txn.Rollback()
}
}
// Begin will set up the mapper to run the map function for a given aggregate call starting at the passed in time
func (l *LocalMapper) Begin(c *influxql.Call, startingTime int64, chunkSize int) error {
// set up the buffers. These ensure that we return data in time order
mapFunc, err := influxql.InitializeMapFunc(c)
if err != nil {
return err
}
l.mapFunc = mapFunc
l.keyBuffer = make([]int64, len(l.cursors))
l.valueBuffer = make([][]byte, len(l.cursors))
l.chunkSize = chunkSize
l.tmin = startingTime
var isCountDistinct bool
// determine if this is a raw data query with a single field, multiple fields, or an aggregate
var fieldName string
if c == nil { // its a raw data query
l.isRaw = true
if len(l.selectFields) == 1 {
fieldName = l.selectFields[0]
}
// if they haven't set a limit, just set it to the max int size
if l.limit == 0 {
l.limit = math.MaxUint64
}
} else {
// Check for calls like `derivative(mean(value), 1d)`
var nested *influxql.Call = c
if fn, ok := c.Args[0].(*influxql.Call); ok {
nested = fn
}
switch lit := nested.Args[0].(type) {
case *influxql.VarRef:
fieldName = lit.Val
case *influxql.Distinct:
if c.Name != "count" {
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = true
fieldName = lit.Val
default:
return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
}
isCountDistinct = isCountDistinct || (c.Name == "count" && nested.Name == "distinct")
}
// set up the field info if a specific field was set for this mapper
if fieldName != "" {
fid, err := l.decoder.FieldIDByName(fieldName)
if err != nil {
switch {
case c != nil && c.Name == "distinct":
return fmt.Errorf(`%s isn't a field on measurement %s; to query the unique values for a tag use SHOW TAG VALUES FROM %[2]s WITH KEY = "%[1]s`, fieldName, l.job.MeasurementName)
case isCountDistinct:
return fmt.Errorf("%s isn't a field on measurement %s; count(distinct) on tags isn't yet supported", fieldName, l.job.MeasurementName)
}
}
l.fieldID = fid
l.fieldName = fieldName
}
// seek the bolt cursors and fill the buffers
for i, c := range l.cursors {
// this series may have never been written in this shard group (time range) so the cursor would be nil
if c == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
k, v := c.Seek(u64tob(uint64(l.job.TMin)))
if k == nil {
l.keyBuffer[i] = 0
l.valueBuffer[i] = nil
continue
}
l.cursorsEmpty = false
t := int64(btou64(k))
l.keyBuffer[i] = t
l.valueBuffer[i] = v
}
return nil
}
// NextInterval will get the time ordered next interval of the given interval size from the mapper. This is a
// forward only operation from the start time passed into Begin. Will return nil when there is no more data to be read.
// If this is a raw query, interval should be the max time to hit in the query
func (l *LocalMapper) NextInterval() (interface{}, error) {
if l.cursorsEmpty || l.tmin > l.job.TMax {
return nil, nil
}
// after we call to the mapper, this will be the tmin for the next interval.
nextMin := l.tmin + l.interval
// Set the upper bound of the interval.
if l.isRaw {
l.perIntervalLimit = l.chunkSize
} else if l.interval > 0 {
// Set tmax to ensure that the interval lands on the boundary of the interval
if l.tmin%l.interval != 0 {
// the first interval in a query with a group by may be smaller than the others. This happens when they have a
// where time > clause that is in the middle of the bucket that the group by time creates. That will be the
// case on the first interval when the tmin % the interval isn't equal to zero
nextMin = l.tmin/l.interval*l.interval + l.interval
}
l.tmax = nextMin - 1
}
// Execute the map function. This local mapper acts as the iterator
val := l.mapFunc(l)
// see if all the cursors are empty
l.cursorsEmpty = true
for _, k := range l.keyBuffer {
if k != 0 {
l.cursorsEmpty = false
break
}
}
// Move the interval forward if it's not a raw query. For raw queries we use the limit to advance intervals.
if !l.isRaw {
l.tmin = nextMin
}
return val, nil
}
// Next returns the next matching timestamped value for the LocalMapper.
func (l *LocalMapper) Next() (seriesKey string, timestamp int64, value interface{}) {
for {
// if it's a raw query and we've hit the limit of the number of points to read in
// for either this chunk or for the absolute query, bail
if l.isRaw && (l.limit == 0 || l.perIntervalLimit == 0) {
return "", int64(0), nil
}
// find the minimum timestamp
min := -1
minKey := int64(math.MaxInt64)
for i, k := range l.keyBuffer {
if k != 0 && k <= l.tmax && k < minKey && k >= l.tmin {
min = i
minKey = k
}
}
// return if there is no more data in this group by interval
if min == -1 {
return "", 0, nil
}
// set the current timestamp and seriesID
timestamp = l.keyBuffer[min]
seriesKey = l.seriesKeys[min]
// decode either the value, or values we need. Also filter if necessary
var value interface{}
var err error
if l.isRaw && len(l.selectFields) > 1 {
if fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min]); err == nil {
value = fieldsWithNames
// if there's a where clause, make sure we don't need to filter this value
if l.filters[min] != nil {
if !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
} else {
value, err = l.decoder.DecodeByID(l.fieldID, l.valueBuffer[min])
// if there's a where clase, see if we need to filter
if l.filters[min] != nil {
// see if the where is only on this field or on one or more other fields. if the latter, we'll have to decode everything
if len(l.whereFields) == 1 && l.whereFields[0] == l.fieldName {
if !matchesWhere(l.filters[min], map[string]interface{}{l.fieldName: value}) {
value = nil
}
} else { // decode everything
fieldsWithNames, err := l.decoder.DecodeFieldsWithNames(l.valueBuffer[min])
if err != nil || !matchesWhere(l.filters[min], fieldsWithNames) {
value = nil
}
}
}
}
// advance the cursor
nextKey, nextVal := l.cursors[min].Next()
if nextKey == nil {
l.keyBuffer[min] = 0
} else {
l.keyBuffer[min] = int64(btou64(nextKey))
}
l.valueBuffer[min] = nextVal
// if the value didn't match our filter or if we didn't find the field keep iterating
if err != nil || value == nil {
continue
}
// if it's a raw query, we always limit the amount we read in
if l.isRaw {
l.limit--
l.perIntervalLimit--
}
return seriesKey, timestamp, value
}
}
// IsEmpty returns true if either all cursors are nil or all cursors are past the passed in max time
func (l *LocalMapper) IsEmpty(tmax int64) bool {
if l.cursorsEmpty || l.limit == 0 {
return true
}
// look at the next time for each cursor
for _, t := range l.keyBuffer {
// if the time is less than the max, we haven't emptied this mapper yet
if t != 0 && t <= tmax {
return false
}
}
return true
}
// matchesFilter returns true if the value matches the where clause
func matchesWhere(f influxql.Expr, fields map[string]interface{}) bool {
if ok, _ := influxql.Eval(f, fields).(bool); !ok {
return false
}
return true
}
// btou64 converts an 8-byte slice into an uint64.
func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) }
| {
selectFields = append(selectFields, n)
continue
} | conditional_block |
testing.py | """Tasks related to testing code"""
import logging
import json
import os
import re
from inspect import getfile
from pathlib import Path
from time import sleep
from typing import Callable, List, Optional, Union, Dict
from unittest import mock
from functools import wraps
from requests import Response, ConnectionError
from tamr_unify_client.operation import Operation
from tamr_toolbox import utils
LOGGER = logging.getLogger(__name__)
WINDOWS_RESERVED_CHARACTER_MAP = {
"<": "lt",
">": "gt",
":": "colon",
'"': "dquote",
"/": "fslash",
"\\": "bslash",
"|": "pipe",
"?": "qmark",
"*": "asterisk",
}
def _response_to_json(resp: Response, ip_dict: Dict[str, int]) -> str:
"""Converts a Response object into json string readable by the responses mocking library
Args:
resp: Response from a Tamr API call
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
Returns:
The response represented as a json string
"""
if resp.encoding is None:
resp.encoding = "utf-8"
resp_log = {
"method": resp.request.method,
"url": _anonymize_url(resp.request.url, ip_dict),
"status": resp.status_code,
"content_type": resp.headers.get("Content-Type"),
"body": resp.text,
}
return json.dumps(resp_log, ensure_ascii=False)
def _anonymize_url(url: str, ip_dict: Dict[str, int]) -> str:
"""Returns a anonymized url. Updates the dictionary inplace if a new ip is encountered
Args:
url: A URL
ip_dict: Previously encountered IP addresses and an assigned numeric value
Returns:
URL with the IP address anonymized
"""
regex_match = re.match(r"(?i)(^https?://)(.*?)([/:].*$)", url)
ip = regex_match.group(2)
try:
num = ip_dict[ip]
except KeyError:
ip_dict[ip] = len(ip_dict.values()) + 1
num = ip_dict[ip]
return f"{regex_match.group(1)}ip-{num:05d}{regex_match.group(3)}"
def _collect_operation_calls(
*, response: Response, poll_interval_seconds: int = 3
) -> List[Response]:
"""If the provided response is an Operation, wait for the operation to complete and
return responses related to that operation.
Args:
response: A previous Response generated from the same Tamr client
poll_interval_seconds: Time interval (in seconds) between subsequent polls
Returns:
Responses related to polling the operation
"""
client = utils.client._from_response(response)
op = Operation.from_response(client, response)
LOGGER.info(f"Waiting for operation to complete: {op}")
request_while_pending = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
while op.state == "PENDING":
op = op.poll()
sleep(poll_interval_seconds)
request_while_running = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
op.wait()
request_when_complete = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
return [request_while_pending, request_while_running, request_when_complete]
def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:
"""Appends a response to a file. If the response returned is
a Tamr Operation, poll the operation until complete and log those responses as well
Args:
log_path: File to write the response to
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
response: The response to log
"""
LOGGER.info(f"logged request: {response.url}")
with log_path.open(mode="a", encoding="utf-8") as f:
all_responses = [response]
# Poll and wait for operations, if applicable
is_operation_request = bool(
re.match(re.compile(".*/api/versioned/v1/operations/.*"), response.url)
)
is_get_request = response.request.method == "GET"
if is_get_request and is_operation_request:
wait_resp = _collect_operation_calls(response=response)
all_responses.extend(wait_resp)
all_json = [_response_to_json(r, ip_dict) for r in all_responses]
f.writelines([f"{j}\n" for j in all_json])
def _build_response_log_path(
*, test_func: Callable, response_logs_dir: Optional[Union[str, Path]], **kwargs,
) -> Path:
"""Returns a file path for API response logs for a given test and test parameters
Args:
test_func: The test function
**kwargs: Arguments to the test function
Returns:
File path for the API response logs
"""
# Convert test arguments and their values to a string, skipping ignored arguments
test_params = "_".join([f"{k}={v}" for k, v in {**kwargs}.items()])
# Remove reserved characters from ndjson name
for char in WINDOWS_RESERVED_CHARACTER_MAP:
test_params = test_params.replace(char, WINDOWS_RESERVED_CHARACTER_MAP[char])
if len(test_params) > 0:
test_params = "__" + test_params
if response_logs_dir is None:
# If no directory is provided, create a directory with the name of the test file
# in a directory called "response_logs" located in the same directory as the test file
dir_matcher = re.match(r"(.*)(?:\\|/)(.*).py", str(Path(getfile(test_func))))
response_logs_dir = f"{dir_matcher.group(1)}/response_logs/{dir_matcher.group(2)}"
return Path(f"{response_logs_dir}/{test_func.__name__}{test_params}.ndjson")
def mock_api(
*, response_logs_dir: Optional[Union[str, Path]] = None, enforce_online_test=False
) -> Callable:
"""Decorator for `pytest` tests that mocks API requests by reading a file of
pre-generated responses. Will generate responses file based on a real connection
if pre-generated responses are not found.
Args:
response_logs_dir: Directory to read/write response logs
enforce_online_test: Whether an online test should be run, even if a response log
already exists
Returns:
Decorated function
"""
def wrap(test_function: Callable):
@wraps(test_function)
def wrapped(**kwargs):
response_log_path = _build_response_log_path(
test_func=test_function, response_logs_dir=response_logs_dir, **kwargs,
)
if response_log_path.exists() and enforce_online_test:
# Delete the file to enforce an online test
response_log_path.unlink()
if response_log_path.exists():
try:
LOGGER.info(f"Running offline test based on file at {response_log_path}")
_run_offline_test(
response_log_path=response_log_path, test_function=test_function, **kwargs,
)
except ConnectionError as e:
msg = (
f"A required API call was missing from response logs file for this "
f"offline test ({response_log_path}). The response log file must be "
f"regenerated. Delete the existing file to automatically regenerate a "
f"new one. Caused by: {e}"
)
LOGGER.error(msg)
raise ConnectionError(e)
else:
_run_online_test(
response_log_path=response_log_path, test_function=test_function, **kwargs
)
return wrapped
return wrap
# Handle ModuleNotFoundError to allow tamr_toolbox to be used when the optional dependency
# `responses` is not installed
try:
import responses
# Stores the original _real_send function of requests
_BASE_FIND_MATCH = responses.RequestsMock._find_match
# Stores the original _real_send function of responses
_BASE_SEND_REAL = responses._real_send
@responses.activate
@mock.patch.object(Operation.wait, "__defaults__", (0, None)) # sets operation wait time to 0
def _run_offline_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against saved API responses located in a file
Args: | response_log_path: Location of saved API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
with response_log_path.open(encoding="utf-8") as f:
for line in f:
response = json.loads(line)
responses.add(**response)
ip_lookup = {}
def _find_anonymized_match(self, request):
"""Allows responses library to match requests for an ip address to match to an
anonymized ip address
"""
request.url = _anonymize_url(request.url, ip_lookup)
return _BASE_FIND_MATCH(self, request)
with mock.patch("responses.RequestsMock._find_match", new=_find_anonymized_match):
test_function(**kwargs)
@responses.activate
def _run_online_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against a Tamr instance and saves the API responses to a file
Args:
response_log_path: Location to save API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
LOGGER.info(
f"Online test running against Tamr instance. "
f"Creating new file at {response_log_path}. This may take a while ..."
)
os.makedirs(response_log_path.parent, exist_ok=True)
response_log_path.touch()
# Each time an API call is made, allow it to pass through responses and make a real call
# Each time a real call is made, log the response in the response file
responses.add_passthru(re.compile(".*"))
ip_lookup = {}
def _send_real_with_log(*args, **kwargs) -> Response:
"""Logs the response from BASE_SEND_REAL
Args:
*args: The positional arguments for BASE_SEND_REAL
**kwargs: The keyword arguments for BASE_SEND_REAL
Returns:
The response from the call
"""
response = _BASE_SEND_REAL(*args, **kwargs)
# Prevent recursion
with mock.patch("responses._real_send", new=_BASE_SEND_REAL):
_log_response(log_path=response_log_path, response=response, ip_dict=ip_lookup)
return response
with mock.patch("responses._real_send", new=_send_real_with_log):
test_function(**kwargs)
# Setting the passthru above permanently changes state for online testing
# Reset passthru to default
responses.mock.passthru_prefixes = ()
responses._default_mock.passthru_prefixes = ()
except ModuleNotFoundError as err:
# Ensure exception is due to responses package being missing
if err.msg != "No module named 'responses'":
raise err
def _run_offline_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
def _run_online_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401 | random_line_split | |
testing.py | """Tasks related to testing code"""
import logging
import json
import os
import re
from inspect import getfile
from pathlib import Path
from time import sleep
from typing import Callable, List, Optional, Union, Dict
from unittest import mock
from functools import wraps
from requests import Response, ConnectionError
from tamr_unify_client.operation import Operation
from tamr_toolbox import utils
LOGGER = logging.getLogger(__name__)
WINDOWS_RESERVED_CHARACTER_MAP = {
"<": "lt",
">": "gt",
":": "colon",
'"': "dquote",
"/": "fslash",
"\\": "bslash",
"|": "pipe",
"?": "qmark",
"*": "asterisk",
}
def _response_to_json(resp: Response, ip_dict: Dict[str, int]) -> str:
"""Converts a Response object into json string readable by the responses mocking library
Args:
resp: Response from a Tamr API call
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
Returns:
The response represented as a json string
"""
if resp.encoding is None:
resp.encoding = "utf-8"
resp_log = {
"method": resp.request.method,
"url": _anonymize_url(resp.request.url, ip_dict),
"status": resp.status_code,
"content_type": resp.headers.get("Content-Type"),
"body": resp.text,
}
return json.dumps(resp_log, ensure_ascii=False)
def _anonymize_url(url: str, ip_dict: Dict[str, int]) -> str:
"""Returns a anonymized url. Updates the dictionary inplace if a new ip is encountered
Args:
url: A URL
ip_dict: Previously encountered IP addresses and an assigned numeric value
Returns:
URL with the IP address anonymized
"""
regex_match = re.match(r"(?i)(^https?://)(.*?)([/:].*$)", url)
ip = regex_match.group(2)
try:
num = ip_dict[ip]
except KeyError:
ip_dict[ip] = len(ip_dict.values()) + 1
num = ip_dict[ip]
return f"{regex_match.group(1)}ip-{num:05d}{regex_match.group(3)}"
def _collect_operation_calls(
*, response: Response, poll_interval_seconds: int = 3
) -> List[Response]:
"""If the provided response is an Operation, wait for the operation to complete and
return responses related to that operation.
Args:
response: A previous Response generated from the same Tamr client
poll_interval_seconds: Time interval (in seconds) between subsequent polls
Returns:
Responses related to polling the operation
"""
client = utils.client._from_response(response)
op = Operation.from_response(client, response)
LOGGER.info(f"Waiting for operation to complete: {op}")
request_while_pending = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
while op.state == "PENDING":
op = op.poll()
sleep(poll_interval_seconds)
request_while_running = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
op.wait()
request_when_complete = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
return [request_while_pending, request_while_running, request_when_complete]
def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:
"""Appends a response to a file. If the response returned is
a Tamr Operation, poll the operation until complete and log those responses as well
Args:
log_path: File to write the response to
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
response: The response to log
"""
LOGGER.info(f"logged request: {response.url}")
with log_path.open(mode="a", encoding="utf-8") as f:
all_responses = [response]
# Poll and wait for operations, if applicable
is_operation_request = bool(
re.match(re.compile(".*/api/versioned/v1/operations/.*"), response.url)
)
is_get_request = response.request.method == "GET"
if is_get_request and is_operation_request:
wait_resp = _collect_operation_calls(response=response)
all_responses.extend(wait_resp)
all_json = [_response_to_json(r, ip_dict) for r in all_responses]
f.writelines([f"{j}\n" for j in all_json])
def _build_response_log_path(
*, test_func: Callable, response_logs_dir: Optional[Union[str, Path]], **kwargs,
) -> Path:
"""Returns a file path for API response logs for a given test and test parameters
Args:
test_func: The test function
**kwargs: Arguments to the test function
Returns:
File path for the API response logs
"""
# Convert test arguments and their values to a string, skipping ignored arguments
test_params = "_".join([f"{k}={v}" for k, v in {**kwargs}.items()])
# Remove reserved characters from ndjson name
for char in WINDOWS_RESERVED_CHARACTER_MAP:
test_params = test_params.replace(char, WINDOWS_RESERVED_CHARACTER_MAP[char])
if len(test_params) > 0:
test_params = "__" + test_params
if response_logs_dir is None:
# If no directory is provided, create a directory with the name of the test file
# in a directory called "response_logs" located in the same directory as the test file
dir_matcher = re.match(r"(.*)(?:\\|/)(.*).py", str(Path(getfile(test_func))))
response_logs_dir = f"{dir_matcher.group(1)}/response_logs/{dir_matcher.group(2)}"
return Path(f"{response_logs_dir}/{test_func.__name__}{test_params}.ndjson")
def mock_api(
*, response_logs_dir: Optional[Union[str, Path]] = None, enforce_online_test=False
) -> Callable:
|
# Handle ModuleNotFoundError to allow tamr_toolbox to be used when the optional dependency
# `responses` is not installed
try:
import responses
# Stores the original _real_send function of requests
_BASE_FIND_MATCH = responses.RequestsMock._find_match
# Stores the original _real_send function of responses
_BASE_SEND_REAL = responses._real_send
@responses.activate
@mock.patch.object(Operation.wait, "__defaults__", (0, None)) # sets operation wait time to 0
def _run_offline_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against saved API responses located in a file
Args:
response_log_path: Location of saved API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
with response_log_path.open(encoding="utf-8") as f:
for line in f:
response = json.loads(line)
responses.add(**response)
ip_lookup = {}
def _find_anonymized_match(self, request):
"""Allows responses library to match requests for an ip address to match to an
anonymized ip address
"""
request.url = _anonymize_url(request.url, ip_lookup)
return _BASE_FIND_MATCH(self, request)
with mock.patch("responses.RequestsMock._find_match", new=_find_anonymized_match):
test_function(**kwargs)
@responses.activate
def _run_online_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against a Tamr instance and saves the API responses to a file
Args:
response_log_path: Location to save API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
LOGGER.info(
f"Online test running against Tamr instance. "
f"Creating new file at {response_log_path}. This may take a while ..."
)
os.makedirs(response_log_path.parent, exist_ok=True)
response_log_path.touch()
# Each time an API call is made, allow it to pass through responses and make a real call
# Each time a real call is made, log the response in the response file
responses.add_passthru(re.compile(".*"))
ip_lookup = {}
def _send_real_with_log(*args, **kwargs) -> Response:
"""Logs the response from BASE_SEND_REAL
Args:
*args: The positional arguments for BASE_SEND_REAL
**kwargs: The keyword arguments for BASE_SEND_REAL
Returns:
The response from the call
"""
response = _BASE_SEND_REAL(*args, **kwargs)
# Prevent recursion
with mock.patch("responses._real_send", new=_BASE_SEND_REAL):
_log_response(log_path=response_log_path, response=response, ip_dict=ip_lookup)
return response
with mock.patch("responses._real_send", new=_send_real_with_log):
test_function(**kwargs)
# Setting the passthru above permanently changes state for online testing
# Reset passthru to default
responses.mock.passthru_prefixes = ()
responses._default_mock.passthru_prefixes = ()
except ModuleNotFoundError as err:
# Ensure exception is due to responses package being missing
if err.msg != "No module named 'responses'":
raise err
def _run_offline_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
def _run_online_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
| """Decorator for `pytest` tests that mocks API requests by reading a file of
pre-generated responses. Will generate responses file based on a real connection
if pre-generated responses are not found.
Args:
response_logs_dir: Directory to read/write response logs
enforce_online_test: Whether an online test should be run, even if a response log
already exists
Returns:
Decorated function
"""
def wrap(test_function: Callable):
@wraps(test_function)
def wrapped(**kwargs):
response_log_path = _build_response_log_path(
test_func=test_function, response_logs_dir=response_logs_dir, **kwargs,
)
if response_log_path.exists() and enforce_online_test:
# Delete the file to enforce an online test
response_log_path.unlink()
if response_log_path.exists():
try:
LOGGER.info(f"Running offline test based on file at {response_log_path}")
_run_offline_test(
response_log_path=response_log_path, test_function=test_function, **kwargs,
)
except ConnectionError as e:
msg = (
f"A required API call was missing from response logs file for this "
f"offline test ({response_log_path}). The response log file must be "
f"regenerated. Delete the existing file to automatically regenerate a "
f"new one. Caused by: {e}"
)
LOGGER.error(msg)
raise ConnectionError(e)
else:
_run_online_test(
response_log_path=response_log_path, test_function=test_function, **kwargs
)
return wrapped
return wrap | identifier_body |
testing.py | """Tasks related to testing code"""
import logging
import json
import os
import re
from inspect import getfile
from pathlib import Path
from time import sleep
from typing import Callable, List, Optional, Union, Dict
from unittest import mock
from functools import wraps
from requests import Response, ConnectionError
from tamr_unify_client.operation import Operation
from tamr_toolbox import utils
LOGGER = logging.getLogger(__name__)
WINDOWS_RESERVED_CHARACTER_MAP = {
"<": "lt",
">": "gt",
":": "colon",
'"': "dquote",
"/": "fslash",
"\\": "bslash",
"|": "pipe",
"?": "qmark",
"*": "asterisk",
}
def _response_to_json(resp: Response, ip_dict: Dict[str, int]) -> str:
"""Converts a Response object into json string readable by the responses mocking library
Args:
resp: Response from a Tamr API call
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
Returns:
The response represented as a json string
"""
if resp.encoding is None:
resp.encoding = "utf-8"
resp_log = {
"method": resp.request.method,
"url": _anonymize_url(resp.request.url, ip_dict),
"status": resp.status_code,
"content_type": resp.headers.get("Content-Type"),
"body": resp.text,
}
return json.dumps(resp_log, ensure_ascii=False)
def _anonymize_url(url: str, ip_dict: Dict[str, int]) -> str:
"""Returns a anonymized url. Updates the dictionary inplace if a new ip is encountered
Args:
url: A URL
ip_dict: Previously encountered IP addresses and an assigned numeric value
Returns:
URL with the IP address anonymized
"""
regex_match = re.match(r"(?i)(^https?://)(.*?)([/:].*$)", url)
ip = regex_match.group(2)
try:
num = ip_dict[ip]
except KeyError:
ip_dict[ip] = len(ip_dict.values()) + 1
num = ip_dict[ip]
return f"{regex_match.group(1)}ip-{num:05d}{regex_match.group(3)}"
def _collect_operation_calls(
*, response: Response, poll_interval_seconds: int = 3
) -> List[Response]:
"""If the provided response is an Operation, wait for the operation to complete and
return responses related to that operation.
Args:
response: A previous Response generated from the same Tamr client
poll_interval_seconds: Time interval (in seconds) between subsequent polls
Returns:
Responses related to polling the operation
"""
client = utils.client._from_response(response)
op = Operation.from_response(client, response)
LOGGER.info(f"Waiting for operation to complete: {op}")
request_while_pending = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
while op.state == "PENDING":
op = op.poll()
sleep(poll_interval_seconds)
request_while_running = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
op.wait()
request_when_complete = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
return [request_while_pending, request_while_running, request_when_complete]
def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:
"""Appends a response to a file. If the response returned is
a Tamr Operation, poll the operation until complete and log those responses as well
Args:
log_path: File to write the response to
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
response: The response to log
"""
LOGGER.info(f"logged request: {response.url}")
with log_path.open(mode="a", encoding="utf-8") as f:
all_responses = [response]
# Poll and wait for operations, if applicable
is_operation_request = bool(
re.match(re.compile(".*/api/versioned/v1/operations/.*"), response.url)
)
is_get_request = response.request.method == "GET"
if is_get_request and is_operation_request:
wait_resp = _collect_operation_calls(response=response)
all_responses.extend(wait_resp)
all_json = [_response_to_json(r, ip_dict) for r in all_responses]
f.writelines([f"{j}\n" for j in all_json])
def _build_response_log_path(
*, test_func: Callable, response_logs_dir: Optional[Union[str, Path]], **kwargs,
) -> Path:
"""Returns a file path for API response logs for a given test and test parameters
Args:
test_func: The test function
**kwargs: Arguments to the test function
Returns:
File path for the API response logs
"""
# Convert test arguments and their values to a string, skipping ignored arguments
test_params = "_".join([f"{k}={v}" for k, v in {**kwargs}.items()])
# Remove reserved characters from ndjson name
for char in WINDOWS_RESERVED_CHARACTER_MAP:
test_params = test_params.replace(char, WINDOWS_RESERVED_CHARACTER_MAP[char])
if len(test_params) > 0:
test_params = "__" + test_params
if response_logs_dir is None:
# If no directory is provided, create a directory with the name of the test file
# in a directory called "response_logs" located in the same directory as the test file
dir_matcher = re.match(r"(.*)(?:\\|/)(.*).py", str(Path(getfile(test_func))))
response_logs_dir = f"{dir_matcher.group(1)}/response_logs/{dir_matcher.group(2)}"
return Path(f"{response_logs_dir}/{test_func.__name__}{test_params}.ndjson")
def mock_api(
*, response_logs_dir: Optional[Union[str, Path]] = None, enforce_online_test=False
) -> Callable:
"""Decorator for `pytest` tests that mocks API requests by reading a file of
pre-generated responses. Will generate responses file based on a real connection
if pre-generated responses are not found.
Args:
response_logs_dir: Directory to read/write response logs
enforce_online_test: Whether an online test should be run, even if a response log
already exists
Returns:
Decorated function
"""
def wrap(test_function: Callable):
@wraps(test_function)
def | (**kwargs):
response_log_path = _build_response_log_path(
test_func=test_function, response_logs_dir=response_logs_dir, **kwargs,
)
if response_log_path.exists() and enforce_online_test:
# Delete the file to enforce an online test
response_log_path.unlink()
if response_log_path.exists():
try:
LOGGER.info(f"Running offline test based on file at {response_log_path}")
_run_offline_test(
response_log_path=response_log_path, test_function=test_function, **kwargs,
)
except ConnectionError as e:
msg = (
f"A required API call was missing from response logs file for this "
f"offline test ({response_log_path}). The response log file must be "
f"regenerated. Delete the existing file to automatically regenerate a "
f"new one. Caused by: {e}"
)
LOGGER.error(msg)
raise ConnectionError(e)
else:
_run_online_test(
response_log_path=response_log_path, test_function=test_function, **kwargs
)
return wrapped
return wrap
# Handle ModuleNotFoundError to allow tamr_toolbox to be used when the optional dependency
# `responses` is not installed
try:
import responses
# Stores the original _real_send function of requests
_BASE_FIND_MATCH = responses.RequestsMock._find_match
# Stores the original _real_send function of responses
_BASE_SEND_REAL = responses._real_send
@responses.activate
@mock.patch.object(Operation.wait, "__defaults__", (0, None)) # sets operation wait time to 0
def _run_offline_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against saved API responses located in a file
Args:
response_log_path: Location of saved API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
with response_log_path.open(encoding="utf-8") as f:
for line in f:
response = json.loads(line)
responses.add(**response)
ip_lookup = {}
def _find_anonymized_match(self, request):
"""Allows responses library to match requests for an ip address to match to an
anonymized ip address
"""
request.url = _anonymize_url(request.url, ip_lookup)
return _BASE_FIND_MATCH(self, request)
with mock.patch("responses.RequestsMock._find_match", new=_find_anonymized_match):
test_function(**kwargs)
@responses.activate
def _run_online_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against a Tamr instance and saves the API responses to a file
Args:
response_log_path: Location to save API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
LOGGER.info(
f"Online test running against Tamr instance. "
f"Creating new file at {response_log_path}. This may take a while ..."
)
os.makedirs(response_log_path.parent, exist_ok=True)
response_log_path.touch()
# Each time an API call is made, allow it to pass through responses and make a real call
# Each time a real call is made, log the response in the response file
responses.add_passthru(re.compile(".*"))
ip_lookup = {}
def _send_real_with_log(*args, **kwargs) -> Response:
"""Logs the response from BASE_SEND_REAL
Args:
*args: The positional arguments for BASE_SEND_REAL
**kwargs: The keyword arguments for BASE_SEND_REAL
Returns:
The response from the call
"""
response = _BASE_SEND_REAL(*args, **kwargs)
# Prevent recursion
with mock.patch("responses._real_send", new=_BASE_SEND_REAL):
_log_response(log_path=response_log_path, response=response, ip_dict=ip_lookup)
return response
with mock.patch("responses._real_send", new=_send_real_with_log):
test_function(**kwargs)
# Setting the passthru above permanently changes state for online testing
# Reset passthru to default
responses.mock.passthru_prefixes = ()
responses._default_mock.passthru_prefixes = ()
except ModuleNotFoundError as err:
# Ensure exception is due to responses package being missing
if err.msg != "No module named 'responses'":
raise err
def _run_offline_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
def _run_online_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
| wrapped | identifier_name |
testing.py | """Tasks related to testing code"""
import logging
import json
import os
import re
from inspect import getfile
from pathlib import Path
from time import sleep
from typing import Callable, List, Optional, Union, Dict
from unittest import mock
from functools import wraps
from requests import Response, ConnectionError
from tamr_unify_client.operation import Operation
from tamr_toolbox import utils
LOGGER = logging.getLogger(__name__)
WINDOWS_RESERVED_CHARACTER_MAP = {
"<": "lt",
">": "gt",
":": "colon",
'"': "dquote",
"/": "fslash",
"\\": "bslash",
"|": "pipe",
"?": "qmark",
"*": "asterisk",
}
def _response_to_json(resp: Response, ip_dict: Dict[str, int]) -> str:
"""Converts a Response object into json string readable by the responses mocking library
Args:
resp: Response from a Tamr API call
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
Returns:
The response represented as a json string
"""
if resp.encoding is None:
resp.encoding = "utf-8"
resp_log = {
"method": resp.request.method,
"url": _anonymize_url(resp.request.url, ip_dict),
"status": resp.status_code,
"content_type": resp.headers.get("Content-Type"),
"body": resp.text,
}
return json.dumps(resp_log, ensure_ascii=False)
def _anonymize_url(url: str, ip_dict: Dict[str, int]) -> str:
"""Returns a anonymized url. Updates the dictionary inplace if a new ip is encountered
Args:
url: A URL
ip_dict: Previously encountered IP addresses and an assigned numeric value
Returns:
URL with the IP address anonymized
"""
regex_match = re.match(r"(?i)(^https?://)(.*?)([/:].*$)", url)
ip = regex_match.group(2)
try:
num = ip_dict[ip]
except KeyError:
ip_dict[ip] = len(ip_dict.values()) + 1
num = ip_dict[ip]
return f"{regex_match.group(1)}ip-{num:05d}{regex_match.group(3)}"
def _collect_operation_calls(
*, response: Response, poll_interval_seconds: int = 3
) -> List[Response]:
"""If the provided response is an Operation, wait for the operation to complete and
return responses related to that operation.
Args:
response: A previous Response generated from the same Tamr client
poll_interval_seconds: Time interval (in seconds) between subsequent polls
Returns:
Responses related to polling the operation
"""
client = utils.client._from_response(response)
op = Operation.from_response(client, response)
LOGGER.info(f"Waiting for operation to complete: {op}")
request_while_pending = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
while op.state == "PENDING":
op = op.poll()
sleep(poll_interval_seconds)
request_while_running = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
op.wait()
request_when_complete = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
return [request_while_pending, request_while_running, request_when_complete]
def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:
"""Appends a response to a file. If the response returned is
a Tamr Operation, poll the operation until complete and log those responses as well
Args:
log_path: File to write the response to
ip_dict: Mapping of previously encountered IP addresses to their anonymization number
response: The response to log
"""
LOGGER.info(f"logged request: {response.url}")
with log_path.open(mode="a", encoding="utf-8") as f:
all_responses = [response]
# Poll and wait for operations, if applicable
is_operation_request = bool(
re.match(re.compile(".*/api/versioned/v1/operations/.*"), response.url)
)
is_get_request = response.request.method == "GET"
if is_get_request and is_operation_request:
wait_resp = _collect_operation_calls(response=response)
all_responses.extend(wait_resp)
all_json = [_response_to_json(r, ip_dict) for r in all_responses]
f.writelines([f"{j}\n" for j in all_json])
def _build_response_log_path(
*, test_func: Callable, response_logs_dir: Optional[Union[str, Path]], **kwargs,
) -> Path:
"""Returns a file path for API response logs for a given test and test parameters
Args:
test_func: The test function
**kwargs: Arguments to the test function
Returns:
File path for the API response logs
"""
# Convert test arguments and their values to a string, skipping ignored arguments
test_params = "_".join([f"{k}={v}" for k, v in {**kwargs}.items()])
# Remove reserved characters from ndjson name
for char in WINDOWS_RESERVED_CHARACTER_MAP:
test_params = test_params.replace(char, WINDOWS_RESERVED_CHARACTER_MAP[char])
if len(test_params) > 0:
test_params = "__" + test_params
if response_logs_dir is None:
# If no directory is provided, create a directory with the name of the test file
# in a directory called "response_logs" located in the same directory as the test file
dir_matcher = re.match(r"(.*)(?:\\|/)(.*).py", str(Path(getfile(test_func))))
response_logs_dir = f"{dir_matcher.group(1)}/response_logs/{dir_matcher.group(2)}"
return Path(f"{response_logs_dir}/{test_func.__name__}{test_params}.ndjson")
def mock_api(
*, response_logs_dir: Optional[Union[str, Path]] = None, enforce_online_test=False
) -> Callable:
"""Decorator for `pytest` tests that mocks API requests by reading a file of
pre-generated responses. Will generate responses file based on a real connection
if pre-generated responses are not found.
Args:
response_logs_dir: Directory to read/write response logs
enforce_online_test: Whether an online test should be run, even if a response log
already exists
Returns:
Decorated function
"""
def wrap(test_function: Callable):
@wraps(test_function)
def wrapped(**kwargs):
response_log_path = _build_response_log_path(
test_func=test_function, response_logs_dir=response_logs_dir, **kwargs,
)
if response_log_path.exists() and enforce_online_test:
# Delete the file to enforce an online test
response_log_path.unlink()
if response_log_path.exists():
try:
LOGGER.info(f"Running offline test based on file at {response_log_path}")
_run_offline_test(
response_log_path=response_log_path, test_function=test_function, **kwargs,
)
except ConnectionError as e:
msg = (
f"A required API call was missing from response logs file for this "
f"offline test ({response_log_path}). The response log file must be "
f"regenerated. Delete the existing file to automatically regenerate a "
f"new one. Caused by: {e}"
)
LOGGER.error(msg)
raise ConnectionError(e)
else:
_run_online_test(
response_log_path=response_log_path, test_function=test_function, **kwargs
)
return wrapped
return wrap
# Handle ModuleNotFoundError to allow tamr_toolbox to be used when the optional dependency
# `responses` is not installed
try:
import responses
# Stores the original _real_send function of requests
_BASE_FIND_MATCH = responses.RequestsMock._find_match
# Stores the original _real_send function of responses
_BASE_SEND_REAL = responses._real_send
@responses.activate
@mock.patch.object(Operation.wait, "__defaults__", (0, None)) # sets operation wait time to 0
def _run_offline_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against saved API responses located in a file
Args:
response_log_path: Location of saved API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
with response_log_path.open(encoding="utf-8") as f:
for line in f:
|
ip_lookup = {}
def _find_anonymized_match(self, request):
"""Allows responses library to match requests for an ip address to match to an
anonymized ip address
"""
request.url = _anonymize_url(request.url, ip_lookup)
return _BASE_FIND_MATCH(self, request)
with mock.patch("responses.RequestsMock._find_match", new=_find_anonymized_match):
test_function(**kwargs)
@responses.activate
def _run_online_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
"""Runs a test function against a Tamr instance and saves the API responses to a file
Args:
response_log_path: Location to save API responses
test_function: The function to test
**kwargs: Keyword arguments for the test function
"""
LOGGER.info(
f"Online test running against Tamr instance. "
f"Creating new file at {response_log_path}. This may take a while ..."
)
os.makedirs(response_log_path.parent, exist_ok=True)
response_log_path.touch()
# Each time an API call is made, allow it to pass through responses and make a real call
# Each time a real call is made, log the response in the response file
responses.add_passthru(re.compile(".*"))
ip_lookup = {}
def _send_real_with_log(*args, **kwargs) -> Response:
"""Logs the response from BASE_SEND_REAL
Args:
*args: The positional arguments for BASE_SEND_REAL
**kwargs: The keyword arguments for BASE_SEND_REAL
Returns:
The response from the call
"""
response = _BASE_SEND_REAL(*args, **kwargs)
# Prevent recursion
with mock.patch("responses._real_send", new=_BASE_SEND_REAL):
_log_response(log_path=response_log_path, response=response, ip_dict=ip_lookup)
return response
with mock.patch("responses._real_send", new=_send_real_with_log):
test_function(**kwargs)
# Setting the passthru above permanently changes state for online testing
# Reset passthru to default
responses.mock.passthru_prefixes = ()
responses._default_mock.passthru_prefixes = ()
except ModuleNotFoundError as err:
# Ensure exception is due to responses package being missing
if err.msg != "No module named 'responses'":
raise err
def _run_offline_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
def _run_online_test(*args, **kwargs):
"""Dummy function to raise the appropriate exception if the function is called without the
necessary package installed
"""
import responses # noqa: F401
| response = json.loads(line)
responses.add(**response) | conditional_block |
current_models.py | # -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.7 from Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))
beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))
beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))
beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
" -- Nav 1.8 models -- "
def nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.8 from Huang Waxman 20(14?) "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))
beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))
beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.8 used in Tigerholm model "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
u = Y[3]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))
beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))
sinf = 1/(1+np.exp((v+45.0)/8))
stau = 1/(alpha_s + beta_s)
alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))
beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))
uinf = 1/(1+np.exp((v+51.0)/8))
utau = 1.0/(alpha_u + beta_u)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
du = (uinf-u)/utau
return [dm, dh, ds, du]
def nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.8 model used in Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
" -- Nav 1.9 models -- "
def nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Huang Waxman 2014"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))
beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))
beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.019/(1 + np.exp((v+154.51)/11.46))
beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
def nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Maingret 2008"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
return [dm, dh, ds]
def nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.6 model from Zach Mainen 1994 "
m = Y[0] | h = Y[1]
v = voltage_clamp_func(t,voltage_clamp_params)
vhalf = -43.0
a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))
b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))
m_inf = a_m/(a_m + b_m)
m_tau = 1./(a_m + b_m)
vhalf_ha = -50.0
vhalf_hb = -75.0
q_h = 5.0
vhalf_inf = -72.0
qinf = 6.2
rate_ha = 0.0091
rate_hb = 0.024
a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))
b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))
h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))
h_tau = 1./(a_h + b_h)
dm = (m_inf-m)/m_tau
dh = (h_inf-h)/h_tau
return [dm, dh]
" Kv models "
def kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Tigerholm version of the Sheets et al. IKdr model "
" Model was developed from data recorded at 21 oC "
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v > -31.0:
tau = 0.16+0.8*np.exp(-0.0267*(v+11))
else:
tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))
ninf = 1/(1 + np.exp(-(v+45)/15.4))
ntau = tau/q10
dn = (ninf-n)/ntau
return [dn]
def km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of the IM current. Current is from multiple sources:
The voltage dependence of steady-state activation forthe KM current is from
Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
time constant as described by Passmore et al. (2003). To account for the
two time constants, weimplemented one fast (nf) and one slow (ns) gate,
combined as follows.
"""
# g = gbar * (0.25*ns + 0.75*nf)
v = voltage_clamp_func(t,voltage_clamp_params)
ns = Y[0]
nf = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v < -60.0:
nstau = 219.0*q10
else:
nstau = 13.0*v + 1000.0*q10
nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)
nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10
nftau = 1.0/(nftau_alpha + nftau_beta)
ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV
dns = (ninf-ns)/nstau
dnf = (ninf-nf)/nftau
return [dns,dnf]
def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of IA.
"""
# g = gbar * n * h
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
h = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4
ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10
hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))
htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10
# Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms
if htau < 5.0:
htau = 5.0
dn = (ninf-n)/ntau
dh = (hinf-h)/htau
return [dn,dh]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Jaffe et al. 1994 ICaL model.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
tfa = 1.
ki = 0.001 # (mM)
cao = 2.5 # Davidson (mM)
" To do: make cai variable as an input like voltage "
cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007
celsius = 37.
def alpha(v):
return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)
def beta(v):
return 0.29*np.exp(-v/10.86)
def KTF(celsius):
return ((25./293.15)*(celsius + 273.15))
def efun(z):
return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])
def calc_ghk(v, cai, cao):
f = KTF(celsius)/2
nu = v/f
return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)
a = alpha(v)
b = beta(v)
tau = 1./(tfa*(a + b))
minf = a/(a+b)
dm = (minf - m)/tau
""" Calculating the current
# h gate
h2 = ki/(ki+cai)
gcalbar = 0.003
ghk = calc_ghk(v,cai,cao)
ical = gcalbar*m*m*h2*ghk
"""
return [dm]
def can_mi():
"""
Model of N-type Ca current from Migliore 95
"""
pass
" HCN models "
def hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV
"""
v = voltage_clamp_func(t,voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
dns = (ninf_s - n_s)/tau_ns
dnf = (ninf_f - n_f)/tau_nf
return [dns, dnf]
def hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Tigerholm version of the Kouranova Ih model which is identical except
that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
dns = (ninf_s - n_s)/tau_ns
dnf = (ninf_f - n_f)/tau_nf
return [dns, dnf]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh] | random_line_split | |
current_models.py | # -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.7 from Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))
beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))
beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))
beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
" -- Nav 1.8 models -- "
def nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.8 from Huang Waxman 20(14?) "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))
beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))
beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.8 used in Tigerholm model "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
u = Y[3]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))
beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))
sinf = 1/(1+np.exp((v+45.0)/8))
stau = 1/(alpha_s + beta_s)
alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))
beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))
uinf = 1/(1+np.exp((v+51.0)/8))
utau = 1.0/(alpha_u + beta_u)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
du = (uinf-u)/utau
return [dm, dh, ds, du]
def nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.8 model used in Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
" -- Nav 1.9 models -- "
def nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Huang Waxman 2014"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))
beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))
beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.019/(1 + np.exp((v+154.51)/11.46))
beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
def nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Maingret 2008"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
return [dm, dh, ds]
def nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.6 model from Zach Mainen 1994 "
m = Y[0]
h = Y[1]
v = voltage_clamp_func(t,voltage_clamp_params)
vhalf = -43.0
a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))
b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))
m_inf = a_m/(a_m + b_m)
m_tau = 1./(a_m + b_m)
vhalf_ha = -50.0
vhalf_hb = -75.0
q_h = 5.0
vhalf_inf = -72.0
qinf = 6.2
rate_ha = 0.0091
rate_hb = 0.024
a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))
b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))
h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))
h_tau = 1./(a_h + b_h)
dm = (m_inf-m)/m_tau
dh = (h_inf-h)/h_tau
return [dm, dh]
" Kv models "
def kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Tigerholm version of the Sheets et al. IKdr model "
" Model was developed from data recorded at 21 oC "
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v > -31.0:
tau = 0.16+0.8*np.exp(-0.0267*(v+11))
else:
tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))
ninf = 1/(1 + np.exp(-(v+45)/15.4))
ntau = tau/q10
dn = (ninf-n)/ntau
return [dn]
def km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of the IM current. Current is from multiple sources:
The voltage dependence of steady-state activation forthe KM current is from
Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
time constant as described by Passmore et al. (2003). To account for the
two time constants, weimplemented one fast (nf) and one slow (ns) gate,
combined as follows.
"""
# g = gbar * (0.25*ns + 0.75*nf)
v = voltage_clamp_func(t,voltage_clamp_params)
ns = Y[0]
nf = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v < -60.0:
nstau = 219.0*q10
else:
nstau = 13.0*v + 1000.0*q10
nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)
nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10
nftau = 1.0/(nftau_alpha + nftau_beta)
ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV
dns = (ninf-ns)/nstau
dnf = (ninf-nf)/nftau
return [dns,dnf]
def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of IA.
"""
# g = gbar * n * h
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
h = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4
ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10
hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))
htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10
# Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms
if htau < 5.0:
htau = 5.0
dn = (ninf-n)/ntau
dh = (hinf-h)/htau
return [dn,dh]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Jaffe et al. 1994 ICaL model.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
tfa = 1.
ki = 0.001 # (mM)
cao = 2.5 # Davidson (mM)
" To do: make cai variable as an input like voltage "
cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007
celsius = 37.
def alpha(v):
return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)
def beta(v):
return 0.29*np.exp(-v/10.86)
def KTF(celsius):
return ((25./293.15)*(celsius + 273.15))
def efun(z):
return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])
def calc_ghk(v, cai, cao):
f = KTF(celsius)/2
nu = v/f
return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)
a = alpha(v)
b = beta(v)
tau = 1./(tfa*(a + b))
minf = a/(a+b)
dm = (minf - m)/tau
""" Calculating the current
# h gate
h2 = ki/(ki+cai)
gcalbar = 0.003
ghk = calc_ghk(v,cai,cao)
ical = gcalbar*m*m*h2*ghk
"""
return [dm]
def | ():
"""
Model of N-type Ca current from Migliore 95
"""
pass
" HCN models "
def hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV
"""
v = voltage_clamp_func(t,voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
dns = (ninf_s - n_s)/tau_ns
dnf = (ninf_f - n_f)/tau_nf
return [dns, dnf]
def hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Tigerholm version of the Kouranova Ih model which is identical except
that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
dns = (ninf_s - n_s)/tau_ns
dnf = (ninf_f - n_f)/tau_nf
return [dns, dnf]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
| can_mi | identifier_name |
current_models.py | # -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.7 from Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))
beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))
beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))
beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
" -- Nav 1.8 models -- "
def nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.8 from Huang Waxman 20(14?) "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))
beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))
beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.8 used in Tigerholm model "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
u = Y[3]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))
beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))
sinf = 1/(1+np.exp((v+45.0)/8))
stau = 1/(alpha_s + beta_s)
alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))
beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))
uinf = 1/(1+np.exp((v+51.0)/8))
utau = 1.0/(alpha_u + beta_u)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
du = (uinf-u)/utau
return [dm, dh, ds, du]
def nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.8 model used in Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
" -- Nav 1.9 models -- "
def nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Huang Waxman 2014"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))
beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))
beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.019/(1 + np.exp((v+154.51)/11.46))
beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
def nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Maingret 2008"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
return [dm, dh, ds]
def nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.6 model from Zach Mainen 1994 "
m = Y[0]
h = Y[1]
v = voltage_clamp_func(t,voltage_clamp_params)
vhalf = -43.0
a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))
b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))
m_inf = a_m/(a_m + b_m)
m_tau = 1./(a_m + b_m)
vhalf_ha = -50.0
vhalf_hb = -75.0
q_h = 5.0
vhalf_inf = -72.0
qinf = 6.2
rate_ha = 0.0091
rate_hb = 0.024
a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))
b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))
h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))
h_tau = 1./(a_h + b_h)
dm = (m_inf-m)/m_tau
dh = (h_inf-h)/h_tau
return [dm, dh]
" Kv models "
def kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Tigerholm version of the Sheets et al. IKdr model "
" Model was developed from data recorded at 21 oC "
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v > -31.0:
tau = 0.16+0.8*np.exp(-0.0267*(v+11))
else:
tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))
ninf = 1/(1 + np.exp(-(v+45)/15.4))
ntau = tau/q10
dn = (ninf-n)/ntau
return [dn]
def km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
|
def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of IA.
"""
# g = gbar * n * h
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
h = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4
ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10
hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))
htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10
# Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms
if htau < 5.0:
htau = 5.0
dn = (ninf-n)/ntau
dh = (hinf-h)/htau
return [dn,dh]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Jaffe et al. 1994 ICaL model.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
tfa = 1.
ki = 0.001 # (mM)
cao = 2.5 # Davidson (mM)
" To do: make cai variable as an input like voltage "
cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007
celsius = 37.
def alpha(v):
return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)
def beta(v):
return 0.29*np.exp(-v/10.86)
def KTF(celsius):
return ((25./293.15)*(celsius + 273.15))
def efun(z):
return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])
def calc_ghk(v, cai, cao):
f = KTF(celsius)/2
nu = v/f
return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)
a = alpha(v)
b = beta(v)
tau = 1./(tfa*(a + b))
minf = a/(a+b)
dm = (minf - m)/tau
""" Calculating the current
# h gate
h2 = ki/(ki+cai)
gcalbar = 0.003
ghk = calc_ghk(v,cai,cao)
ical = gcalbar*m*m*h2*ghk
"""
return [dm]
def can_mi():
"""
Model of N-type Ca current from Migliore 95
"""
pass
" HCN models "
def hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV
"""
v = voltage_clamp_func(t,voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
dns = (ninf_s - n_s)/tau_ns
dnf = (ninf_f - n_f)/tau_nf
return [dns, dnf]
def hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Tigerholm version of the Kouranova Ih model which is identical except
that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
n_s = Y[0]
n_f = Y[1]
ninf_s = 1/(1 + np.exp((v+87.2)/9.7))
ninf_f = ninf_s
if v > -70.0:
tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
else:
tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)
dns = (ninf_s - n_s)/tau_ns
dnf = (ninf_f - n_f)/tau_nf
return [dns, dnf]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
| """ Tigerholm version of the IM current. Current is from multiple sources:
The voltage dependence of steady-state activation forthe KM current is from
Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
time constant as described by Passmore et al. (2003). To account for the
two time constants, weimplemented one fast (nf) and one slow (ns) gate,
combined as follows.
"""
# g = gbar * (0.25*ns + 0.75*nf)
v = voltage_clamp_func(t,voltage_clamp_params)
ns = Y[0]
nf = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v < -60.0:
nstau = 219.0*q10
else:
nstau = 13.0*v + 1000.0*q10
nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)
nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10
nftau = 1.0/(nftau_alpha + nftau_beta)
ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV
dns = (ninf-ns)/nstau
dnf = (ninf-nf)/nftau
return [dns,dnf] | identifier_body |
current_models.py | # -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.7 from Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))
beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))
beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))
beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
" -- Nav 1.8 models -- "
def nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.8 from Huang Waxman 20(14?) "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))
beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))
beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
def nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Rat? Nav 1.8 used in Tigerholm model "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
s = Y[2]
u = Y[3]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))
beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))
sinf = 1/(1+np.exp((v+45.0)/8))
stau = 1/(alpha_s + beta_s)
alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))
beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))
uinf = 1/(1+np.exp((v+51.0)/8))
utau = 1.0/(alpha_u + beta_u)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
du = (uinf-u)/utau
return [dm, dh, ds, du]
def nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.8 model used in Choi Waxman 2011 "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
hinf = 1/(1+np.exp((v+32.2)/4))
htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
" -- Nav 1.9 models -- "
def nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Huang Waxman 2014"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))
beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))
beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
alpha_s = 0.019/(1 + np.exp((v+154.51)/11.46))
beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))
sinf = alpha_s/(alpha_s + beta_s)
stau = 1/(alpha_s + beta_s)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
ds = (sinf-s)/stau
return [dm, dh, ds]
def nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.9 model from Maingret 2008"
m = Y[0]
h = Y[1]
s = Y[2]
v = voltage_clamp_func(t,voltage_clamp_params)
return [dm, dh, ds]
def nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):
" Nav 1.6 model from Zach Mainen 1994 "
m = Y[0]
h = Y[1]
v = voltage_clamp_func(t,voltage_clamp_params)
vhalf = -43.0
a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))
b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))
m_inf = a_m/(a_m + b_m)
m_tau = 1./(a_m + b_m)
vhalf_ha = -50.0
vhalf_hb = -75.0
q_h = 5.0
vhalf_inf = -72.0
qinf = 6.2
rate_ha = 0.0091
rate_hb = 0.024
a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))
b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))
h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))
h_tau = 1./(a_h + b_h)
dm = (m_inf-m)/m_tau
dh = (h_inf-h)/h_tau
return [dm, dh]
" Kv models "
def kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
" Tigerholm version of the Sheets et al. IKdr model "
" Model was developed from data recorded at 21 oC "
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v > -31.0:
tau = 0.16+0.8*np.exp(-0.0267*(v+11))
else:
tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))
ninf = 1/(1 + np.exp(-(v+45)/15.4))
ntau = tau/q10
dn = (ninf-n)/ntau
return [dn]
def km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of the IM current. Current is from multiple sources:
The voltage dependence of steady-state activation forthe KM current is from
Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow
time constant as described by Passmore et al. (2003). To account for the
two time constants, weimplemented one fast (nf) and one slow (ns) gate,
combined as follows.
"""
# g = gbar * (0.25*ns + 0.75*nf)
v = voltage_clamp_func(t,voltage_clamp_params)
ns = Y[0]
nf = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
if v < -60.0:
nstau = 219.0*q10
else:
nstau = 13.0*v + 1000.0*q10
nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)
nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10
nftau = 1.0/(nftau_alpha + nftau_beta)
ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV
dns = (ninf-ns)/nstau
dnf = (ninf-nf)/nftau
return [dns,dnf]
def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
""" Tigerholm version of IA.
"""
# g = gbar * n * h
v = voltage_clamp_func(t,voltage_clamp_params)
n = Y[0]
h = Y[1]
q10 = 1.0#3.3 # Preserved in case it is useful but disabled
ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4
ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10
hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))
htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10
# Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms
if htau < 5.0:
htau = 5.0
dn = (ninf-n)/ntau
dh = (hinf-h)/htau
return [dn,dh]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):
"""
Jaffe et al. 1994 ICaL model.
"""
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
tfa = 1.
ki = 0.001 # (mM)
cao = 2.5 # Davidson (mM)
" To do: make cai variable as an input like voltage "
cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007
celsius = 37.
def alpha(v):
return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)
def beta(v):
return 0.29*np.exp(-v/10.86)
def KTF(celsius):
return ((25./293.15)*(celsius + 273.15))
def efun(z):
return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])
def calc_ghk(v, cai, cao):
f = KTF(celsius)/2
nu = v/f
return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)
a = alpha(v)
b = beta(v)
tau = 1./(tfa*(a + b))
minf = a/(a+b)
dm = (minf - m)/tau
""" Calculating the current
# h gate
h2 = ki/(ki+cai)
gcalbar = 0.003
ghk = calc_ghk(v,cai,cao)
ical = gcalbar*m*m*h2*ghk
"""
return [dm]
def can_mi():
"""
Model of N-type Ca current from Migliore 95
"""
pass
" HCN models "
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Kouranova Ih (HCN) model with slow and fast gates.

    Non-specific cation current (the reversal potential should be set at
    -30 mV when the current is computed). Returns [dns/dt, dnf/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]
    # Both gates relax toward the same steady-state value.
    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))
    if v > -70.0:
        slow_tau = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        fast_tau = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        slow_tau = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        fast_tau = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    return [(steady - slow) / slow_tau, (steady - fast) / fast_tau]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """
    Tigerholm version of the Kouranova Ih model. Gating is identical to
    hcn_kn; the only difference is in the current calculation (done by the
    caller), where instead of a non-specific reversal potential the current
    is split 50/50 between Na+ and K+.

    Returns [dns/dt, dnf/dt] for the slow (n_s) and fast (n_f) gates.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n_s = Y[0]
    n_f = Y[1]
    ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))
    ninf_f = ninf_s  # slow and fast gates share the same steady state
    if v > -70.0:
        tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        # Low-voltage branch was missing, leaving tau_ns/tau_nf undefined
        # for v <= -70 mV; restored to match hcn_kn (identical kinetics).
        tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    dns = (ninf_s - n_s) / tau_ns
    dnf = (ninf_f - n_f) / tau_nf
    return [dns, dnf]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.7 sodium channel model from Vasylyev & Waxman.

    Kinetics are parameterized by rate constants, voltage shifts and slope
    coefficients for the activation (m) and inactivation (h) gates.
    Returns [dm/dt, dh/dt] at the clamped voltage.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]
    # Activation: alpha ~ closed -> open rate, beta ~ open -> closed rate.
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)
    # Inactivation rates.
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))
    h_inf = a_h / (a_h + b_h)
    h_tau = 1 / (a_h + b_h)
    return [(m_inf - m) / m_tau, (h_inf - h) / h_tau]
| tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0) | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.