file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ahrs_serv.py | ():
global ahrs
global gyro_addr
ahrs.write_byte_data(gyro_addr,0x20,0x8F) #DataRate 400Hz, BW 20Hz, All Axis enabled, Gyro ON
ahrs.write_byte_data(gyro_addr,0x23,0xA0) #Escala 2000dps, BlockUpdates
ahrs.write_byte_data(gyro_addr,0x24,0x02) #OutSel = 10h, use HPF and LPF2, HPen = 0.
# Funciones que sacan los valores de los sensores.
def accel_read():
global ahrs
global accel_addr
accel_data = [0,0,0]
##Sacamos los datos de acceleracion de los 3 ejes
#Eje X
xl = format(ahrs.read_byte_data(accel_addr,0x28), '#010b')[2:6]
xh = format(ahrs.read_byte_data(accel_addr,0x29), '#010b')[2:]
#Eje Y
yl = format(ahrs.read_byte_data(accel_addr,0x2A), '#010b')[2:6]
yh = format(ahrs.read_byte_data(accel_addr,0x2B), '#010b')[2:]
#Eje Z
zl = format(ahrs.read_byte_data(accel_addr,0x2C), '#010b')[2:6]
zh = format(ahrs.read_byte_data(accel_addr,0x2D), '#010b')[2:]
## Combinamos juntos los 2 bytes.
accel_data[0] = int('0b' + xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1)) #Eje X #Unimos los bytes en complemento a 2
accel_data[1] = int('0b' + yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1)) #Eje Y #Unimos los bytes en complemento a 2
accel_data[2] = int('0b' + zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1)) #Eje Z #Unimos los bytes en complemento a 2
#Normalizamos el vector antes de retornarlo
norma = np.linalg.norm(accel_data)
accel_data = list(map(lambda x: x/norma,accel_data))
return accel_data
def magn_read():
global ahrs
global magn_addr
magn_data = [0,0,0]
##Sacamos los datos de campo magnetico de los 3 ejes
#Eje X
xh = ahrs.read_byte_data(magn_addr,0x03)
xl = ahrs.read_byte_data(magn_addr,0x04)
#Eje Y
yh = ahrs.read_byte_data(magn_addr,0x07)
yl = ahrs.read_byte_data(magn_addr,0x08)
#Eje Z
zh = ahrs.read_byte_data(magn_addr,0x05)
zl = ahrs.read_byte_data(magn_addr,0x06)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
magn_data[0] = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
magn_data[1] = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
magn_data[2] = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Escalamos los datos
magn_data[0] = (magn_data[0] - 35.0) * 1.0
magn_data[1] = (magn_data[1] + 35.0) * 1.02702702703
magn_data[2] = (magn_data[2] - 3.0) * 0.974358974359
#Normalizamos el vector
norma = np.linalg.norm(magn_data)
magn_data = list(map(lambda x: x/norma,magn_data))
return magn_data
def gyro_read():
global ahrs
global gyro_addr
gyro_data = [0,0,0]
#Eje X
xh = ahrs.read_byte_data(gyro_addr,0x29)
xl = ahrs.read_byte_data(gyro_addr,0x28)
#Eje Y
yh = ahrs.read_byte_data(gyro_addr,0x2B)
yl = ahrs.read_byte_data(gyro_addr,0x2A)
#Eje Z
zh = ahrs.read_byte_data(gyro_addr,0x2D)
zl = ahrs.read_byte_data(gyro_addr,0x2C)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
x = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
y = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
z = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Calculamos los grados por segundo (para 2000dps)
gyro_data[0] = float(x)*70/1000
gyro_data[1] = float(y)*70/1000
gyro_data[2] = float(z)*70/1000
#Transformamos los datos a radianes/seg
gyro_data = list(map(math.radians, gyro_data))
return gyro_data
def madgwicks_filter(accel_datas, magn_datas, gyro_datas, deltat):
global SEq
global b_x
global b_z
global w_b
global beta
global zeta
# print "accel = {}".format(accel_datas)
# print "magn = {}".format(magn_datas)
# print "gyro = {}".format(gyro_datas)
# print "deltat = {}".format(deltat)
# print SEq
# print b_x
# print w_b
# print beta
#axulirary variables to avoid reapeated calcualtions
halfSEq_1 = 0.5 * SEq[0]
halfSEq_2 = 0.5 * SEq[1]
halfSEq_3 = 0.5 * SEq[2]
halfSEq_4 = 0.5 * SEq[3]
twoSEq_1 = 2.0 * SEq[0]
twoSEq_2 = 2.0 * SEq[1]
twoSEq_3 = 2.0 * SEq[2]
twoSEq_4 = 2.0 * SEq[3]
twob_x = 2.0 * b_x
twob_z = 2.0 * b_z
twob_xSEq_1 = 2.0 * b_x * SEq[0]
twob_xSEq_2 = 2.0 * b_x * SEq[1]
twob_xSEq_3 = 2.0 * b_x * SEq[2]
twob_xSEq_4 = 2.0 * b_x * SEq[3]
twob_zSEq_1 = 2.0 * b | gyro_setup | identifier_name | |
a1.py | )
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
|
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def E(x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal(test | X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz | identifier_body |
a1.py | )
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz |
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def E(x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal(test | random_line_split | |
a1.py |
# find A*(A + A*A)
final = mat_mul(A,final)
# find A + (A*(A + A*A))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
return final
# Q2(b)
def timing(N):
A = np.random.rand(N,N)
loop_start = time.time()
B1 = matrix_poly(A)
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def E(x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
| final[i,j] += A[i,j] | conditional_block | |
a1.py | )
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def | (x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal | E | identifier_name |
validation.go |
func recurseValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, context string, seen map[string]*bytes.Buffer) *bytes.Buffer {
var (
buf = new(bytes.Buffer)
first = true
ut, isUT = att.Type.(expr.UserType)
)
// Break infinite recursions
if isUT {
if buf, ok := seen[ut.ID()]; ok {
return buf
}
seen[ut.ID()] = buf
}
flattenValidations(att, make(map[string]struct{}))
newline := func() {
if !first {
buf.WriteByte('\n')
} else {
first = false
}
}
// Write validations on attribute if any.
validation := validationCode(att, attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func validationCode(att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
}
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data | {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, target, seen).String()
} | identifier_body | |
validation.go | attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func validationCode(att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil |
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := | {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
} | conditional_block |
validation.go | attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func | (att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
}
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := validation | validationCode | identifier_name |
validation.go | attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil { | panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func validationCode(att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
}
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := validation.MaxLength | random_line_split | |
mod.rs |
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `scores` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(data.len()).min(data.len());
let total = &data[..k].iter().sum::<f32>();
total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![ | {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
// Sort them in ascending order
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
} | identifier_body | |
mod.rs | data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn | test_get_percentiles | identifier_name | |
mod.rs | () {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize]; | left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `scores` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(data.len()).min(data.len());
let total = &data[..k].iter().sum::<f32>();
total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
| let right = vals[pos.ceil() as usize];
let delta = pos.fract(); | random_line_split |
mod.rs |
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `scores` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(data.len()).min(data.len());
let total = &data[..k].iter().sum::<f32>();
total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e | {
return weights;
} | conditional_block | |
imager_prepare.py | Map, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
"""
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image"
),
'subbands_per_image': ingredient.IntField(
'--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
|
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = | return 1 | conditional_block |
imager_prepare.py | Map, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
| **Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image"
),
'subbands_per_image': ingredient.IntField(
'--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
return 1
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = | """
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
| identifier_body |
imager_prepare.py | Map, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
"""
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image" | '--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
return 1
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = | ),
'subbands_per_image': ingredient.IntField( | random_line_split |
imager_prepare.py | this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image"
),
'subbands_per_image': ingredient.IntField(
'--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
return 1
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = idx_slice * \
(n_subband_groups * subbands_per_image) + \
(idx_sb_group * subbands_per_image)
line_idx_end = line_idx_start + subbands_per_image
#extend inputs with the files for the current time slice
inputs_for_image.extend(input_mapfile[line_idx_start: line_idx_end])
return DataMap(inputs_for_image)
def | _validate_input_map | identifier_name | |
room.go | Landlord(nil)
r.setLandlordPlayCardCount(0)
r.setFarmerPlayCardCount(0)
for _, user := range r.getUsers() {
if user != nil {
user.resume()
}
}
r.users_cards = make(map[string]string, pcount)
}
//设置房间的基础信息
func (r *Room) setRoomBaseInfo() {
allRoomData := *r.getAllRoomData()
arrAllRoomData := strings.Split(allRoomData, "|")
for _, roomData := range arrAllRoomData {
arrRoomData_s := strings.Split(roomData, "$")
arrRoomData := StrArrToIntArr(arrRoomData_s)
roomType, _, multiple := arrRoomData[0], arrRoomData[1], arrRoomData[2]
if roomType == r.GetRoomType() {
r.setMultiple(multiple)
break
}
}
}
//是否赛前玩家操作中
func (r *Room) isHandling() bool {
if r.GetRoomStatus() == RoomStatus_Handle {
return true
}
return false
}
//是否正在比赛
func (r *Room) isMatching() bool {
if r.GetRoomStatus() == RoomStatus_Setout {
return false
}
return true
}
//获取游戏规则
func (r *Room) getGameRule() int {
return r.gameRule
}
//设置游戏规则
func (r *Room) setGameRule(gameRule int) {
r.gameRule = gameRule
}
//获取发牌模式
func (r *Room) getDealMode() int {
return r.dealMode
}
//设置发牌模式
func (r *Room) setDealMode(dealMode int) {
r.dealMode = dealMode
}
//获取牌的模式
func (r *Room) GetCardMode() int {
return r.cardMode
}
//设置牌的模式
func (r *Room) SetCardMode(cardMode int) {
r.cardMode = cardMode
}
//获取游戏模式
func (r *Room) getGameType() int {
return r.gameType
}
//设置游戏模式
func (r *Room) setGameType(gameType int) {
r.gameType = gameType
}
//获取底牌
func (r *Room) getBaseCards() []Card {
return r.baseCards
}
//设置底牌
func (r *Room) setBaseCards(baseCards []Card) {
r.baseCards = baseCards
}
//获取地主
func (r *Room) getLandlord() *User {
return r.landlord
}
//设置地主
func (r *Room) setLandlord(landlord *User) {
r.landlord = landlord
}
//获取农民
func (r *Room) getFarmers() []*User {
return r.farmers
}
//设置农民
func (r *Room) setFarmers(users []*User) {
r.farmers = users
}
//获取当前可操作的玩家
func (r *Room) getCanHandleUser() *User {
return r.canHandleUser
}
/*
设置当前可操作的玩家
push:Handle_Push,userid,操作类型,当前底分,赛制
des:操作类型(0叫地主 1抢地主)
赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
设置当前可操作的玩家并设置倒计时
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
canHandleUser.countDown_handle(time.Second * 10)
r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//累加地主出牌次数
func (r *Room) updteLandlordPlayCardCount() {
r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//累加农民出牌次数
func (r *Room) updteFarmerPlayCardCount() {
r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//重开
func (r *Room) reStart() {
r.resetUsers()
r.closeUserCountDown()
r.SetRoomStatus(RoomStatus_Setout)
r.reset()
}
//玩家转变成地主
func (r *Room) userTurnLandlord(user *User) {
logger.Debugf("%s 成为地主", *user.getUID())
user.setLandlord(true)
r.setLandlord(user)
farmers := []*User{}
for _, u := range r.getUsers() {
if u != user {
farmers = append(farmers, u)
}
}
r.setFarmers(farmers)
r.addCardsToLandlord()
r.showBaseCards(nil)
r.openDouble()
}
/*
亮底牌
push:BaseCards_Push,地主userid,cardid$cardid$cardid,底牌类型,底牌倍数,是否加入牌中
*/
func (r *Room) showBaseCards(user *User) {
if r.getLandlord() == nil {
return
}
// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
cards := | ltiple := r.getBaseCardsInfo()
userids := []string{}
addToCards := 0
if user == nil { //只执行一次(地主出现的时候)
//根据底牌加倍
if multiple > 1 {
r.setMultiple(r.getMultiple() * multiple)
r.pushMultiple()
}
userids = r.getUserIDs()
addToCards = 1
} else { //短线重连进来的
userids = []string{*user.getUserID()}
}
message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
if user == nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
}
landlord.setCards(tmpCards)
}
}
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var | r.getBaseCards()
cardsType, mu | identifier_body |
room.go | nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
}
landlord.setCards(tmpCards)
}
}
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var cards CardList = r.getBaseCards()
shunzi := []int{}
tonghua := map[int]bool{}
baozi := map[int]bool{}
wangzha := map[int]bool{}
for _, card := range cards {
if card.Priority < Priority_Two {
if len(shunzi) == 0 {
shunzi = append(shunzi, card.Priority)
} else {
if shunzi[len(shunzi)-1]+1 == card.Priority {
shunzi = append(shunzi, card.Priority)
}
}
}
tonghua[card.Suit] = true
baozi[card.Priority] = true
if card.Priority >= Priority_SKing {
wangzha[card.Priority] = true
}
}
isShunzi := len(shunzi) == 3
isTonghua := len(tonghua) == 1
isBaozi := len(baozi) == 1
isWangzha := len(wangzha) == 2
isTonghuaShun := isShunzi && isTonghua
if isTonghuaShun {
cardsType = 4
multiple = 4
} else if isWangzha && false {
cardsType = 3
multiple = 2
} else if isShunzi {
cardsType = 2
multiple = 2
} else if isTonghua {
cardsType = 1
multiple = 2
} else if isBaozi {
cardsType = 0
multiple = 2
}
return cardsType, multiple
}
//获取牌的ID列表
func (u *Room) getCardsID(cards []Card) *string {
buff := bytes.Buffer{}
for _, card := range cards {
buff.WriteString(fmt.Sprintf("%d$", card.ID))
}
cardsid := RemoveLastChar(buff)
return cardsid
}
//获取开赛后的状态
func (r *Room) getMatchingStatus() int {
return r.matchingStatus
}
//设置开赛后的状态
func (r *Room) setMatchingStatus(matchingStatus int) {
r.matchingStatus = matchingStatus
}
//获取裁判
func (r *Room) getJudgmentUser() *User {
return r.judgmentUser
}
//设置裁判
func (r *Room) setJudgmentUser(judgmentUser *User) {
r.judgmentUser = judgmentUser
}
//获取房间基数
func (r *Room) getCardinality() int {
return r.cardinality
}
//设置房间基数
func (r *Room) setCardinality(cardinality int) {
r.cardinality = cardinality
}
//获取房间底分
func (r *Room) getBaseScore() int {
return r.baseScore
}
//设置房间底分
func (r *Room) setBaseScore(baseScore int) {
r.baseScore = baseScore
}
/*
推送倍率
push:Multiple_Push,倍数
*/
func (r *Room) pushMultiple() {
multiple := strconv.Itoa(r.getRealityMultiple())
pushMessageToUsers("Multiple_Push", []string{multiple}, r.getUserIDs())
r.pushJudgment("Multiple_Push", multiple)
}
//获取房间倍数
func (r *Room) getMultiple() int {
return r.multiple
}
//设置房间倍数
func (r *Room) setMultiple(multiple int) {
r.multiple = multiple
}
//两倍房间倍数并推送
func (r *Room) doubleMultiple() {
r.setMultiple(r.getMultiple() * 2)
r.pushMultiple()
}
//三倍房间倍数并推送
func (r *Room) tripleMultiple() {
r.setMultiple(r.getMultiple() * 3)
r.pushMultiple()
}
//获取流局倍数
func (r *Room) getLiujuMultiple() int {
return r.liujuMultiple
}
//设置流局倍数
func (r *Room) setLiujuMultiple(liujuMultiple int) {
r.liujuMultiple = liujuMultiple
}
//获取房间真实倍数
func (r *Room) getRealityMultiple() int {
return r.getMultiple() * r.getLiujuMultiple()
}
//更新出牌的轮次
func (r *Room) updatePlayRound() int {
r.playRound += 1
return r.playRound
}
//获取出牌的轮次
func (r *Room) getPlayRound() int {
return r.playRound
}
//设置出牌的轮次
func (r *Room) setPlayRound(playRound int) {
r.playRound = playRound
}
//更新出牌的次数
func (r *Room) updatePlayTime() int {
r.playTime += 1
return r.playTime
}
//获取出牌的次数
func (r *Room) getPlayTime() int {
return r.playTime
}
//获取出牌的次数
func (r *Room) setPlayTime(playTime int) {
r.playTime = playTime
}
//获取剩余大王的数量
func (r *Room) getSurplusBKingCount() int {
return r.surplusBKingCount
}
//设置剩余大王的数量
func (r *Room) setSurplusBKingCount(v int) {
r.surplusBKingCount = v
}
//更新剩余大王的数量
func (r *Room) updateSurplusBKingCount() {
r.surplusBKingCount = r.surplusBKingCount - 1
}
//获取剩余小王的数量
func (r *Room) getSurplusSKingCount() int {
return r.surplusSKingCount
}
//设置剩余小王的数量
func (r *Room) setSurplusSKingCount(v int) {
r.surplusSKingCount = v
}
//更新剩余小王的数量
func (r *Room) updateSurplusSKingCount() {
r.surplusSKingCount = r.surplusSKingCount - 1
}
//获取剩余2的数量
func (r *Room) getSurplusTwoCount() int {
return r.surplusTwoCount
}
//设置剩余2的数量
func (r *Room) setSurplusTwoCount(v int) {
r.surplusTwoCount = v
}
//更新剩余2的数量
func (r *Room) updateSurplusTwoCount() {
r.surplusTwoCount = r.surplusTwoCount - 1
}
//获取设置牌权的命令
func (r *Room) getSetCtlMsg() []string {
return r.setCtlMsg
}
//设置牌权的内容,推送残局时候用
func (r *Room) setSetCtlMsg(setCtlMsg []string) {
r.setCtlMsg = setCtlMsg
}
//获取初始牌数量是否完整
func (r *Room) initCardCountIsIntegrity() bool {
return cardCount == perCapitaCardCount
}
//获取房间人数
func (r *Room) GetPCount() int {
return r.pcount
}
//更新房间人数
func (r *Room) updatePCount(v int) {
r.pcount = r.pcount + v
}
//获取房间观战人数
func (r *Room) GetIdlePCount() int {
return len(r.idleusers)
}
//根据index获取玩家
func (r *Room) getUserByIndex(index int) *User {
return r.users[index]
}
//获取房间入座人数
func (r *Room) getUserCount() int {
count := 0
for _, user := range r.users {
if user != nil {
count += 1
}
}
return count
}
//获取准备中的玩家数量
func (r *Room) getSetoutCount() int {
count := 0
for _, user := range r.users {
if user != nil {
if user.getStatus() == UserStatus_Setout {
count += 1
}
}
}
return count
}
/*
获取玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
| r.userids = nil
| identifier_name | |
room.go | 赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
设置当前可操作的玩家并设置倒计时
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
canHandleUser.countDown_handle(time.Second * 10)
r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//累加地主出牌次数
func (r *Room) updteLandlordPlayCardCount() {
r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//累加农民出牌次数
func (r *Room) updteFarmerPlayCardCount() {
r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//重开
func (r *Room) reStart() {
r.resetUsers()
r.closeUserCountDown()
r.SetRoomStatus(RoomStatus_Setout)
r.reset()
}
//玩家转变成地主
func (r *Room) userTurnLandlord(user *User) {
logger.Debugf("%s 成为地主", *user.getUID())
user.setLandlord(true)
r.setLandlord(user)
farmers := []*User{}
for _, u := range r.getUsers() {
if u != user {
farmers = append(farmers, u)
}
}
r.setFarmers(farmers)
r.addCardsToLandlord()
r.showBaseCards(nil)
r.openDouble()
}
/*
亮底牌
push:BaseCards_Push,地主userid,cardid$cardid$cardid,底牌类型,底牌倍数,是否加入牌中
*/
func (r *Room) showBaseCards(user *User) {
if r.getLandlord() == nil {
return
}
// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
cards := r.getBaseCards()
cardsType, multiple := r.getBaseCardsInfo()
userids := []string{}
addToCards := 0
if user == nil { //只执行一次(地主出现的时候)
//根据底牌加倍
if multiple > 1 {
r.setMultiple(r.getMultiple() * multiple)
r.pushMultiple()
}
userids = r.getUserIDs()
addToCards = 1
} else { //短线重连进来的
userids = []string{*user.getUserID()}
}
message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
if user == nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
}
landlord.setCards(tmpCards)
}
}
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var cards CardList = r.getBaseCards()
shunzi := []int{}
tonghua := map[int]bool{}
baozi := map[int]bool{}
wangzha := map[int]bool{}
for _, card := range cards {
if card.Priority < Priority_Two {
if len(shunzi) == 0 {
shunzi = append(shunzi, card.Priority)
} else {
if shunzi[len(shunzi)-1]+1 == card.Priority {
shunzi = append(shunzi, card.Priority)
}
}
}
tonghua[card.Suit] = true
baozi[card.Priority] = true
if card.Priority >= Priority_SKing {
wangzha[card.Priority] = true
}
}
isShunzi := len(shunzi) == 3
isTonghua := len(tonghua) == 1
isBaozi := len(baozi) == 1
isWangzha := len(wangzha) == 2
isTonghuaShun := isShunzi && isTonghua
if isTonghuaShun {
cardsType = 4
multiple = 4
} else if isWangzha && false {
cardsType = 3
multiple = 2
} else if isShunzi {
cardsType = 2
multiple = 2
} else if isTonghua {
cardsType = 1
multiple = 2
} else if isBaozi {
cardsType = 0
multiple = 2
}
return cardsType, multiple
}
//获取牌的ID列表
func (u *Room) getCardsID(cards []Card) *string {
buff := bytes.Buffer{}
for _, card := range cards {
buff.WriteString(fmt.Sprintf("%d$", card.ID))
}
cardsid := RemoveLastChar(buff)
return cardsid
}
//获取开赛后的状态
func (r *Room) getMatchingStatus() int {
return r.matchingStatus
}
//设置开赛后的状态
func (r *Room) setMatchingStatus(matchingStatus int) {
r.matchingStatus = matchingStatus
}
//获取裁判
func (r *Room) getJudgmentUser() *User {
return r.judgmentUser
}
//设置裁判
func (r *Room) setJudgmentUser(judgmentUser *User) {
r.judgmentUser = judgmentUser
}
//获取房间基数
func (r *Room) getCardinality() int {
return r.cardinality
}
//设置房间基数
func (r *Room) setCardinality(cardinality int) {
r.cardinality = cardinality
}
//获取房间底分
func (r *Room) getBaseScore() int {
return r.baseScore
}
//设置房间底分
func (r *Room) setBaseScore(baseScore int) {
r.baseScore = baseScore
}
/*
推送倍率
push:Multiple_Push,倍数
*/
func (r *Room) pushMultiple() {
multiple := strconv.Itoa(r.getRealityMultiple())
pus | hMessageToUsers("Multiple_Push", []str | conditional_block | |
room.go | .setLandlord(nil)
r.setLandlordPlayCardCount(0)
r.setFarmerPlayCardCount(0)
for _, user := range r.getUsers() {
if user != nil {
user.resume()
}
}
r.users_cards = make(map[string]string, pcount)
}
//设置房间的基础信息
func (r *Room) setRoomBaseInfo() {
allRoomData := *r.getAllRoomData()
arrAllRoomData := strings.Split(allRoomData, "|")
for _, roomData := range arrAllRoomData {
arrRoomData_s := strings.Split(roomData, "$")
arrRoomData := StrArrToIntArr(arrRoomData_s)
roomType, _, multiple := arrRoomData[0], arrRoomData[1], arrRoomData[2]
if roomType == r.GetRoomType() {
r.setMultiple(multiple)
break
}
}
}
//是否赛前玩家操作中
func (r *Room) isHandling() bool {
if r.GetRoomStatus() == RoomStatus_Handle {
return true
}
return false
}
//是否正在比赛
func (r *Room) isMatching() bool {
if r.GetRoomStatus() == RoomStatus_Setout {
return false
}
return true
}
//获取游戏规则
func (r *Room) getGameRule() int {
return r.gameRule
}
//设置游戏规则
func (r *Room) setGameRule(gameRule int) {
r.gameRule = gameRule
}
//获取发牌模式
func (r *Room) getDealMode() int {
return r.dealMode
}
//设置发牌模式
func (r *Room) setDealMode(dealMode int) {
r.dealMode = dealMode
}
//获取牌的模式
func (r *Room) GetCardMode() int {
return r.cardMode
}
//设置牌的模式
func (r *Room) SetCardMode(cardMode int) {
r.cardMode = cardMode
}
//获取游戏模式
func (r *Room) getGameType() int {
return r.gameType
}
//设置游戏模式
func (r *Room) setGameType(gameType int) {
r.gameType = gameType
}
//获取底牌
func (r *Room) getBaseCards() []Card {
return r.baseCards
}
//设置底牌
func (r *Room) setBaseCards(baseCards []Card) {
r.baseCards = baseCards
}
//获取地主
func (r *Room) getLandlord() *User {
return r.landlord
}
//设置地主
func (r *Room) setLandlord(landlord *User) {
r.landlord = landlord
}
//获取农民
func (r *Room) getFarmers() []*User {
return r.farmers
}
//设置农民
func (r *Room) setFarmers(users []*User) {
r.farmers = users
}
//获取当前可操作的玩家
func (r *Room) getCanHandleUser() *User {
return r.canHandleUser
}
/*
设置当前可操作的玩家
push:Handle_Push,userid,操作类型,当前底分,赛制
des:操作类型(0叫地主 1抢地主)
赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
设置当前可操作的玩家并设置倒计时
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
canHandleUser.countDown_handle(time.Second * 10)
r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//累加地主出牌次数
func (r *Room) updteLandlordPlayCardCount() {
r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//累加农民出牌次数
func (r *Room) updteFarmerPlayCardCount() {
r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//重开
func (r *Room) reStart() {
r.resetUsers()
r.closeUserCountDown()
r.SetRoomStatus(RoomStatus_Setout)
r.reset()
}
//玩家转变成地主
func (r *Room) userTurnLandlord(user *User) {
logger.Debugf("%s 成为地主", *user.getUID())
user.setLandlord(true)
r.setLandlord(user)
farmers := []*User{}
for _, u := range r.getUsers() {
if u != user {
farmers = append(farmers, u)
}
}
r.setFarmers(farmers)
r.addCardsToLandlord()
r.showBaseCards(nil)
r.openDouble()
}
/*
亮底牌
push:BaseCards_Push,地主userid,cardid$cardid$cardid,底牌类型,底牌倍数,是否加入牌中
*/
func (r *Room) showBaseCards(user *User) {
if r.getLandlord() == nil {
return
}
// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
cards := r.getBaseCards()
cardsType, multiple := r.getBaseCardsInfo()
userids := []string{}
addToCards := 0
if user == nil { //只执行一次(地主出现的时候)
//根据底牌加倍
if multiple > 1 {
r.setMultiple(r.getMultiple() * multiple)
r.pushMultiple()
}
userids = r.getUserIDs()
addToCards = 1
} else { //短线重连进来的
userids = []string{*user.getUserID()}
}
message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
if user == nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
} |
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var | landlord.setCards(tmpCards)
}
} | random_line_split |
workload_placement_nodelabel.go | , err := makePaddingPod(fxt.Namespace.Name, podName, zone, zoneRes)
Expect(err).NotTo(HaveOccurred(), "unable to create padding pod %q on zone %q", podName, zone.Name)
padPod, err = pinPodTo(padPod, nodeName, zone.Name)
Expect(err).NotTo(HaveOccurred(), "unable to pin pod %q to zone %q", podName, zone.Name)
err = fxt.Client.Create(context.TODO(), padPod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q on zone %q", podName, zone.Name)
paddingPods = append(paddingPods, padPod)
}
}
By("Waiting for padding pods to be ready")
failedPodIds := e2ewait.ForPaddingPodsRunning(fxt, paddingPods)
Expect(failedPodIds).To(BeEmpty(), "some padding pods have failed to run")
var err error
targetNodeNRTInitial, err = e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
})
It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", func() {
By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium))
unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
defer func() {
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
}()
unlabelAlternative, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
defer func() {
err := unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}()
By("Scheduling the testing pod")
pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod")
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria") | nodesUnlabeled = true | random_line_split | |
workload_placement_nodelabel.go | map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria")
nodesUnlabeled = true
err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
//check that it didn't stop running for some time
By(fmt.Sprintf("ensuring the deployment %q keep being ready", deployment.Name))
Eventually(func() bool {
updatedDp := &appsv1.Deployment{}
err := fxt.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDp)
Expect(err).ToNot(HaveOccurred())
return e2ewait.IsDeploymentComplete(deployment, &updatedDp.Status)
}, time.Second*30, time.Second*5).Should(BeTrue(), "deployment %q became unready", deployment.Name)
},
Entry("[test_id:47597] should be able to schedule pod with affinity property requiredDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution),
Entry("[test_id:49843] should be able to schedule pod with affinity property prefferdDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution),
)
})
})
})
func createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{*nodeSelTerm},
},
},
}
return aff
}
func | createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution | identifier_name | |
workload_placement_nodelabel.go | ].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria")
nodesUnlabeled = true
err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
//check that it didn't stop running for some time
By(fmt.Sprintf("ensuring the deployment %q keep being ready", deployment.Name))
Eventually(func() bool {
updatedDp := &appsv1.Deployment{}
err := fxt.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDp)
Expect(err).ToNot(HaveOccurred())
return e2ewait.IsDeploymentComplete(deployment, &updatedDp.Status)
}, time.Second*30, time.Second*5).Should(BeTrue(), "deployment %q became unready", deployment.Name)
},
Entry("[test_id:47597] should be able to schedule pod with affinity property requiredDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution),
Entry("[test_id:49843] should be able to schedule pod with affinity property prefferdDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution),
)
})
})
})
func createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity | {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{*nodeSelTerm},
},
},
}
return aff | identifier_body | |
workload_placement_nodelabel.go | , zone.Name)
paddingPods = append(paddingPods, padPod)
}
}
By("Waiting for padding pods to be ready")
failedPodIds := e2ewait.ForPaddingPodsRunning(fxt, paddingPods)
Expect(failedPodIds).To(BeEmpty(), "some padding pods have failed to run")
var err error
targetNodeNRTInitial, err = e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
})
It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", func() {
By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium))
unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
defer func() {
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
}()
unlabelAlternative, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
defer func() {
err := unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}()
By("Scheduling the testing pod")
pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod")
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria")
nodesUnlabeled = true
err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil | {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
} | conditional_block | |
pix2pix_GAN.py | leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# add batch normalization
g = BatchNormalization()(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# define the standalone generator model
def | (image_shape=(128,128,4)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model: C64-C128-C256-C512-C512-C512-C512-C512
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
# e7 = define_encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
b = Activation('relu')(b)
# decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
# d1 = decoder_block(b, e7, 512)
d2 = decoder_block(b, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False)
d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
# make weights in the discriminator not trainable
d_model.trainable = False
# define the source image
in_src = Input(shape=image_shape)
# connect the source image to the generator input. The input to the generator are
# images with only obstacles
gen_out = g_model(in_src)
# connect the source input and generator output to the discriminator input
dis_out = d_model([in_src, gen_out])
# src image as input, generated image and classification output
model = Model(in_src, [dis_out, gen_out])
# compile model
opt = Adam(lr=learning_rate_generator, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# unpack dataset
image_obsta, image_paths_n_obsta = dataset
# choose random instances
indices = list(range(0,image_obsta.shape[0]))
random.shuffle(indices)
ix = indices[0:n_samples]
# retrieve selected images
X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
# generate fake instance
X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1]. | define_generator | identifier_name |
pix2pix_GAN.py | # leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# add batch normalization
g = BatchNormalization()(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# define the standalone generator model
def define_generator(image_shape=(128,128,4)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model: C64-C128-C256-C512-C512-C512-C512-C512
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
# e7 = define_encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
b = Activation('relu')(b) | d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
# make weights in the discriminator not trainable
d_model.trainable = False
# define the source image
in_src = Input(shape=image_shape)
# connect the source image to the generator input. The input to the generator are
# images with only obstacles
gen_out = g_model(in_src)
# connect the source input and generator output to the discriminator input
dis_out = d_model([in_src, gen_out])
# src image as input, generated image and classification output
model = Model(in_src, [dis_out, gen_out])
# compile model
opt = Adam(lr=learning_rate_generator, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# unpack dataset
image_obsta, image_paths_n_obsta = dataset
# choose random instances
indices = list(range(0,image_obsta.shape[0]))
random.shuffle(indices)
ix = indices[0:n_samples]
# retrieve selected images
X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
# generate fake instance
X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1]. | # decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
# d1 = decoder_block(b, e7, 512)
d2 = decoder_block(b, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False) | random_line_split |
pix2pix_GAN.py | 8):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_paths.pdf")
fig2 = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig2.add_gridspec(1,2)
ax[0] = fig2.add_subplot(gs[0, 0])
ax[1] = fig2.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_obst.pdf")
discriminator_loss_real.append(d_loss1)
discriminator_loss_fake.append(d_loss2)
generator_loss.append(g_loss)
discriminator_loss.append(d_loss1+d_loss2)
print(i)
# save the plots for loss etc
x = np.linspace(0, n_steps, n_steps)
plt.figure()
plt.plot(x, discriminator_loss, color = 'blue')
plt.ylabel('Discriminator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator','generator'))
plt.savefig(save_path+'/loss_discriminator.pdf')
plt.figure()
plt.plot(x, generator_loss, color = 'orange')
plt.ylabel('Generator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator loss for fake images','discriminator loss for real images'))
plt.savefig(save_path+'/loss_generator.pdf')
writer = pd.ExcelWriter(save_path+'/loss.xlsx', engine='xlsxwriter')
df1 = DataFrame({'Generator Loss': generator_loss, 'Discriminator Loss': discriminator_loss, 'Discriminator Loss for Real Images': discriminator_loss_real, 'Discriminator Loss for Fake Images': discriminator_loss_fake})
df1.to_excel(writer, sheet_name='sheet1', index=False)
writer.save()
# Saving the Gnerator Model and weights since that is the only one necessary
model_json = g_model.to_json()
with open(save_path+'/Generator_model_tex.json', "w") as json_file:
json_file.write(model_json)
g_model.save_weights(save_path+'/Generator_model_weights_tex.h5')
def load_model_and_check(load_path, test_data):
json_file = open(load_path+'/Generator_model_tex.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
print('Model loaded')
loaded_model.load_weights(load_path+'/Generator_model_weights_tex.h5')
for i in range(test_data.shape[0]):
rand_im = test_data[i]
rand_im = rand_im[np.newaxis,:,:,:]
generated_image = loaded_model.predict(rand_im)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(rand_im,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Test Image as Input', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(load_path +'/Test_Image_Level4_'+ str(i)+'.pdf')
def load_images(folder, im_size = (128,128), col = 1):
# load color images after resizing them !
im_list = []
for filename in os.listdir(folder):
p = os.path.join(folder, filename)
if p == folder + '/.DS_Store':
continue
# img = mpimg.imread(p)
if(col == 1):
| img = Image.open(p).convert('L') | conditional_block | |
pix2pix_GAN.py | leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# add batch normalization
g = BatchNormalization()(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# define the standalone generator model
def define_generator(image_shape=(128,128,4)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model: C64-C128-C256-C512-C512-C512-C512-C512
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
# e7 = define_encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
b = Activation('relu')(b)
# decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
# d1 = decoder_block(b, e7, 512)
d2 = decoder_block(b, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False)
d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
# make weights in the discriminator not trainable
d_model.trainable = False
# define the source image
in_src = Input(shape=image_shape)
# connect the source image to the generator input. The input to the generator are
# images with only obstacles
gen_out = g_model(in_src)
# connect the source input and generator output to the discriminator input
dis_out = d_model([in_src, gen_out])
# src image as input, generated image and classification output
model = Model(in_src, [dis_out, gen_out])
# compile model
opt = Adam(lr=learning_rate_generator, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# unpack dataset
image_obsta, image_paths_n_obsta = dataset
# choose random instances
indices = list(range(0,image_obsta.shape[0]))
random.shuffle(indices)
ix = indices[0:n_samples]
# retrieve selected images
X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
# generate fake instance
|
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1]. | X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y | identifier_body |
utils.rs | _data = w_1 comma w_2 ...
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn | (path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union | pk_path | identifier_name |
utils.rs | }
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2 ...
// w_data = w_1 comma w_2 ...
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut | static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst)) | random_line_split | |
utils.rs | _data = w_1 comma w_2 ...
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> |
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode:: | {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
} | identifier_body |
utils.rs | _data = w_1 comma w_2 ...
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() |
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode:: | {
continue;
} | conditional_block |
huffman.rs | ::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> | };
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0 | {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true | identifier_body |
huffman.rs | _reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
} | random_line_split | ||
huffman.rs | new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct | {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b | HuffmanDecoderBuilder | identifier_name |
huffman.rs |
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000 | {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
} | conditional_block | |
audio_feature.py | self.n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, feature_type):
if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset,
zone_t_size)
paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side='right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
| zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
| #量化
| conditional_block |
audio_feature.py | self.n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, feature_type):
if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset, | paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side='right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
#量化
zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
| zone_t_size)
| random_line_split |
audio_feature.py | .n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, feature_type):
if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset,
zone_t_size)
paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side= | def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f
= np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
#量化
zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
| 'right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
| identifier_body |
audio_feature.py | .n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, fe | if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset,
zone_t_size)
paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side='right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
#量化
zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
| ature_type):
| identifier_name |
Experiments.py |
@abstractmethod
def evaluate(self, prediction_fn):
"""
This function should compute all relevant metrics to the task,
prediction_fn: (inp) -> (pred): it's an end-to-end prediction function from any model.
returns: dict: metrics
"""
pass
def save(self, path):
"""
Saves the entire object ready to be loaded.
"""
torch.save(self, path)
def load(path):
"""
STATIC METHOD
accessed through class, loads a pre-existing experiment.
"""
return torch.load(path)
class TranslationExperiment(Experiment):
def __init__(self, task_data, src_splitter=string_split_v1, tgt_splitter=string_split_v1):
"""
task_data: [(str, str)]: this is the expected data format.
>>> from src.Experiments import TranslationExperiment
>>> translation_experiment = TranslationExperiment(validation_pairs)
>>> def simple_translate(src):
>>> return "return output"
>>> translation_experiment.evaluate(simple_translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {}
for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results:
eval_results[q_id] = {}
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy | """
task_data: [(str, str)]: input/target pairs for translation evaluation.
"""
self.task_data = task_data | identifier_body | |
Experiments.py | _translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {}
for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results:
|
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU":0.6},...]
'''
for sample_obj in samples:
pred_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['predicted_seq']])
refrence_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['target_seq']])
if pred_tokens==[]:
pred_tokens = ['']
sample_obj["nltk_BLEU"] = nltk_bleu(refrence_tokens, pred_tokens)
if self.debug:
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples], [self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([ | eval_results[q_id] = {} | conditional_block |
Experiments.py | _translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {} | eval_results[q_id] = {}
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU":0.6},...]
'''
for sample_obj in samples:
pred_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['predicted_seq']])
refrence_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['target_seq']])
if pred_tokens==[]:
pred_tokens = ['']
sample_obj["nltk_BLEU"] = nltk_bleu(refrence_tokens, pred_tokens)
if self.debug:
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples], [self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s | for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results: | random_line_split |
Experiments.py | (self, path):
"""
Saves the entire object ready to be loaded.
"""
torch.save(self, path)
def load(path):
"""
STATIC METHOD
accessed through class, loads a pre-existing experiment.
"""
return torch.load(path)
class TranslationExperiment(Experiment):
def __init__(self, task_data, src_splitter=string_split_v1, tgt_splitter=string_split_v1):
"""
task_data: [(str, str)]: this is the expected data format.
>>> from src.Experiments import TranslationExperiment
>>> translation_experiment = TranslationExperiment(validation_pairs)
>>> def simple_translate(src):
>>> return "return output"
>>> translation_experiment.evaluate(simple_translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {}
for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results:
eval_results[q_id] = {}
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU | save | identifier_name | |
mod.rs | use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs at.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplied
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed", ?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn run<O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file);
if annotator.annotate(&mut event, &file).is_none() {
emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error", ?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error", ?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
fn create_event(line: Bytes, file: &str) -> Event | {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
} | identifier_body | |
mod.rs | , Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs at.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplied
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed", ?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn | <O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file);
if annotator.annotate(&mut event, &file).is_none() {
emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error", ?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error", ?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message | run | identifier_name |
mod.rs | , Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs at.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplied
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed", ?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn run<O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file); | emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error", ?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error", ?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = | if annotator.annotate(&mut event, &file).is_none() { | random_line_split |
PacketDownloader.py |
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def | (service, user_id, msg_id, msg_labels):
try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path)
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages | ModifyMessage | identifier_name |
PacketDownloader.py |
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
|
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path)
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages | try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error) | identifier_body |
PacketDownloader.py |
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
|
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages | messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path) | conditional_block |
PacketDownloader.py | client
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute() | labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path)
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages | random_line_split | |
trace_context.rs | size,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait all async span dropped which, created by [Span::prepare_for_async].
pub fn wait(self) {
self.wg.clone().wait();
}
/// It converts tracing context into segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async span haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn | peek_active_span_id | identifier_name | |
trace_context.rs | ()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait all async span dropped which, created by [Span::prepare_for_async].
pub fn wait(self) {
self.wg.clone().wait();
}
/// It converts tracing context into segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async span haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer | {
self.tracer.upgrade().expect("Tracer has dropped")
} | identifier_body | |
trace_context.rs | {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close span. We can't use closed span after finalize called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Span should only be
/// created through context, and be archived into the context after the span
/// finished.
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service | &self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str | }
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str { | random_line_split |
trace_context.rs | (finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Span should only be
/// created through context, and be archived into the context after the span
/// finished.
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() | {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
} | conditional_block | |
fslogical.go | // Just advance the consistent point.
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
if err := events.OnCommit(ctx); err != nil {
return err
}
case batchStart:
toMark = toMark[:0]
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
case batchDoc:
doc := t.doc
if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
return err
} else if !ok {
continue
}
docUpdatedAt, err := d.docUpdatedAt(doc)
if err != nil {
return err
}
mut, err := d.marshalMutation(doc, docUpdatedAt)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.recurse {
if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
return err
}
}
if d.idempotent {
toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
}
case batchDelete:
if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
return err
} else if !ok {
continue
}
mut, err := marshalDeletion(t.ref, t.ts)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.idempotent {
toMark = append(toMark, mark{t.ref, t.ts})
}
case batchEnd:
if err := events.OnCommit(ctx); err != nil {
return err
}
for _, mark := range toMark {
if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
return err
}
}
default:
panic(fmt.Sprintf("unimplemented type %T", msg))
}
}
return nil
}
// ZeroStamp implements logical.Dialect.
func (d *Dialect) ZeroStamp() stamp.Stamp {
return &consistentPoint{}
}
// Compute the query-relative document start id. We need to do this so
// that sub-collections can be accessed in a consistent way.
//
// 2022-08-29: One way that does not work is to call Query.StartAfter()
// and then use Query.Serialize to hand the status over to the next
// backfill cycle.
func (d *Dialect) backfillPoint(doc *firestore.DocumentSnapshot) (*consistentPoint, error) {
topCollection := doc.Ref.Parent
for topCollection.Parent != nil {
// collection -> parent doc -> parent collection
topCollection = topCollection.Parent.Parent
}
relativePath := fmt.Sprintf("documents/%s/%s",
topCollection.ID, doc.Ref.Path[len(topCollection.Path)+1:])
updateTime, err := d.docUpdatedAt(doc)
if err != nil {
return nil, err
}
return &consistentPoint{
BackfillID: relativePath,
Time: updateTime,
}, nil
}
// docUpdatedAt extracts a timestamp from the document.
func (d *Dialect) docUpdatedAt(doc *firestore.DocumentSnapshot) (time.Time, error) {
val, err := doc.DataAt(d.updatedAtProperty.Raw())
if err != nil {
return time.Time{}, errors.WithStack(err)
}
if t, ok := val.(time.Time); ok {
return t, nil
}
return time.Time{}, errors.Errorf("document missing %q property", d.updatedAtProperty.Raw())
}
// marshalDeletion creates a mutation to represent the deletion of the
// specified document.
func marshalDeletion(id *firestore.DocumentRef, updatedAt time.Time) (types.Mutation, error) {
key, err := json.Marshal([]string{id.ID})
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
return types.Mutation{
Key: key,
Time: hlc.New(updatedAt.UnixNano(), 0),
}, nil
}
func (d *Dialect) marshalMutation(
doc *firestore.DocumentSnapshot, updatedAt time.Time,
) (types.Mutation, error) {
dataMap := doc.Data()
// Allow the doc id to be baked into the mutation.
if d.docIDProperty != "" {
dataMap[d.docIDProperty] = doc.Ref.ID
}
data, err := json.Marshal(dataMap)
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
key, err := json.Marshal([]string{doc.Ref.ID})
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
// Create empty slices so that we never pass a null value into JS.
parentCollections := make([]string, 0)
parentDocIds := make([]string, 0)
for parentCollection := doc.Ref.Parent; parentCollection != nil; {
parentCollections = append(parentCollections, parentCollection.ID)
if parentCollection.Parent != nil {
parentDocIds = append(parentDocIds, parentCollection.Parent.ID)
parentCollection = parentCollection.Parent.Parent
} else {
break
}
}
// The timestamps are converted to values that are easy to wrap
// a JS Date around in the user script.
// https://pkg.go.dev/github.com/dop251/goja#hdr-Handling_of_time_Time
meta := map[string]any{
"createTime": doc.CreateTime.UnixNano() / 1e6,
"id": doc.Ref.ID,
"parentCollections": parentCollections,
"parentDocIds": parentDocIds,
"path": doc.Ref.Path,
"readTime": doc.ReadTime.UnixNano() / 1e6,
"updateTime": doc.UpdateTime.UnixNano() / 1e6,
}
return types.Mutation{
Data: data,
Key: key,
Time: hlc.New(updatedAt.UnixNano(), 0),
Meta: meta,
}, nil
}
// doRecurse, if configured, will load dynamic sub-collections of
// the given document.
func (d *Dialect) doRecurse(
ctx context.Context, doc *firestore.DocumentRef, events logical.Events,
) error {
it := doc.Collections(ctx)
for {
coll, err := it.Next()
if err == iterator.Done {
return nil
}
if err != nil {
return errors.Wrapf(err, "loading dynamic collections of %s", doc.Path)
}
if _, skip := d.recurseFilter.Get(ident.New(coll.ID)); skip {
continue
}
fork := *d
fork.query = coll.Query
fork.sourcePath = coll.Path
if err := events.Backfill(ctx, coll.Path, &fork); err != nil {
return errors.WithMessage(err, coll.Path)
}
}
}
// markProcessed records an incoming document as having been processed.
func (d *Dialect) markProcessed(
ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) error {
payload := processedPayload{UpdatedAt: ts}
data, err := json.Marshal(&payload)
if err != nil {
return errors.WithStack(err)
}
return d.memo.Put(ctx, d.pool, processedKey(doc), data)
}
// shouldProcess implements idempotent processing of document snapshots.
// It ensures that the update-time of any given document always
// advances.
func (d *Dialect) shouldProcess(
ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) (bool, error) {
if !d.idempotent {
return true, nil
}
data, err := d.memo.Get(ctx, d.pool, processedKey(doc))
if err != nil {
return false, err
}
// No data means we're seeing the document for the first time.
if data == nil {
log.Tracef("accepting document %s at %s", doc.ID, ts)
return true, nil
}
var payload processedPayload
if err := json.Unmarshal(data, &payload); err != nil {
return false, errors.WithStack(err)
}
if ts.After(payload.UpdatedAt) {
log.Tracef("accepting document %s at %s > %s", doc.ID, ts, payload.UpdatedAt)
return true, nil
}
log.Tracef("ignoring document %s at %s <= %s", doc.ID, ts, payload.UpdatedAt)
return false, nil
}
// processedPayload is used by markProcessed and shouldProcess.
type processedPayload struct {
UpdatedAt time.Time `json:"u,omitempty"`
}
// processedKey returns the memo key used by markProcessed and
// shouldProcess.
func processedKey(ref *firestore.DocumentRef) string | {
return fmt.Sprintf("fs-doc-%s", ref.Path)
} | identifier_body | |
fslogical.go | .State,
) error {
prev, _ := state.GetConsistentPoint().(*consistentPoint)
to := time.Now()
for {
log.Tracef("backfilling %s from %s", d.sourcePath, prev)
err := d.backfillOneBatch(ctx, ch, to, prev, state)
if err != nil {
return errors.Wrap(err, d.sourcePath)
}
select {
case next := <-state.NotifyConsistentPoint(ctx, logical.AwaitGT, prev):
prev = next.(*consistentPoint)
continue
case <-state.Stopping():
return nil
case <-ctx.Done():
return ctx.Err()
}
}
}
// backfillOneBatch grabs a single batch of documents from the backend.
// It will return the next incremental consistentPoint and whether the
// backfill is expected to continue.
func (d *Dialect) backfillOneBatch(
ctx context.Context,
ch chan<- logical.Message,
now time.Time,
cp *consistentPoint,
state logical.State,
) error {
// We need to make the call to snaps.Next() interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Expected path when backfillOneBatch exits.
}
}()
// Iterate over the collection by (updated_at, __doc_id__) using
// a cursor-like approach so that we can checkpoint along the way.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
OrderBy(firestore.DocumentID, firestore.Asc).
Where(d.updatedAtProperty.Raw(), "<=", now).
Limit(d.backfillBatchSize)
if !cp.IsZero() {
if cp.AsID() == "" {
q = q.Where(d.updatedAtProperty.Raw(), ">=", cp.AsTime())
} else {
q = q.StartAfter(cp.AsTime(), cp.AsID())
}
}
snaps := q.Snapshots(ctx)
defer snaps.Stop()
snap, err := snaps.Next()
if err != nil {
// Mask cancellation errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
// We're going to call GetAll since we're running with a reasonable
// limit value. This allows us to peek at the id of the last
// document, so we can compute the eventual consistent point for
// this batch of docs.
docs, err := snap.Documents.GetAll()
if err != nil {
return errors.WithStack(err)
}
log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
// Workaround / BUG? It appears that the StartAfter call above
// sometimes returns the last document from the previous backfill
// loop. This loop ensures that the effective consistent point
// always goes forward in time.
for len(docs) > 0 {
firstCP, err := d.backfillPoint(docs[0])
if err != nil {
return err
}
if stamp.Compare(firstCP, cp) > 0 {
break
}
log.Tracef("filtering")
docs = docs[1:]
}
// Helper for interruptible send idiom.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// If we have read through the end of all documents in the
// collection, we want the consistent-point to jump forward in time
// to the server read-time.
if len(docs) == 0 {
cp = streamPoint(snap.ReadTime)
return send(backfillEnd{cp})
}
// Move the proposed consistent point to the last document.
lastDoc := docs[len(docs)-1]
if cp, err = d.backfillPoint(lastDoc); err != nil {
return err
}
// Send a batch of messages downstream. We use a non-blocking idiom
if err := send(batchStart{cp}); err != nil {
return err
}
for _, doc := range docs {
if err := send(batchDoc{doc}); err != nil {
return err
}
}
return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source.
func (d *Dialect) ReadInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
// The call to snaps.Next() below needs to be made interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Normal exit path when ReadInto exits.
}
}()
cp, _ := state.GetConsistentPoint().(*consistentPoint)
// Stream from the last updated time.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
StartAt(cp.AsTime().Truncate(time.Second))
snaps := q.Snapshots(ctx)
defer snaps.Stop()
// Helper for interruptible send.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
for {
log.Tracef("getting snapshot for %s", d.sourcePath)
snap, err := snaps.Next()
if err != nil |
log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
return err
}
for _, change := range snap.Changes {
switch change.Kind {
case firestore.DocumentAdded,
firestore.DocumentModified:
// Ignore documents that we already know have been deleted.
if d.tombstones.IsDeleted(change.Doc.Ref) {
continue
}
if err := send(batchDoc{change.Doc}); err != nil {
return err
}
case firestore.DocumentRemoved:
d.tombstones.NotifyDeleted(change.Doc.Ref)
if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
return err
}
}
}
if err := send(batchEnd{}); err != nil {
return err
}
}
}
// Process implements logical.Dialect.
func (d *Dialect) Process(
ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
// Only write idempotency mark when we've committed a db transaction.
type mark struct {
ref *firestore.DocumentRef
time time.Time
}
var toMark []mark
for msg := range ch {
if logical.IsRollback(msg) {
if err := events.OnRollback(ctx, msg); err != nil {
return err
}
continue
}
switch t := msg.(type) {
case backfillEnd:
// Just advance the consistent point.
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
if err := events.OnCommit(ctx); err != nil {
return err
}
case batchStart:
toMark = toMark[:0]
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
case batchDoc:
doc := t.doc
if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
return err
} else if !ok {
continue
}
docUpdatedAt, err := d.docUpdatedAt(doc)
if err != nil {
return err
}
mut, err := d.marshalMutation(doc, docUpdatedAt)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.recurse {
if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
return err
}
}
if d.idempotent {
toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
}
case batchDelete:
if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
return err
} else if !ok {
continue
}
mut, err := marshalDeletion(t.ref, t.ts)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types | {
// Mask cancellations errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
} | conditional_block |
fslogical.go | fillOneBatch grabs a single batch of documents from the backend.
// It will return the next incremental consistentPoint and whether the
// backfill is expected to continue.
func (d *Dialect) backfillOneBatch(
ctx context.Context,
ch chan<- logical.Message,
now time.Time,
cp *consistentPoint,
state logical.State,
) error {
// We need to make the call to snaps.Next() interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Expected path when backfillOneBatch exits.
}
}()
// Iterate over the collection by (updated_at, __doc_id__) using
// a cursor-like approach so that we can checkpoint along the way.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
OrderBy(firestore.DocumentID, firestore.Asc).
Where(d.updatedAtProperty.Raw(), "<=", now).
Limit(d.backfillBatchSize)
if !cp.IsZero() {
if cp.AsID() == "" {
q = q.Where(d.updatedAtProperty.Raw(), ">=", cp.AsTime())
} else {
q = q.StartAfter(cp.AsTime(), cp.AsID())
}
}
snaps := q.Snapshots(ctx)
defer snaps.Stop()
snap, err := snaps.Next()
if err != nil {
// Mask cancellation errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
// We're going to call GetAll since we're running with a reasonable
// limit value. This allows us to peek at the id of the last
// document, so we can compute the eventual consistent point for
// this batch of docs.
docs, err := snap.Documents.GetAll()
if err != nil {
return errors.WithStack(err)
}
log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
// Workaround / BUG? It appears that the StartAfter call above
// sometimes returns the last document from the previous backfill
// loop. This loop ensures that the effective consistent point
// always goes forward in time.
for len(docs) > 0 {
firstCP, err := d.backfillPoint(docs[0])
if err != nil {
return err
}
if stamp.Compare(firstCP, cp) > 0 {
break
}
log.Tracef("filtering")
docs = docs[1:]
}
// Helper for interruptible send idiom.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// If we have read through the end of all documents in the
// collection, we want the consistent-point to jump forward in time
// to the server read-time.
if len(docs) == 0 {
cp = streamPoint(snap.ReadTime)
return send(backfillEnd{cp})
}
// Move the proposed consistent point to the last document.
lastDoc := docs[len(docs)-1]
if cp, err = d.backfillPoint(lastDoc); err != nil {
return err
}
// Send a batch of messages downstream. We use a non-blocking idiom
if err := send(batchStart{cp}); err != nil {
return err
}
for _, doc := range docs {
if err := send(batchDoc{doc}); err != nil {
return err
}
}
return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source.
func (d *Dialect) ReadInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
// The call to snaps.Next() below needs to be made interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Normal exit path when ReadInto exits.
}
}()
cp, _ := state.GetConsistentPoint().(*consistentPoint)
// Stream from the last updated time.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
StartAt(cp.AsTime().Truncate(time.Second))
snaps := q.Snapshots(ctx)
defer snaps.Stop()
// Helper for interruptible send.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
for {
log.Tracef("getting snapshot for %s", d.sourcePath)
snap, err := snaps.Next()
if err != nil {
// Mask cancellations errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
return err
}
for _, change := range snap.Changes {
switch change.Kind {
case firestore.DocumentAdded,
firestore.DocumentModified:
// Ignore documents that we already know have been deleted.
if d.tombstones.IsDeleted(change.Doc.Ref) {
continue
}
if err := send(batchDoc{change.Doc}); err != nil {
return err
}
case firestore.DocumentRemoved:
d.tombstones.NotifyDeleted(change.Doc.Ref)
if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
return err
}
}
}
if err := send(batchEnd{}); err != nil {
return err
}
}
}
// Process implements logical.Dialect.
func (d *Dialect) Process(
ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
// Only write idempotency mark when we've committed a db transaction.
type mark struct {
ref *firestore.DocumentRef
time time.Time
}
var toMark []mark
for msg := range ch {
if logical.IsRollback(msg) {
if err := events.OnRollback(ctx, msg); err != nil {
return err
}
continue
}
switch t := msg.(type) {
case backfillEnd:
// Just advance the consistent point.
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
if err := events.OnCommit(ctx); err != nil {
return err
}
case batchStart:
toMark = toMark[:0]
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
case batchDoc:
doc := t.doc
if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
return err
} else if !ok {
continue
}
docUpdatedAt, err := d.docUpdatedAt(doc)
if err != nil {
return err
}
mut, err := d.marshalMutation(doc, docUpdatedAt)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.recurse {
if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
return err
}
}
if d.idempotent {
toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
}
case batchDelete:
if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
return err
} else if !ok {
continue
}
mut, err := marshalDeletion(t.ref, t.ts)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.idempotent {
toMark = append(toMark, mark{t.ref, t.ts})
}
case batchEnd:
if err := events.OnCommit(ctx); err != nil {
return err
}
for _, mark := range toMark {
if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
return err
}
}
default:
panic(fmt.Sprintf("unimplemented type %T", msg))
}
}
return nil
}
// ZeroStamp implements logical.Dialect.
func (d *Dialect) | ZeroStamp | identifier_name | |
fslogical.go | We're going to call GetAll since we're running with a reasonable
// limit value. This allows us to peek at the id of the last
// document, so we can compute the eventual consistent point for
// this batch of docs.
docs, err := snap.Documents.GetAll()
if err != nil {
return errors.WithStack(err)
}
log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
// Workaround / BUG? It appears that the StartAfter call above
// sometimes returns the last document from the previous backfill
// loop. This loop ensures that the effective consistent point
// always goes forward in time.
for len(docs) > 0 {
firstCP, err := d.backfillPoint(docs[0])
if err != nil {
return err
}
if stamp.Compare(firstCP, cp) > 0 {
break
}
log.Tracef("filtering")
docs = docs[1:]
}
// Helper for interruptible send idiom.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// If we have read through the end of all documents in the
// collection, we want the consistent-point to jump forward in time
// to the server read-time.
if len(docs) == 0 {
cp = streamPoint(snap.ReadTime)
return send(backfillEnd{cp})
}
// Move the proposed consistent point to the last document.
lastDoc := docs[len(docs)-1]
if cp, err = d.backfillPoint(lastDoc); err != nil {
return err
}
// Send a batch of messages downstream. We use a non-blocking idiom
if err := send(batchStart{cp}); err != nil {
return err
}
for _, doc := range docs {
if err := send(batchDoc{doc}); err != nil {
return err
}
}
return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source.
func (d *Dialect) ReadInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
// The call to snaps.Next() below needs to be made interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Normal exit path when ReadInto exits.
}
}()
cp, _ := state.GetConsistentPoint().(*consistentPoint)
// Stream from the last updated time.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
StartAt(cp.AsTime().Truncate(time.Second))
snaps := q.Snapshots(ctx)
defer snaps.Stop()
// Helper for interruptible send.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
for {
log.Tracef("getting snapshot for %s", d.sourcePath)
snap, err := snaps.Next()
if err != nil {
// Mask cancellations errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
return err
}
for _, change := range snap.Changes {
switch change.Kind {
case firestore.DocumentAdded,
firestore.DocumentModified:
// Ignore documents that we already know have been deleted.
if d.tombstones.IsDeleted(change.Doc.Ref) {
continue
}
if err := send(batchDoc{change.Doc}); err != nil {
return err
}
case firestore.DocumentRemoved:
d.tombstones.NotifyDeleted(change.Doc.Ref)
if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
return err
}
}
}
if err := send(batchEnd{}); err != nil {
return err
}
}
}
// Process implements logical.Dialect.
func (d *Dialect) Process(
ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
// Only write idempotency mark when we've committed a db transaction.
type mark struct {
ref *firestore.DocumentRef
time time.Time
}
var toMark []mark
for msg := range ch {
if logical.IsRollback(msg) {
if err := events.OnRollback(ctx, msg); err != nil {
return err
}
continue
}
switch t := msg.(type) {
case backfillEnd:
// Just advance the consistent point.
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
if err := events.OnCommit(ctx); err != nil {
return err
}
case batchStart:
toMark = toMark[:0]
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
case batchDoc:
doc := t.doc
if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
return err
} else if !ok {
continue
}
docUpdatedAt, err := d.docUpdatedAt(doc)
if err != nil {
return err
}
mut, err := d.marshalMutation(doc, docUpdatedAt)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.recurse {
if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
return err
}
}
if d.idempotent {
toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
}
case batchDelete:
if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
return err
} else if !ok {
continue
}
mut, err := marshalDeletion(t.ref, t.ts)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.idempotent {
toMark = append(toMark, mark{t.ref, t.ts})
}
case batchEnd:
if err := events.OnCommit(ctx); err != nil {
return err
}
for _, mark := range toMark {
if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
return err
}
}
default:
panic(fmt.Sprintf("unimplemented type %T", msg))
}
}
return nil
}
// ZeroStamp implements logical.Dialect.
func (d *Dialect) ZeroStamp() stamp.Stamp {
return &consistentPoint{}
}
// Compute the query-relative document start id. We need to do this so
// that sub-collections can be accessed in a consistent way.
//
// 2022-08-29: One way that does not work is to call Query.StartAfter()
// and then use Query.Serialize to hand the status over to the next
// backfill cycle.
func (d *Dialect) backfillPoint(doc *firestore.DocumentSnapshot) (*consistentPoint, error) {
topCollection := doc.Ref.Parent
for topCollection.Parent != nil {
// collection -> parent doc -> parent collection
topCollection = topCollection.Parent.Parent
}
relativePath := fmt.Sprintf("documents/%s/%s",
topCollection.ID, doc.Ref.Path[len(topCollection.Path)+1:])
updateTime, err := d.docUpdatedAt(doc)
if err != nil {
return nil, err
}
return &consistentPoint{
BackfillID: relativePath,
Time: updateTime,
}, nil
}
// docUpdatedAt extracts a timestamp from the document.
func (d *Dialect) docUpdatedAt(doc *firestore.DocumentSnapshot) (time.Time, error) {
val, err := doc.DataAt(d.updatedAtProperty.Raw())
if err != nil {
return time.Time{}, errors.WithStack(err)
}
if t, ok := val.(time.Time); ok {
return t, nil
}
return time.Time{}, errors.Errorf("document missing %q property", d.updatedAtProperty.Raw())
}
// marshalDeletion creates a mutation to represent the deletion of the | // specified document.
func marshalDeletion(id *firestore.DocumentRef, updatedAt time.Time) (types.Mutation, error) { | random_line_split | |
test_query_performance.py | """)
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def | (self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
stopper += "<word>%s</word>\n" %stopword
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if qid not in qrels:
# print "query %s does not have judgement" %(qid)
continue
else:
if qid not in performance:
performance[qid] = .0
if docid in qrels[qid]:
performance[qid] += qrels[qid][docid]*1.0/100
final_performance = sum(performance.values())*1.0/len(qrels)
print | original_model | identifier_name |
test_query_performance.py | """)
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def original_model(self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
stopper += "<word>%s</word>\n" %stopword
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
|
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if qid not in qrels:
# print "query %s does not have judgement" %(qid)
continue
else:
if qid not in performance:
performance[qid] = .0
if docid in qrels[qid]:
performance[qid] += qrels[qid][docid]*1.0/100
final_performance = sum(performance.values())*1.0/len(qrels)
print " | qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels | identifier_body |
test_query_performance.py | query_template = Template("""
<query>
\t<number>$qid</number>
\t<text>$q_string</text>
</query>
""")
structure_template = Template("""
<parameters>
<index>$index</index>
<trecFormat>true</trecFormat>
<runID>$run_id</runID>
<count>$count</count>
$query_body
$rule
$stopper
$psr
</parameters>""")
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def original_model(self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
stopper += "<word>%s</word>\n" %stopword
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if q | import argparse
import codecs
from string import Template
| random_line_split | |
test_query_performance.py | """)
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def original_model(self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
|
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if qid not in qrels:
# print "query %s does not have judgement" %(qid)
continue
else:
if qid not in performance:
performance[qid] = .0
if docid in qrels[qid]:
performance[qid] += qrels[qid][docid]*1.0/100
final_performance = sum(performance.values())*1.0/len(qrels)
print " | stopper += "<word>%s</word>\n" %stopword | conditional_block |
translator.py | :
class Argument:
def __init__(self, content, begin_pos, end_pos):
self.content = content
self.begin_pos = begin_pos
self.end_pos = end_pos
def __hash__(self):
return hash(self.content)
def __eq__(self, other):
return isinstance(other, Tag.Argument) and self.content == other.content
def __str__(self):
return self.content
def __init__(self, name, args, begin_pos, end_pos):
self.name = name
self.args = args
self.begin_pos = begin_pos
self.end_pos = end_pos
def __eq__(self, other):
return isinstance(other, Tag) and self.name == other.name and self.args == other.args
def __hash__(self):
return hash(self.name)+sum([hash(i) for i in self.args])
def __str__(self):
return self.name+''.join(['{'+str(i)+'}' for i in self.args])
class Document:
@staticmethod
def load(file):
return Document(file)
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def generate(self):
root, _ = os.path.splitext(self.name)
output = root+'.pdf'
subprocess.check_call(['xelatex', self.name])
return output
def find_tags(self, tag, nargs=1):
with open(self.name) as file:
doc = file.read()
line_number = [ 0 for i in range(len(doc)) ]
line = 1
for i in range(len(doc)):
line_number[i] = line
if doc[i] == '\n':
line += 1
texts = list()
pos = 0
def _find_matching_closing(i):
depth = 0
while True:
pc = doc[i-1] if i-1 > 0 else None
c = doc[i]
if c == '{' and pc != '\\':
depth += 1
elif c == '}' and pc != '\\':
depth -= 1
if depth == 0:
break
i += 1
return i
while True:
i = doc.find(tag, pos)
if i < 0:
break
args = []
start_tag = i
end = start = pos = start_tag+len(tag)
for n in range(nargs):
try:
end = _find_matching_closing(start)
except Exception as e:
raise Exception(
'Could not find end for tag that starts at line '+
'{line} ({text})'.format(
line=line_number[start],
text=(
doc[max(start-20, 0):start]+' --> '+
doc[start:min(start+20, len(doc))])
))
start += 1 #skip initial '{'
args.append(Tag.Argument(doc[start:end], start, end))
start = doc.find('{', end)
texts.append(Tag(tag, args, start_tag, end))
return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update
template_name = self.generate_template(document)
sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag | Tag | identifier_name | |
translator.py | pc = doc[i-1] if i-1 > 0 else None
c = doc[i]
if c == '{' and pc != '\\':
depth += 1
elif c == '}' and pc != '\\':
depth -= 1
if depth == 0:
break
i += 1
return i
while True:
i = doc.find(tag, pos)
if i < 0:
break
args = []
start_tag = i
end = start = pos = start_tag+len(tag)
for n in range(nargs):
|
texts.append(Tag(tag, args, start_tag, end))
return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update
template_name = self.generate_template(document)
sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\npgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[2].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
template.write('\n')
return template_name
def translate_tag(self, tag):
if tag.name == '\\gettext':
if not self.file:
return tag.args[0].content
else:
return self[(tag.args[0].content, None)][self.TAG_MSGSTR]
elif tag.name == '\\ngettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[0].content, tag.args[1].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[0].content, None)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[2].content, variants)
elif tag.name == '\\pgettext':
if not self.file:
return tag.args[1].content
return self[(tag.args[1].content, tag.args[0].content)][self.TAG_MSGSTR]
elif tag.name == '\\npgettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[1].content, tag.args[ | try:
end = _find_matching_closing(start)
except Exception as e:
raise Exception(
'Could not find end for tag that starts at line '+
'{line} ({text})'.format(
line=line_number[start],
text=(
doc[max(start-20, 0):start]+' --> '+
doc[start:min(start+20, len(doc))])
))
start += 1 #skip initial '{'
args.append(Tag.Argument(doc[start:end], start, end))
start = doc.find('{', end) | conditional_block |
translator.py | pc = doc[i-1] if i-1 > 0 else None
c = doc[i]
if c == '{' and pc != '\\':
depth += 1
elif c == '}' and pc != '\\':
depth -= 1
if depth == 0:
break
i += 1
return i
while True:
i = doc.find(tag, pos)
if i < 0:
break
args = []
start_tag = i
end = start = pos = start_tag+len(tag)
for n in range(nargs):
try:
end = _find_matching_closing(start)
except Exception as e:
raise Exception(
'Could not find end for tag that starts at line '+
'{line} ({text})'.format(
line=line_number[start],
text=(
doc[max(start-20, 0):start]+' --> '+
doc[start:min(start+20, len(doc))])
))
start += 1 #skip initial '{'
args.append(Tag.Argument(doc[start:end], start, end))
start = doc.find('{', end)
texts.append(Tag(tag, args, start_tag, end))
return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update | sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\npgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[2].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
template.write('\n')
return template_name
def translate_tag(self, tag):
if tag.name == '\\gettext':
if not self.file:
return tag.args[0].content
else:
return self[(tag.args[0].content, None)][self.TAG_MSGSTR]
elif tag.name == '\\ngettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[0].content, tag.args[1].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[0].content, None)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[2].content, variants)
elif tag.name == '\\pgettext':
if not self.file:
return tag.args[1].content
return self[(tag.args[1].content, tag.args[0].content)][self.TAG_MSGSTR]
elif tag.name == '\\npgettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[1].content, tag.args[2 | template_name = self.generate_template(document) | random_line_split |
translator.py |
def __str__(self):
return self.content
def __init__(self, name, args, begin_pos, end_pos):
self.name = name
self.args = args
self.begin_pos = begin_pos
self.end_pos = end_pos
def __eq__(self, other):
return isinstance(other, Tag) and self.name == other.name and self.args == other.args
def __hash__(self):
return hash(self.name)+sum([hash(i) for i in self.args])
def __str__(self):
return self.name+''.join(['{'+str(i)+'}' for i in self.args])
class Document:
@staticmethod
def load(file):
return Document(file)
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def generate(self):
root, _ = os.path.splitext(self.name)
output = root+'.pdf'
subprocess.check_call(['xelatex', self.name])
return output
def find_tags(self, tag, nargs=1):
with open(self.name) as file:
doc = file.read()
line_number = [ 0 for i in range(len(doc)) ]
line = 1
for i in range(len(doc)):
line_number[i] = line
if doc[i] == '\n':
line += 1
texts = list()
pos = 0
def _find_matching_closing(i):
depth = 0
while True:
pc = doc[i-1] if i-1 > 0 else None
c = doc[i]
if c == '{' and pc != '\\':
depth += 1
elif c == '}' and pc != '\\':
depth -= 1
if depth == 0:
break
i += 1
return i
while True:
i = doc.find(tag, pos)
if i < 0:
break
args = []
start_tag = i
end = start = pos = start_tag+len(tag)
for n in range(nargs):
try:
end = _find_matching_closing(start)
except Exception as e:
raise Exception(
'Could not find end for tag that starts at line '+
'{line} ({text})'.format(
line=line_number[start],
text=(
doc[max(start-20, 0):start]+' --> '+
doc[start:min(start+20, len(doc))])
))
start += 1 #skip initial '{'
args.append(Tag.Argument(doc[start:end], start, end))
start = doc.find('{', end)
texts.append(Tag(tag, args, start_tag, end))
return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update
template_name = self.generate_template(document)
sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\npgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PL | return isinstance(other, Tag.Argument) and self.content == other.content | identifier_body | |
obj.rs | str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn new(name: &str) -> Self {
Group {
name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
texture_coords.push(vec2(x, y));
}
"f" => |
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = | {
let face_indices = tokens.map(FaceIndex::from_str).flatten().collect();
cur_group.faces.push(face(face_indices));
} | conditional_block |
obj.rs | &str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn | (name: &str) -> Self {
Group {
name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
texture_coords.push(vec2(x, y));
}
"f" => {
let face_indices = tokens.map(FaceIndex::from_str).flatten().collect();
cur_group.faces.push(face(face_indices));
}
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = | new | identifier_name |
obj.rs | &str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn new(name: &str) -> Self {
Group { | name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
texture_coords.push(vec2(x, y));
}
"f" => {
let face_indices = tokens.map(FaceIndex::from_str).flatten().collect();
cur_group.faces.push(face(face_indices));
}
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = gl | random_line_split | |
cortex.pb.go | samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) | () string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, | String | identifier_name |
cortex.pb.go | samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {} | }
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, 0x | func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries | random_line_split |
cortex.pb.go | samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string |
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, | { return proto.CompactTextString(m) } | identifier_body |
cortex.pb.go | ,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil |
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, | {
return m.Timeseries
} | conditional_block |
sss.py | pop = startpop
minbboxarea = bbox[2]*bbox[3]
mingen = 0
# Keep track of the total bbox
maxx = bbox[2]
maxy = bbox[3]
maxpop = startpop
# Ignore ship if rule is not a 2-state rule
if not g.numstates()==2:
return (minpop, speed)
for ii in xrange(maxgen):
g.run(1)
r = g.getrect()
if not r:
# Pattern has died out and is therefore not a ship
mingen = 0
break
pop = int(g.getpop())
bboxarea = r[2]*r[3]
if pop < minpop:
# Find phase with minimimum population
minpop = pop
minbboxarea = r[2]*r[3]
mingen = ii+1
elif pop == minpop:
# Amongst phases with min pop, find one with minimum bbox area
# bboxarea = r[2]*r[3]
if bboxarea < minbboxarea:
minbboxarea = bboxarea
mingen = ii+1
# Track the bounding box of the pattern's evolution
maxx = max(maxx, r[2])
maxy = max(maxy, r[3])
maxpop = max(maxpop, pop)
if (pop == startpop and r[2:4] == bbox[2:4]):
if (giveRLE(g.getcells(r)) == testrle):
# Starting ship has reappeared
speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
break
# Check for rotated pattern
elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
# For 2-cell oscillators this is sufficient
if minpop == 2:
speed = (0, 0, 2*(ii+1))
mingen = ii+1
break
g.run(mingen) # Evolve ship to generation with minimum population
# return (minpop, speed)
# return (minpop, speed, maxpop)
return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
v = [abs(x) for x in v]
return min(v), max(v)
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
minpop, rulestr, dx, dy, period, shiprle = ship
shipPatt = g.parse(shiprle)
# Transform ship to canonical direction
if abs(dx) >= abs(dy):
a, b, c, d = sign(dx), 0, 0, sign(dy)
else:
a, b, c, d = 0, sign(dy), sign(dx), 0
dy, dx = minmaxofabs((dx, dy))
shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
# Clear the layer and place the ship
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(shipPatt)
shiprle = giveRLE(g.getcells(g.getrect()))
g.setrule(rulestr)
# Determine the minimal isotropic rule
setminisorule(period)
return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
| else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: rle_strB = ""
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
Hensel = [
['0'],
['1c', '1e'],
['2a', '2c', '2e', '2i', '2k', '2n'],
['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
['6a', '6c', '6e', '6i', '6k', '6n'],
['7c', '7e'],
['8']
]
def parseTransitions(ruleTrans):
ruleElem = []
if not ruleTrans:
return ruleElem
context = ruleTrans[0]
bNonTot = False
bNegate | rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
| random_line_split |
sss.py | Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
v = [abs(x) for x in v]
return min(v), max(v)
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
minpop, rulestr, dx, dy, period, shiprle = ship
shipPatt = g.parse(shiprle)
# Transform ship to canonical direction
if abs(dx) >= abs(dy):
a, b, c, d = sign(dx), 0, 0, sign(dy)
else:
a, b, c, d = 0, sign(dy), sign(dx), 0
dy, dx = minmaxofabs((dx, dy))
shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
# Clear the layer and place the ship
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(shipPatt)
shiprle = giveRLE(g.getcells(g.getrect()))
g.setrule(rulestr)
# Determine the minimal isotropic rule
setminisorule(period)
return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: rle_strB = ""
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
Hensel = [
['0'],
['1c', '1e'],
['2a', '2c', '2e', '2i', '2k', '2n'],
['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
['6a', '6c', '6e', '6i', '6k', '6n'],
['7c', '7e'],
['8']
]
def parseTransitions(ruleTrans):
ruleElem = []
if not ruleTrans:
return ruleElem
context = ruleTrans[0]
bNonTot = False
bNegate = False
for ch in ruleTrans[1:] + '9':
if ch in '0123456789':
if not bNonTot:
ruleElem += Hensel[int(context)]
context = ch
bNonTot = False
bNegate = False
elif ch == '-':
bNegate = True
ruleElem += Hensel[int(context)]
else:
bNonTot = True
if bNegate:
ruleElem.remove(context + ch)
else:
ruleElem.append(context + ch)
return ruleElem
def rulestringopt(a):
result = ''
context = ''
lastnum = ''
lastcontext = ''
for i in a:
if i in 'BS':
context = i
result += i
elif i in '012345678':
if (i == lastnum) and (lastcontext == context):
pass
else:
lastcontext = context
lastnum = i
result += i
else:
result += i
result = result.replace('4aceijknqrtwyz', '4')
result = result.replace('3aceijknqry', '3')
result = result.replace('5aceijknqry', '5')
result = result.replace('2aceikn', '2')
result = result.replace('6aceikn', '6')
result = result.replace('1ce', '1')
result = result.replace('7ce', '7')
return result
def getRuleRangeElems(period, ruleRange = 'minmax'):
| if g.empty():
return
if period < 1:
return
rule = g.getrule().split(':')[0]
if not (rule[0] == 'B' and '/S' in rule):
g.exit('Please set Golly to an isotropic 2-state rule.')
# Parse rule string to list of transitions for Birth and Survival
oldrule = rule
Bstr, Sstr = rule.split('/')
Bstr = Bstr.lstrip('B')
Sstr = Sstr.lstrip('S')
b_need = parseTransitions(Bstr)
b_OK = list(b_need)
s_need = parseTransitions(Sstr)
s_OK = list(s_need)
| identifier_body | |
sss.py | pop = startpop
minbboxarea = bbox[2]*bbox[3]
mingen = 0
# Keep track of the total bbox
maxx = bbox[2]
maxy = bbox[3]
maxpop = startpop
# Ignore ship if rule is not a 2-state rule
if not g.numstates()==2:
return (minpop, speed)
for ii in xrange(maxgen):
g.run(1)
r = g.getrect()
if not r:
# Pattern has died out and is therefore not a ship
mingen = 0
break
pop = int(g.getpop())
bboxarea = r[2]*r[3]
if pop < minpop:
# Find phase with minimimum population
minpop = pop
minbboxarea = r[2]*r[3]
mingen = ii+1
elif pop == minpop:
# Amongst phases with min pop, find one with minimum bbox area
# bboxarea = r[2]*r[3]
if bboxarea < minbboxarea:
minbboxarea = bboxarea
mingen = ii+1
# Track the bounding box of the pattern's evolution
maxx = max(maxx, r[2])
maxy = max(maxy, r[3])
maxpop = max(maxpop, pop)
if (pop == startpop and r[2:4] == bbox[2:4]):
if (giveRLE(g.getcells(r)) == testrle):
# Starting ship has reappeared
speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
break
# Check for rotated pattern
elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
# For 2-cell oscillators this is sufficient
if minpop == 2:
speed = (0, 0, 2*(ii+1))
mingen = ii+1
break
g.run(mingen) # Evolve ship to generation with minimum population
# return (minpop, speed)
# return (minpop, speed, maxpop)
return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
v = [abs(x) for x in v]
return min(v), max(v)
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
minpop, rulestr, dx, dy, period, shiprle = ship
shipPatt = g.parse(shiprle)
# Transform ship to canonical direction
if abs(dx) >= abs(dy):
a, b, c, d = sign(dx), 0, 0, sign(dy)
else:
a, b, c, d = 0, sign(dy), sign(dx), 0
dy, dx = minmaxofabs((dx, dy))
shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
# Clear the layer and place the ship
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(shipPatt)
shiprle = giveRLE(g.getcells(g.getrect()))
g.setrule(rulestr)
# Determine the minimal isotropic rule
setminisorule(period)
return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: |
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
Hensel = [
['0'],
['1c', '1e'],
['2a', '2c', '2e', '2i', '2k', '2n'],
['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
['6a', '6c', '6e', '6i', '6k', '6n'],
['7c', '7e'],
['8']
]
def parseTransitions(ruleTrans):
ruleElem = []
if not ruleTrans:
return ruleElem
context = ruleTrans[0]
bNonTot = False
bNegate | rle_strB = "" | conditional_block |
sss.py | pop = startpop
minbboxarea = bbox[2]*bbox[3]
mingen = 0
# Keep track of the total bbox
maxx = bbox[2]
maxy = bbox[3]
maxpop = startpop
# Ignore ship if rule is not a 2-state rule
if not g.numstates()==2:
return (minpop, speed)
for ii in xrange(maxgen):
g.run(1)
r = g.getrect()
if not r:
# Pattern has died out and is therefore not a ship
mingen = 0
break
pop = int(g.getpop())
bboxarea = r[2]*r[3]
if pop < minpop:
# Find phase with minimimum population
minpop = pop
minbboxarea = r[2]*r[3]
mingen = ii+1
elif pop == minpop:
# Amongst phases with min pop, find one with minimum bbox area
# bboxarea = r[2]*r[3]
if bboxarea < minbboxarea:
minbboxarea = bboxarea
mingen = ii+1
# Track the bounding box of the pattern's evolution
maxx = max(maxx, r[2])
maxy = max(maxy, r[3])
maxpop = max(maxpop, pop)
if (pop == startpop and r[2:4] == bbox[2:4]):
if (giveRLE(g.getcells(r)) == testrle):
# Starting ship has reappeared
speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
break
# Check for rotated pattern
elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
# For 2-cell oscillators this is sufficient
if minpop == 2:
speed = (0, 0, 2*(ii+1))
mingen = ii+1
break
g.run(mingen) # Evolve ship to generation with minimum population
# return (minpop, speed)
# return (minpop, speed, maxpop)
return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
v = [abs(x) for x in v]
return min(v), max(v)
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
minpop, rulestr, dx, dy, period, shiprle = ship
shipPatt = g.parse(shiprle)
# Transform ship to canonical direction
if abs(dx) >= abs(dy):
a, b, c, d = sign(dx), 0, 0, sign(dy)
else:
a, b, c, d = 0, sign(dy), sign(dx), 0
dy, dx = minmaxofabs((dx, dy))
shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
# Clear the layer and place the ship
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(shipPatt)
shiprle = giveRLE(g.getcells(g.getrect()))
g.setrule(rulestr)
# Determine the minimal isotropic rule
setminisorule(period)
return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def | (l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: rle_strB = ""
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
Hensel = [
['0'],
['1c', '1e'],
['2a', '2c', '2e', '2i', '2k', '2n'],
['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
['6a', '6c', '6e', '6i', '6k', '6n'],
['7c', '7e'],
['8']
]
def parseTransitions(ruleTrans):
ruleElem = []
if not ruleTrans:
return ruleElem
context = ruleTrans[0]
bNonTot = False
bNegate = | chunks | identifier_name |
limit.rs | }
/// Append bytes just read from the underlying handle into the demand buffer.
fn record_demand(&mut self, data: &[u8]) {
    self.buf.extend_from_slice(data);
}
/// Reserve capacity for `more` additional bytes of demand on top of
/// whatever demand space currently remains in the buffer.
fn add_demand_cap(&mut self, more: usize) {
    let needed = more + self.get_demand_remaining();
    self.buf.reserve(needed);
}
/// Consume `taken` bytes from the current rate allowance and record them
/// as used in this period.
///
/// Panics if `taken` exceeds the remaining allowance — callers are
/// expected to have clamped against `allowance` first.
fn take_allowance(&mut self, taken: usize) {
    assert!(taken <= self.allowance, "taken > allowance");
    self.allowance -= taken;
    self.last_used += taken;
}
fn consume_read(&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
/// Drain up to `sz` buffered bytes through the caller-supplied `write`
/// closure. Returns the number of bytes the closure accepted, plus the
/// closure's error if it failed (in which case zero bytes are counted).
/// Accepted bytes are removed from the buffer, their capacity is
/// re-reserved for future demand, and they are charged to the allowance.
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where
    F: FnMut(&[u8]) -> Result<usize, E>,
{
    let avail = cmp::min(self.buf.len(), sz);
    let (used, err) = match write(&self.buf[..avail]) {
        Ok(n) => (n, None),
        Err(e) => (0, Some(e)),
    };
    self.buf.advance(used);
    self.add_demand_cap(used);
    self.take_allowance(used);
    (used, err)
}
}
/// Return the error carried by `r`, or the default `de` when `r` is `Ok`.
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
    r.err().unwrap_or(de)
}
// Lifecycle state of one direction (read side or write side) of a
// rate-limited stream.
#[derive(Debug, PartialEq, Eq)]
enum Status {
    SOpen, // still transferring data
    SOk, // eof
    SErr // the underlying handle reported a non-retryable I/O error
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf,
pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> |
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
| {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
} | identifier_body |
limit.rs | }
fn record_demand(&mut self, buf: &[u8]) {
self.buf.extend_from_slice(buf);
}
fn add_demand_cap(&mut self, more: usize) {
self.buf.reserve(more + self.get_demand_remaining());
}
fn take_allowance(&mut self, taken: usize) {
if taken > self.allowance {
panic!("taken > allowance");
}
self.allowance -= taken;
self.last_used += taken;
}
fn consume_read(&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where F: FnMut (&[u8]) -> Result<usize, E> {
let mut used = 0;
let mut err = None;
let to_drain = cmp::min(self.buf.len(), sz);
match write(&self.buf[..to_drain]) {
Ok(n) => used += n,
Err(e) => err = Some(e),
}
self.buf.advance(used);
self.add_demand_cap(used);
self.take_allowance(used);
(used, err)
}
}
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
match r {
Ok(_) => de,
Err(e) => e,
}
}
#[derive(Debug, PartialEq, Eq)]
enum Status {
SOpen,
SOk, // eof
SErr
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf, | pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
}
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
// TODO | random_line_split | |
limit.rs | }
fn record_demand(&mut self, buf: &[u8]) {
self.buf.extend_from_slice(buf);
}
fn add_demand_cap(&mut self, more: usize) {
self.buf.reserve(more + self.get_demand_remaining());
}
fn take_allowance(&mut self, taken: usize) {
if taken > self.allowance {
panic!("taken > allowance");
}
self.allowance -= taken;
self.last_used += taken;
}
fn | (&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where F: FnMut (&[u8]) -> Result<usize, E> {
let mut used = 0;
let mut err = None;
let to_drain = cmp::min(self.buf.len(), sz);
match write(&self.buf[..to_drain]) {
Ok(n) => used += n,
Err(e) => err = Some(e),
}
self.buf.advance(used);
self.add_demand_cap(used);
self.take_allowance(used);
(used, err)
}
}
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
match r {
Ok(_) => de,
Err(e) => e,
}
}
#[derive(Debug, PartialEq, Eq)]
enum Status {
SOpen,
SOk, // eof
SErr
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf,
pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
}
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
// | consume_read | identifier_name |
de.rs | , str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
| // visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
| identifier_body | |
de.rs | able<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor | {
self.deserialize_map(visitor)
} | random_line_split | |
de.rs | a `application/x-wwww-url-encoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * Main `deserialize` methods defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn de | >(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
| serialize_struct<V | identifier_name |
de.rs | /// with or without a given length.
///
/// * Main `deserialize` methods defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
| Deserializer::with_map(x).deserialize_map(visitor)
} e | conditional_block | |
JsPsych.js |
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function generateTrials(numberOfTrial) {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml,
data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const | {
setReady(true);
} | conditional_block | |
JsPsych.js | (true);
}
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function generateTrials(numberOfTrial) | '</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml,
data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = | {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' + | identifier_body |
JsPsych.js | (true);
}
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function | (numberOfTrial) {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml,
data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = | generateTrials | identifier_name |
JsPsych.js | (true);
}
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function generateTrials(numberOfTrial) {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml, | data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = trials | random_line_split | |
merge_zips.go | string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
return true
}
}
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
}
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips | }
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOld | {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
} | conditional_block |
merge_zips.go | string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
return true
}
}
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) | (inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
}
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
}
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOld | copyEntry | identifier_name |
merge_zips.go | string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern)) | }
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
}
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
}
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOlder | }
if match {
return true
} | random_line_split |
merge_zips.go | string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
return true
}
}
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string |
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
}
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
old | {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
} | identifier_body |
submasterDurations.py | subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
| {"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active | random_line_split | |
submasterDurations.py | keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + | verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
| identifier_body | |
submasterDurations.py | and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
|
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active | print("{}%".format(percentComplete))
percentComplete+=1 | conditional_block |
submasterDurations.py | ():
#Query for all submasters. We want all activity groups (Pie observations) where the seqID field = sub_XXXX in the last 1000 sols.
# --------------------------------------------- Input Parameters and Initializaton -------------------------------------------------
# parameters that should eventually be inputs
verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
| main | identifier_name | |
transaction.component.ts | <any>;
dataExpense: Array<any>;
dataDebtLoan: Array<any>;
// hiện thị phần thêm chi tiết
public adddetail = true;
// KHỞI TẠO CÁC BIẾN VỊ TRÍ
lat: number = 10.812035;
lng: number = 106.7119887
zoom: number = 14;
// DANH SÁCH TẤT CẢ CÁC ĐỊA ĐIỂM
allPlace: any[] = [];
// OBJCET ĐỊA ĐIỂM
objLocation = {
lat: 10.812035,
lng: 106.7119887,
name: "Đặt vị trí",
}
dataWallets: Array<any>;
infoCheckMoney: any = {};
public modalCheckMoney: NgbModalRef;
titleTransaction: String = "Thêm Giao Dịch";
nameButtonTransaction: String = "Thêm Giao Dịch";
dateCurrent = new Date();
nameWallet: String = '';
// TRANSACTION DEFAULT
transaction: ITransaction = {
groupcategory: '',
idcategory: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// URL HÌNH ẢNH
public url: String = '';
private fileToUpload: File = null;
ngOnInit() {
// LẤY TẤT CẢ CÁC VÍ HIỂN THỊ LÊN
this.getDataWallets();
// LẤY TOẠ ĐỘ Ở VỊ TRÍ HIỆN TẠI
this.setCurrentPosition();
}
constructor(private FomatDateService: FomatDateService,
private WalletService: WalletService,
private modalService: NgbModal,
private checkvalue: CheckValueSevice,
private TransactionService: TransactionService,
private ActivatedRoute: ActivatedRoute,
private GooleMapsService: GooleMapsService,
public toastr: ToastsManager,
vcr: ViewContainerRef,
) {
this.toastr.setRootViewContainerRef(vcr);
// LẤY TÊN VÍ HIỆN THỊ LÊN GIAO DIỆN
this.paramIdWalletURL();
// PHẦN CHỨC NĂNG TAG USER
let thisglob = this;
window.onload = function () {
$('#taguser').tagEditor({
autocomplete: {
delay: 0.15,
position: { collision: 'flip' },
source: ['ActionScript', 'AppleScript', 'Asp', 'BASIC', 'C', 'C++', 'CSS', 'Clojure', 'COBOL', 'ColdFusion', 'Erlang', 'Fortran', 'Groovy', 'Haskell', 'HTML', 'Java', 'JavaScript', 'Lisp', 'Perl', 'PHP', 'Python', 'Ruby', 'Scala', 'Scheme']
},
forceLowercase: false,
placeholder: 'Với',
onChange: (field, editor, tags) => {
thisglob.transaction.taguser = tags;
}
});
}
}
// LẤY FILE
onSelectFile(event) {
if (event.target.files && event.target.files[0]) {
var reader = new FileReader();
this.fileToUpload = event.target.files[0];
reader.readAsDataURL(event.target.files[0]);
reader.onload = (event: any) => {
this.url = event.target.result;
}
}
}
// HÀM LẤY DATA TẤT CÁ CẢ VÍ
getDataWallets() {
this.WalletService.getDataWallets();
this.WalletService.getAllWallet.subscribe((wallet) => {
this.dataWallets = wallet;
})
}
changeMoneyWallet() {
let obj = {
_id: this.transaction.idwallet,
money: this.infoCheckMoney.moneytrnasction,
namewallet: this.infoCheckMoney.namewallet
}
this.WalletService.updateDataWallet(obj)
.then((result) => {
this.modalCheckMoney.close();
// CHỈNH SỬA XONG CẬP NHẬT LẠI GIAO DIỆN MỚI
this.reloadData();
this.toastr.success('Điều chỉnh số tiền trong ví thành công ! ', 'Success ! ');
});
}
changeMoneyTransaction() {
this.transaction.moneytransaction = this.infoCheckMoney.moneywallet;
this.modalCheckMoney.close();
}
// SUMMIT GỬI GIAO DỊCH
submitTransaction(modalCheckMoney) {
if (this.transaction.groupcategory == '') {
this.toastr.warning('Vui lòng chọn category ! ', 'Cảnh báo ! ');
} else if (this.transaction.moneytransaction == '') {
this.toastr.warning('Vui lòng nhập số tiền vào ! ', 'Cảnh báo ! ');
} else if (isNaN(Number.parseInt(this.transaction.moneytransaction.toString()))) {
this.toastr.warning('Số tiền phải là 1 số ! ', 'Waring ! ');
} else {
let checkMoney = true;
if (this.transaction.groupcategory == "expense") {
this.dataWallets.forEach((wallet) => {
if (wallet._id == this.transaction.idwallet) {
if ((Number.parseInt(this.transaction.moneytransaction.toString())) > wallet.money) {
this.infoCheckMoney['moneywallet'] = wallet.money;
this.infoCheckMoney['moneytrnasction'] = this.transaction.moneytransaction;
checkMoney = false;
}
}
})
}
if (checkMoney == true) {
// thay đổi dấu
if (this.transaction.groupcategory == "income" || this.transaction.groupcategory == "debt") {
if (Number(this.transaction.moneytransaction) < 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
if (this.transaction.groupcategory == "expense" || this.transaction.groupcategory == "loan") {
if (Number(this.transaction.moneytransaction) > 0) { | this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
// tạo một giao dịch
this.TransactionService.createTransaction(this.transaction)
.then((result) => {
// upload hình ảnh
if (this.fileToUpload != null) {
this.TransactionService.uploadImage(result._id, this.fileToUpload)
.then((data) => {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
})
} else {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
}
})
.catch((err) => {
this.toastr.error(err, 'Thất bại ! ');
})
} else {
this.modalCheckMoney = this.modalService.open(modalCheckMoney, { windowClass: 'modalCheckMoney' });
}
}
}
// CHỌN THU NHẬP, CHI TIÊU, HAY NỢ
chooseCategory(event) {
this.transaction.groupcategory = event.detect;
this.transaction.imagecategory = event.image;
this.transaction.categorytransaction = event.name;
this.transaction.idcategory = event._id;
if (this.transaction.groupcategory == 'income') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Thu Nhập';
} else if (this.transaction.groupcategory == 'expense') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Chi Tiêu';
} else if (this.transaction.groupcategory == 'debt-loan') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Nợ/Vay';
}
}
// XOÁ HÌNH ẢNH
deleteImage() {
this.url = null;
this.fileToUpload = null;
}
// KHI USER CHỌN NGÀY
changeDate(event) {
this.dateCurrent = new Date(event.value.toDateString());
this.transaction.datecreatetransaction = new Date(event.value.toDateString()).toString();
}
// LẤY 1 VÍ CÓ ID LÀ
paramIdWalletURL() {
//LẤY ID WALLET TỪ URL
this.ActivatedRoute.paramMap
.subscribe((params) => {
if (params['params'].idwallet != undefined) {
this.WalletService.getDataWalletId(params['params'].idwallet).then((data) => {
this.nameWallet = data.namewallet;
this.infoCheckMoney['namewallet'] = data.namewallet;
this.transaction.idwallet = data._id;
})
.catch((err) => { })
}
})
}
// LẤY DỮ LIỆU KHI NGƯỜI DÙNG CHỌN VÍ NÀO
outputIdWallet(event) {
this.nameWallet = event.namewallet;
this.infoCheckMoney['namewallet'] = event.namewallet;
this.transaction.idwallet = event._id;
}
// LOAD LẠI DATA
reloadData() {
let urlIdWallet = | random_line_split | |
transaction.component.ts | : GooleMapsService,
public toastr: ToastsManager,
vcr: ViewContainerRef,
) {
this.toastr.setRootViewContainerRef(vcr);
// LẤY TÊN VÍ HIỆN THỊ LÊN GIAO DIỆN
this.paramIdWalletURL();
// PHẦN CHỨC NĂNG TAG USER
let thisglob = this;
window.onload = function () {
$('#taguser').tagEditor({
autocomplete: {
delay: 0.15,
position: { collision: 'flip' },
source: ['ActionScript', 'AppleScript', 'Asp', 'BASIC', 'C', 'C++', 'CSS', 'Clojure', 'COBOL', 'ColdFusion', 'Erlang', 'Fortran', 'Groovy', 'Haskell', 'HTML', 'Java', 'JavaScript', 'Lisp', 'Perl', 'PHP', 'Python', 'Ruby', 'Scala', 'Scheme']
},
forceLowercase: false,
placeholder: 'Với',
onChange: (field, editor, tags) => {
thisglob.transaction.taguser = tags;
}
});
}
}
// LẤY FILE
onSelectFile(event) {
if (event.target.files && event.target.files[0]) {
var reader = new FileReader();
this.fileToUpload = event.target.files[0];
reader.readAsDataURL(event.target.files[0]);
reader.onload = (event: any) => {
this.url = event.target.result;
}
}
}
// HÀM LẤY DATA TẤT CÁ CẢ VÍ
getDataWallets() {
this.WalletService.getDataWallets();
this.WalletService.getAllWallet.subscribe((wallet) => {
this.dataWallets = wallet;
})
}
changeMoneyWallet() {
let obj = {
_id: this.transaction.idwallet,
money: this.infoCheckMoney.moneytrnasction,
namewallet: this.infoCheckMoney.namewallet
}
this.WalletService.updateDataWallet(obj)
.then((result) => {
this.modalCheckMoney.close();
// CHỈNH SỬA XONG CẬP NHẬT LẠI GIAO DIỆN MỚI
this.reloadData();
this.toastr.success('Điều chỉnh số tiền trong ví thành công ! ', 'Success ! ');
});
}
changeMoneyTransaction() {
this.transaction.moneytransaction = this.infoCheckMoney.moneywallet;
this.modalCheckMoney.close();
}
// SUMMIT GỬI GIAO DỊCH
submitTransaction(modalCheckMoney) {
if (this.transaction.groupcategory == '') {
this.toastr.warning('Vui lòng chọn category ! ', 'Cảnh báo ! ');
} else if (this.transaction.moneytransaction == '') {
this.toastr.warning('Vui lòng nhập số tiền vào ! ', 'Cảnh báo ! ');
} else if (isNaN(Number.parseInt(this.transaction.moneytransaction.toString()))) {
this.toastr.warning('Số tiền phải là 1 số ! ', 'Waring ! ');
} else {
let checkMoney = true;
if (this.transaction.groupcategory == "expense") {
this.dataWallets.forEach((wallet) => {
if (wallet._id == this.transaction.idwallet) {
if ((Number.parseInt(this.transaction.moneytransaction.toString())) > wallet.money) {
this.infoCheckMoney['moneywallet'] = wallet.money;
this.infoCheckMoney['moneytrnasction'] = this.transaction.moneytransaction;
checkMoney = false;
}
}
})
}
if (checkMoney == true) {
// thay đổi dấu
if (this.transaction.groupcategory == "income" || this.transaction.groupcategory == "debt") {
if (Number(this.transaction.moneytransaction) < 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
if (this.transaction.groupcategory == "expense" || this.transaction.groupcategory == "loan") {
if (Number(this.transaction.moneytransaction) > 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
// tạo một giao dịch
this.TransactionService.createTransaction(this.transaction)
.then((result) => {
// upload hình ảnh
if (this.fileToUpload != null) {
this.TransactionService.uploadImage(result._id, this.fileToUpload)
.then((data) => {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
})
} else {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
}
})
.catch((err) => {
this.toastr.error(err, 'Thất bại ! ');
})
} else {
this.modalCheckMoney = this.modalService.open(modalCheckMoney, { windowClass: 'modalCheckMoney' });
}
}
}
// CHỌN THU NHẬP, CHI TIÊU, HAY NỢ
chooseCategory(event) {
this.transaction.groupcategory = event.detect;
this.transaction.imagecategory = event.image;
this.transaction.categorytransaction = event.name;
this.transaction.idcategory = event._id;
if (this.transaction.groupcategory == 'income') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Thu Nhập';
} else if (this.transaction.groupcategory == 'expense') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Chi Tiêu';
} else if (this.transaction.groupcategory == 'debt-loan') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Nợ/Vay';
}
}
// XOÁ HÌNH ẢNH
deleteImage() {
this.url = null;
this.fileToUpload = null;
}
// KHI USER CHỌN NGÀY
changeDate(event) {
this.dateCurrent = new Date(event.value.toDateString());
this.transaction.datecreatetransaction = new Date(event.value.toDateString()).toString();
}
// LẤY 1 VÍ CÓ ID LÀ
paramIdWalletURL() {
//LẤY ID WALLET TỪ URL
this.ActivatedRoute.paramMap
.subscribe((params) => {
if (params['params'].idwallet != undefined) {
this.WalletService.getDataWalletId(params['params'].idwallet).then((data) => {
this.nameWallet = data.namewallet;
this.infoCheckMoney['namewallet'] = data.namewallet;
this.transaction.idwallet = data._id;
})
.catch((err) => { })
}
})
}
// LẤY DỮ LIỆU KHI NGƯỜI DÙNG CHỌN VÍ NÀO
outputIdWallet(event) {
this.nameWallet = event.namewallet;
this.infoCheckMoney['namewallet'] = event.namewallet;
this.transaction.idwallet = event._id;
}
// LOAD LẠI DATA
reloadData() {
let urlIdWallet = (this.ActivatedRoute.snapshot.params.idwallet == undefined) ? '' : this.ActivatedRoute.snapshot.params.idwallet;
// LOAD LẠI CẬP NHẬT BÁO CÁO
this.TransactionService.getTransactions(urlIdWallet);
// LOAD CẬP NHẬT LẠI TẤT CẢ CÁC VÍ
this.WalletService.getDataWallets();
}
// RESET DATA
resetData() {
this.titleTransaction = "Thêm Giao Dịch";
this.nameButtonTransaction = "Thêm Giao Dịch";
this.transaction = {
idcategory: '',
groupcategory: '',
notetransaction: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// RESET TẤT CẢ CÁC TAGS
if(this.transaction.taguser != null){
let tags = $('#taguser').tagEditor('getTags')[0].tags;
for (let i = 0; i < tags.length; i++) {
$('#taguser').tagEditor('removeTag', tags[i]);
}
}
this.url = null;
this.fileToUpload = null;
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
// RESET WALLET
this.paramIdWalletURL();
// RESET IMAGE
this.url = null;
this.fileToUpload = null;
}
private setCurrentPosition() {
if ("geolocation" in navigator) {
navigator.geolocation.getCurrentPosition((position) => {
this.lat = position.coords.latitude;
this.lng = position.coords.longitude;
this.zoom = 14;
});
}
}
/ | / MỞ MODAL CHỌN ĐỊA ĐIỂM GOOGLE MAP
open(content) {
this.GooleMapsService.getPlaceNear(this.lat, this.lng).then((data) => {
this.allPlace = data.results;
})
this.modalService.open(content);
}
// SUBMIT ĐỊA ĐIỂM
submitLocation(pla | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.