file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ahrs_serv.py | """ AHRS - Madgwicks, basico
Este codigo se conecta por el bus de I2C del Raspberry PI modelo 2 al IMU10 de Adafruit, y usa los datos de los sensores para
alimentar una implementacion del filtro de Madgwicks que retorna la orientacion en quaterniones del sensor (que son transformadas a Angulos
de Euler). Luego lo enivia por tcp/ip a una computadora que grafica el resultado.
"""
# Funciones de comunicacion
def get_interfaces():
""" (Python 3) Funcion que devuelve una lista con strings de todos las interfaces de red que tenga tu computadora
*NOTA: Solo funciona en Linux
get_ifaces()
['enp3s0', 'vmnet1', 'vmnet8', 'wlp2s0', ' lo']"""
with open('/proc/net/dev','r') as f: #Abrimos el archivo con la informacion de red
interfaces = []
for linea in f:
if ':' in linea:
interfaces.append(linea[:linea.find(':')]) #Extraemos los primeros caracteres de las lineas con informacion de las interfaces
return [iface.lstrip().rstrip() for iface in interfaces]
def get_ip_address2(ifname):
""" (Python 2)Funcion que recibe un string con el nombre de una interfaz de red y devuelve
un string con la direccion IP de la interfaz, o None si dicha interfaz no
tiene direccion IP asignada.
get_ip_address('wlp2s0')
'192.168.1.4' """
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
return None
def get_network_config2():
""" (Python 2) Funcion que devuelve un diccionario con las interfaces de red de la computadora y sus respectivas direcciones
ip. """
interfaces = get_interfaces()
ips = [get_ip_address2(ip) for ip in interfaces]
return dict(zip(interfaces,ips))
# Funciones que configuran los sensores
def accel_setup():
global ahrs
global accel_addr
ahrs.write_byte_data(accel_addr,0x23,0x88) #Prendemos alta resolucion y hold de update de los registros de salida en el reg 23h
ahrs.write_byte_data(accel_addr,0x20,0x27) #sacamos el accelerometro del shutdown mode
def magn_setup():
global ahrs
global magn_addr
ahrs.write_byte_data(magn_addr,0x00,0x10) #Seteamos la velocidad de las mediciones a 15Hz
ahrs.write_byte_data(magn_addr,0x01,0x20) #Ponemos la escala +-1.3g
ahrs.write_byte_data(magn_addr,0x02,0x00) #Prendemos el magnetometro
def gyro_setup():
global ahrs
global gyro_addr
ahrs.write_byte_data(gyro_addr,0x20,0x8F) #DataRate 400Hz, BW 20Hz, All Axis enabled, Gyro ON
ahrs.write_byte_data(gyro_addr,0x23,0xA0) #Escala 2000dps, BlockUpdates
ahrs.write_byte_data(gyro_addr,0x24,0x02) #OutSel = 10h, use HPF and LPF2, HPen = 0.
# Funciones que sacan los valores de los sensores.
def accel_read():
global ahrs
global accel_addr
accel_data = [0,0,0]
##Sacamos los datos de acceleracion de los 3 ejes
#Eje X
xl = format(ahrs.read_byte_data(accel_addr,0x28), '#010b')[2:6]
xh = format(ahrs.read_byte_data(accel_addr,0x29), '#010b')[2:]
#Eje Y
yl = format(ahrs.read_byte_data(accel_addr,0x2A), '#010b')[2:6]
yh = format(ahrs.read_byte_data(accel_addr,0x2B), '#010b')[2:]
#Eje Z
zl = format(ahrs.read_byte_data(accel_addr,0x2C), '#010b')[2:6]
zh = format(ahrs.read_byte_data(accel_addr,0x2D), '#010b')[2:]
## Combinamos juntos los 2 bytes.
accel_data[0] = int('0b' + xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1)) #Eje X #Unimos los bytes en complemento a 2
accel_data[1] = int('0b' + yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1)) #Eje Y #Unimos los bytes en complemento a 2
accel_data[2] = int('0b' + zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1)) #Eje Z #Unimos los bytes en complemento a 2
#Normalizamos el vector antes de retornarlo
norma = np.linalg.norm(accel_data)
accel_data = list(map(lambda x: x/norma,accel_data))
return accel_data
def magn_read():
global ahrs
global magn_addr
magn_data = [0,0,0]
##Sacamos los datos de campo magnetico de los 3 ejes
#Eje X
xh = ahrs.read_byte_data(magn_addr,0x03)
xl = ahrs.read_byte_data(magn_addr,0x04)
#Eje Y
yh = ahrs.read_byte_data(magn_addr,0x07)
yl = ahrs.read_byte_data(magn_addr,0x08)
#Eje Z
zh = ahrs.read_byte_data(magn_addr,0x05)
zl = ahrs.read_byte_data(magn_addr,0x06)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
magn_data[0] = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
magn_data[1] = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
magn_data[2] = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Escalamos los datos
magn_data[0] = (magn_data[0] - 35.0) * 1.0
magn_data[1] = (magn_data[1] + 35.0) * 1.02702702703
magn_data[2] = (magn_data[2] - 3.0) * 0.974358974359
#Normalizamos el vector
norma = np.linalg.norm(magn_data)
magn_data = list(map(lambda x: x/norma,magn_data))
return magn_data
def gyro_read():
global ahrs
global gyro_addr
gyro_data = [0,0,0]
#Eje X
xh = ahrs.read_byte_data(gyro_addr,0x29)
xl = ahrs.read_byte_data(gyro_addr,0x28)
#Eje Y
yh = ahrs.read_byte_data(gyro_addr,0x2B)
yl = ahrs.read_byte_data(gyro_addr,0x2A)
#Eje Z
zh = ahrs.read_byte_data(gyro_addr,0x2D)
zl = ahrs.read_byte_data(gyro_addr,0x2C)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
x = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
y = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
z = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Calculamos los grados por segundo (para 2000dps)
gyro_data[0] = float(x)*70/1000
gyro_data[1] = float(y)*70/1000
gyro_data[2] = float(z)*70/1000
#Transformamos los datos a radianes/seg
gyro_data = list(map(math.radians, gyro_data))
return gyro_data
def madgwicks_filter(accel_datas, magn_datas, gyro_datas, deltat):
global SEq
global b_x
global b_z
global w_b
global beta
global zeta
# print "accel = {}".format(accel_datas)
# print "magn = {}".format(magn_datas)
# print "gyro = {}".format(gyro_datas)
# print "deltat = {}".format(deltat)
# print SEq
# print b_x
# print w_b
# print beta
#axulirary variables to avoid reapeated calcualtions
halfSEq_1 = 0.5 * SEq[0]
halfSEq_2 = 0.5 * SEq[1]
halfSEq_3 = 0.5 * SEq[2]
halfSEq_4 = 0.5 * SEq[3]
twoSEq_1 = 2.0 * SEq[0]
twoSEq_2 = 2.0 * SEq[1]
twoSEq_3 = 2.0 * SEq[2]
twoSEq_4 = 2.0 * SEq[3]
twob_x = 2.0 * b_x
twob_z = 2.0 * b_z
twob_xSEq_1 = 2.0 * b_x * SEq[0]
twob_xSEq_2 = 2.0 * b_x * SEq[1]
twob_xSEq_3 = 2.0 * b_x * SEq[2]
twob_xSEq_4 = 2.0 * b_x * SEq[3]
twob_zSEq_1 = 2.0 * b_z * SEq[0]
twob_zSEq_2 = 2.0 * b_z * SEq[1]
twob_zSEq_3 = 2.0 * b_z * SEq[2]
twob_zSEq_4 = 2.0 * b_z * SEq[3]
SEq_1SEq_2 = SEq[0] * SEq[1]
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
twom_x = 2.0 * magn_datas[0]
twom_y = 2.0 * magn_datas[1]
twom_z = 2.0 * magn_datas[2]
# compute the objective function and Jacobian
f_1 = twoSEq_2 * SEq[3] - twoSEq_1 * SEq[2] - accel_datas[0]
f_2 = twoSEq_1 * SEq[1] + twoSEq_3 * SEq[3] - accel_datas[1]
f_3 = 1.0 - twoSEq_2 * SEq[1] - twoSEq_3 * SEq[2] - accel_datas[2]
f_4 = twob_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twob_z * (SEq_2SEq_4 - SEq_1SEq_3) - magn_datas[0]
f_5 = twob_x * (SEq[1] * SEq[2] - SEq[0] * SEq[3]) + twob_z * (SEq[0] * SEq[1] + SEq[2] * SEq[3]) - magn_datas[1]
f_6 = twob_x * (SEq_1SEq_3 + SEq_2SEq_4) + twob_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2]) - magn_datas[2]
J_11or24 = twoSEq_3 # J_11 negated in matrix multiplication
J_12or23 = 2.0 * SEq[3]
J_13or22 = twoSEq_1 # J_12 negated in matrix multiplication
J_14or21 = twoSEq_2
J_32 = 2.0 * J_14or21 # negated in matrix multiplication
J_33 = 2.0 * J_11or24 # negated in matrix multiplication
J_41 = twob_zSEq_3 # negated in matrix multiplication
J_42 = twob_zSEq_4
J_43 = 2.0 * twob_xSEq_3 + twob_zSEq_1 # negated in matrix multiplication
J_44 = 2.0 * twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_51 = twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_52 = twob_xSEq_3 + twob_zSEq_1
J_53 = twob_xSEq_2 + twob_zSEq_4
J_54 = twob_xSEq_1 - twob_zSEq_3 # negated in matrix multiplication
J_61 = twob_xSEq_3
J_62 = twob_xSEq_4 - 2.0 * twob_zSEq_2
J_63 = twob_xSEq_1 - 2.0 * twob_zSEq_3
J_64 = twob_xSEq_2
#print "f_1 = {} f_2 = {} f_3 = {} f_4 = {} f_5 = {} f_6 = {}".format(f_1,f_2,f_3,f_4,f_5,f_6)
# print "J_64 = {} J_63 = {} J_62 = {} J_61 = {} J_54 = {} J_53 = {} J_52 = {} J_51 = {} J_44 = {} J_43 = {} J_42 = {} J_41 = {}".format(J_64,J_63,J_62,J_61,J_54,J_53,J_52,J_51,J_44,J_43,J_42,J_41)
# compute the gradient (matrix multiplication)
SEqHatDot_1 = J_14or21 * f_2 - J_11or24 * f_1 - J_41 * f_4 - J_51 * f_5 + J_61 * f_6
SEqHatDot_2 = J_12or23 * f_1 + J_13or22 * f_2 - J_32 * f_3 + J_42 * f_4 + J_52 * f_5 + J_62 * f_6
SEqHatDot_3 = J_12or23 * f_2 - J_33 * f_3 - J_13or22 * f_1 - J_43 * f_4 + J_53 * f_5 + J_63 * f_6
SEqHatDot_4 = J_14or21 * f_1 + J_11or24 * f_2 - J_44 * f_4 - J_54 * f_5 + J_64 * f_6
###
# print SEqHatDot_1
# print SEqHatDot_2
# print SEqHatDot_3
# print SEqHatDot_4
# print
# normalise the gradient to estimate direction of the gyroscope error
norm = math.sqrt(SEqHatDot_1**2 + SEqHatDot_2**2 + SEqHatDot_3**2 + SEqHatDot_4**2)
SEqHatDot_1 = SEqHatDot_1 / norm
SEqHatDot_2 = SEqHatDot_2 / norm
SEqHatDot_3 = SEqHatDot_3 / norm
SEqHatDot_4 = SEqHatDot_4 / norm
###
# print "SEqHatDot_1: {} SEqHatDot_2: {} SEqHatDot_3: {} SEqHatDot_4: {}".format(SEqHatDot_1,SEqHatDot_2,SEqHatDot_3,SEqHatDot_4)
# compute angular estimated direction of the gyroscope error
w_err_x = twoSEq_1 * SEqHatDot_2 - twoSEq_2 * SEqHatDot_1 - twoSEq_3 * SEqHatDot_4 + twoSEq_4 * SEqHatDot_3
w_err_y = twoSEq_1 * SEqHatDot_3 + twoSEq_2 * SEqHatDot_4 - twoSEq_3 * SEqHatDot_1 - twoSEq_4 * SEqHatDot_2
w_err_z = twoSEq_1 * SEqHatDot_4 - twoSEq_2 * SEqHatDot_3 + twoSEq_3 * SEqHatDot_2 - twoSEq_4 * SEqHatDot_1
# print "w_err_x: {}, w_err_y:{}, w_err_z:{}".format(w_err_x, w_err_y, w_err_z)
# print "zeta: {}".format(zeta)
# print "deltat: {}".format(deltat)
# compute and remove the gyroscope baises
# print "w_b1: {}".format(w_b)
w_b[0] += w_err_x * deltat * zeta
w_b[1] += w_err_y * deltat * zeta
w_b[2] += w_err_z * deltat * zeta
# print "w_b2: {}".format(w_b)
gyro_datas[0] -= w_b[0]
gyro_datas[1] -= w_b[1]
gyro_datas[2] -= w_b[2]
###
# compute the quaternion rate measured by gyroscopes
SEqDot_omega_1 = -halfSEq_2 * gyro_datas[0] - halfSEq_3 * gyro_datas[1] - halfSEq_4 * gyro_datas[2]
SEqDot_omega_2 = halfSEq_1 * gyro_datas[0] + halfSEq_3 * gyro_datas[2] - halfSEq_4 * gyro_datas[1]
SEqDot_omega_3 = halfSEq_1 * gyro_datas[1] - halfSEq_2 * gyro_datas[2] + halfSEq_4 * gyro_datas[0]
SEqDot_omega_4 = halfSEq_1 * gyro_datas[2] + halfSEq_2 * gyro_datas[1] - halfSEq_3 * gyro_datas[0]
# compute then integrate the estimated quaternion rate
SEq[0] += (SEqDot_omega_1 - (beta * SEqHatDot_1)) * deltat
SEq[1] += (SEqDot_omega_2 - (beta * SEqHatDot_2)) * deltat
SEq[2] += (SEqDot_omega_3 - (beta * SEqHatDot_3)) * deltat
SEq[3] += (SEqDot_omega_4 - (beta * SEqHatDot_4)) * deltat
# Normalizamos los quaterniones
norm = np.linalg.norm(SEq)
SEq = map(lambda x: x/norm,SEq)
# compute flux in the earth frame
SEq_1SEq_2 = SEq[0] * SEq[1] # recompute axulirary variables
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
h_x = twom_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twom_y * (SEq_2SEq_3 - SEq_1SEq_4) + twom_z * (SEq_2SEq_4 + SEq_1SEq_3)
h_y = twom_x * (SEq_2SEq_3 + SEq_1SEq_4) + twom_y * (0.5 - SEq[1] * SEq[1] - SEq[3] * SEq[3]) + twom_z * (SEq_3SEq_4 - SEq_1SEq_2)
h_z = twom_x * (SEq_2SEq_4 - SEq_1SEq_3) + twom_y * (SEq_3SEq_4 + SEq_1SEq_2) + twom_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2])
# normalise the flux vector to have only components in the x and z
b_x = math.sqrt((h_x * h_x) + (h_y * h_y))
b_z = h_z
def Quat_to_Euler(quater):
euler = [0,0,0]
euler[0] = math.atan2(2*(quater[0]*quater[1] + quater[2]*quater[3]),quater[0]*quater[0] - quater[1]*quater[1] - quater[2]*quater[2] + quater[3]*quater[3])
euler[1] = math.asin(-2*((quater[0]*quater[2] - quater[1]*quater[3]))/(quater[0]*quater[0] + quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3]))
euler[2] = math.atan2(2*(quater[1]*quater[2] + quater[0]*quater[3]),-quater[0]*quater[0] - quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3])
euler = map(math.degrees,euler)
return euler
import smbus
import time
import numpy as np
import math
import socket
import fcntl
import struct
#Analizamos la red para encontrar el ip correcto
inter_faces = get_network_config2()
if inter_faces['eth0'] == None: #Le damos prioridad a la conexion ethernet
host = inter_faces['wlan0']
tarjeta = 'wlan0'
else:
host = inter_faces['eth0']
tarjeta = 'eth0'
print("Intentando establecer conexion en interfaz {} con la direccion ip {}".format(tarjeta, host))
#Establecemos la conexion
try:
port = 23322
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(1)
conn,addr = s.accept()
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo establecer la conexion")
exit()
#Abrimos el puerto I2C
ahrs = smbus.SMBus(1)
#Definimos las direcciones de los sensores
gyro_addr = 0x6B
accel_addr = 0x19
magn_addr = 0x1E
#Variables globales
SEq = [0.0,0.0,0.0,1.0] #Quaterniones
b_x = 1 #Earth Flux
b_z = 0
w_b = [0,0,0] #Gyroscopic Bias Error
beta = math.sqrt(3.0/4.0)*math.radians(5) #gyro measurment error rad/s (5 deg/s)
zeta = math.sqrt(3.0/4.0)*math.radians(0.2) #gyro drift error rad/s/s (0.2 deg/s/s)
#Colocamos los valores de configuracion
accel_setup()
magn_setup()
gyro_setup()
#Leemos los datos de los sensores.
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#Variables de tiempo
time_new = 0
time_old = time.time()
#loop de control
while(1):
#sacamos medidas de sensores
| accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#medimos tiempo
time_new = time.time()
#corremos el filtro
madgwicks_filter(accel_data, magn_data, gyro_data, time_new - time_old)
#Actualizamos el tiempo
time_old = time_new
#Calculamos los Angulos de Euler
Angulos = Quat_to_Euler(SEq)
#Imprimimos
print("Pitch: {:+.2f}deg Roll: {:+.2f}deg Yaw: {:+.2f}deg Quaternion:({:+.3f}, {:+.3f}, {:+.3f}, {:+.3f})".format(Angulos[0],Angulos[1],Angulos[2], SEq[0], SEq[1], SEq[2], SEq[3] ))
mensaje = "{:+.2f},{:+.2f},{:+.2f}\n".format(Angulos[0],Angulos[1],Angulos[2])
try:
conn.sendall(mensaje) #Enviamos por TCP la informacion
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo mandar el paquete")
exit()
time.sleep(0.01)
# print("Accel:({:+.3f},{:+.3f},{:+.3f}) Magn:({:+.3f},{:+.3f},{:+.3f}) Gyro:({:+.3f},{:+.3f},{:+.3f})".format(accel_data[0],accel_data[1],accel_data[2],magn_data[0],magn_data[1],magn_data[2],gyro_data[0],gyro_data[1],gyro_data[2])) | conditional_block | |
ahrs_serv.py | """ AHRS - Madgwicks, basico
Este codigo se conecta por el bus de I2C del Raspberry PI modelo 2 al IMU10 de Adafruit, y usa los datos de los sensores para
alimentar una implementacion del filtro de Madgwicks que retorna la orientacion en quaterniones del sensor (que son transformadas a Angulos
de Euler). Luego lo enivia por tcp/ip a una computadora que grafica el resultado.
"""
# Funciones de comunicacion
def get_interfaces():
""" (Python 3) Funcion que devuelve una lista con strings de todos las interfaces de red que tenga tu computadora
*NOTA: Solo funciona en Linux
get_ifaces()
['enp3s0', 'vmnet1', 'vmnet8', 'wlp2s0', ' lo']"""
with open('/proc/net/dev','r') as f: #Abrimos el archivo con la informacion de red
interfaces = []
for linea in f:
if ':' in linea:
interfaces.append(linea[:linea.find(':')]) #Extraemos los primeros caracteres de las lineas con informacion de las interfaces
return [iface.lstrip().rstrip() for iface in interfaces]
def get_ip_address2(ifname):
""" (Python 2)Funcion que recibe un string con el nombre de una interfaz de red y devuelve
un string con la direccion IP de la interfaz, o None si dicha interfaz no
tiene direccion IP asignada.
get_ip_address('wlp2s0')
'192.168.1.4' """
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
return None
def get_network_config2():
""" (Python 2) Funcion que devuelve un diccionario con las interfaces de red de la computadora y sus respectivas direcciones
ip. """
interfaces = get_interfaces()
ips = [get_ip_address2(ip) for ip in interfaces]
return dict(zip(interfaces,ips))
# Funciones que configuran los sensores
def accel_setup():
global ahrs
global accel_addr
ahrs.write_byte_data(accel_addr,0x23,0x88) #Prendemos alta resolucion y hold de update de los registros de salida en el reg 23h
ahrs.write_byte_data(accel_addr,0x20,0x27) #sacamos el accelerometro del shutdown mode
def magn_setup():
global ahrs
global magn_addr
ahrs.write_byte_data(magn_addr,0x00,0x10) #Seteamos la velocidad de las mediciones a 15Hz
ahrs.write_byte_data(magn_addr,0x01,0x20) #Ponemos la escala +-1.3g
ahrs.write_byte_data(magn_addr,0x02,0x00) #Prendemos el magnetometro
def | ():
global ahrs
global gyro_addr
ahrs.write_byte_data(gyro_addr,0x20,0x8F) #DataRate 400Hz, BW 20Hz, All Axis enabled, Gyro ON
ahrs.write_byte_data(gyro_addr,0x23,0xA0) #Escala 2000dps, BlockUpdates
ahrs.write_byte_data(gyro_addr,0x24,0x02) #OutSel = 10h, use HPF and LPF2, HPen = 0.
# Funciones que sacan los valores de los sensores.
def accel_read():
global ahrs
global accel_addr
accel_data = [0,0,0]
##Sacamos los datos de acceleracion de los 3 ejes
#Eje X
xl = format(ahrs.read_byte_data(accel_addr,0x28), '#010b')[2:6]
xh = format(ahrs.read_byte_data(accel_addr,0x29), '#010b')[2:]
#Eje Y
yl = format(ahrs.read_byte_data(accel_addr,0x2A), '#010b')[2:6]
yh = format(ahrs.read_byte_data(accel_addr,0x2B), '#010b')[2:]
#Eje Z
zl = format(ahrs.read_byte_data(accel_addr,0x2C), '#010b')[2:6]
zh = format(ahrs.read_byte_data(accel_addr,0x2D), '#010b')[2:]
## Combinamos juntos los 2 bytes.
accel_data[0] = int('0b' + xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1)) #Eje X #Unimos los bytes en complemento a 2
accel_data[1] = int('0b' + yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1)) #Eje Y #Unimos los bytes en complemento a 2
accel_data[2] = int('0b' + zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1)) #Eje Z #Unimos los bytes en complemento a 2
#Normalizamos el vector antes de retornarlo
norma = np.linalg.norm(accel_data)
accel_data = list(map(lambda x: x/norma,accel_data))
return accel_data
def magn_read():
global ahrs
global magn_addr
magn_data = [0,0,0]
##Sacamos los datos de campo magnetico de los 3 ejes
#Eje X
xh = ahrs.read_byte_data(magn_addr,0x03)
xl = ahrs.read_byte_data(magn_addr,0x04)
#Eje Y
yh = ahrs.read_byte_data(magn_addr,0x07)
yl = ahrs.read_byte_data(magn_addr,0x08)
#Eje Z
zh = ahrs.read_byte_data(magn_addr,0x05)
zl = ahrs.read_byte_data(magn_addr,0x06)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
magn_data[0] = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
magn_data[1] = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
magn_data[2] = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Escalamos los datos
magn_data[0] = (magn_data[0] - 35.0) * 1.0
magn_data[1] = (magn_data[1] + 35.0) * 1.02702702703
magn_data[2] = (magn_data[2] - 3.0) * 0.974358974359
#Normalizamos el vector
norma = np.linalg.norm(magn_data)
magn_data = list(map(lambda x: x/norma,magn_data))
return magn_data
def gyro_read():
global ahrs
global gyro_addr
gyro_data = [0,0,0]
#Eje X
xh = ahrs.read_byte_data(gyro_addr,0x29)
xl = ahrs.read_byte_data(gyro_addr,0x28)
#Eje Y
yh = ahrs.read_byte_data(gyro_addr,0x2B)
yl = ahrs.read_byte_data(gyro_addr,0x2A)
#Eje Z
zh = ahrs.read_byte_data(gyro_addr,0x2D)
zl = ahrs.read_byte_data(gyro_addr,0x2C)
#Convertimos los resultados a binario para poder verlos
xl = format(xl, '#010b')[2:]
xh = format(xh, '#010b')[2:]
yl = format(yl, '#010b')[2:]
yh = format(yh, '#010b')[2:]
zl = format(zl, '#010b')[2:]
zh = format(zh, '#010b')[2:]
#Y aplicamos el complemento a 2 para conseguir el numero
x = int( xh[1:] + xl,2) - int(xh[0])*(2**(len(xh+xl)-1))
y = int( yh[1:] + yl,2) - int(yh[0])*(2**(len(yh+yl)-1))
z = int( zh[1:] + zl,2) - int(zh[0])*(2**(len(zh+zl)-1))
#Calculamos los grados por segundo (para 2000dps)
gyro_data[0] = float(x)*70/1000
gyro_data[1] = float(y)*70/1000
gyro_data[2] = float(z)*70/1000
#Transformamos los datos a radianes/seg
gyro_data = list(map(math.radians, gyro_data))
return gyro_data
def madgwicks_filter(accel_datas, magn_datas, gyro_datas, deltat):
global SEq
global b_x
global b_z
global w_b
global beta
global zeta
# print "accel = {}".format(accel_datas)
# print "magn = {}".format(magn_datas)
# print "gyro = {}".format(gyro_datas)
# print "deltat = {}".format(deltat)
# print SEq
# print b_x
# print w_b
# print beta
#axulirary variables to avoid reapeated calcualtions
halfSEq_1 = 0.5 * SEq[0]
halfSEq_2 = 0.5 * SEq[1]
halfSEq_3 = 0.5 * SEq[2]
halfSEq_4 = 0.5 * SEq[3]
twoSEq_1 = 2.0 * SEq[0]
twoSEq_2 = 2.0 * SEq[1]
twoSEq_3 = 2.0 * SEq[2]
twoSEq_4 = 2.0 * SEq[3]
twob_x = 2.0 * b_x
twob_z = 2.0 * b_z
twob_xSEq_1 = 2.0 * b_x * SEq[0]
twob_xSEq_2 = 2.0 * b_x * SEq[1]
twob_xSEq_3 = 2.0 * b_x * SEq[2]
twob_xSEq_4 = 2.0 * b_x * SEq[3]
twob_zSEq_1 = 2.0 * b_z * SEq[0]
twob_zSEq_2 = 2.0 * b_z * SEq[1]
twob_zSEq_3 = 2.0 * b_z * SEq[2]
twob_zSEq_4 = 2.0 * b_z * SEq[3]
SEq_1SEq_2 = SEq[0] * SEq[1]
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
twom_x = 2.0 * magn_datas[0]
twom_y = 2.0 * magn_datas[1]
twom_z = 2.0 * magn_datas[2]
# compute the objective function and Jacobian
f_1 = twoSEq_2 * SEq[3] - twoSEq_1 * SEq[2] - accel_datas[0]
f_2 = twoSEq_1 * SEq[1] + twoSEq_3 * SEq[3] - accel_datas[1]
f_3 = 1.0 - twoSEq_2 * SEq[1] - twoSEq_3 * SEq[2] - accel_datas[2]
f_4 = twob_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twob_z * (SEq_2SEq_4 - SEq_1SEq_3) - magn_datas[0]
f_5 = twob_x * (SEq[1] * SEq[2] - SEq[0] * SEq[3]) + twob_z * (SEq[0] * SEq[1] + SEq[2] * SEq[3]) - magn_datas[1]
f_6 = twob_x * (SEq_1SEq_3 + SEq_2SEq_4) + twob_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2]) - magn_datas[2]
J_11or24 = twoSEq_3 # J_11 negated in matrix multiplication
J_12or23 = 2.0 * SEq[3]
J_13or22 = twoSEq_1 # J_12 negated in matrix multiplication
J_14or21 = twoSEq_2
J_32 = 2.0 * J_14or21 # negated in matrix multiplication
J_33 = 2.0 * J_11or24 # negated in matrix multiplication
J_41 = twob_zSEq_3 # negated in matrix multiplication
J_42 = twob_zSEq_4
J_43 = 2.0 * twob_xSEq_3 + twob_zSEq_1 # negated in matrix multiplication
J_44 = 2.0 * twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_51 = twob_xSEq_4 - twob_zSEq_2 # negated in matrix multiplication
J_52 = twob_xSEq_3 + twob_zSEq_1
J_53 = twob_xSEq_2 + twob_zSEq_4
J_54 = twob_xSEq_1 - twob_zSEq_3 # negated in matrix multiplication
J_61 = twob_xSEq_3
J_62 = twob_xSEq_4 - 2.0 * twob_zSEq_2
J_63 = twob_xSEq_1 - 2.0 * twob_zSEq_3
J_64 = twob_xSEq_2
#print "f_1 = {} f_2 = {} f_3 = {} f_4 = {} f_5 = {} f_6 = {}".format(f_1,f_2,f_3,f_4,f_5,f_6)
# print "J_64 = {} J_63 = {} J_62 = {} J_61 = {} J_54 = {} J_53 = {} J_52 = {} J_51 = {} J_44 = {} J_43 = {} J_42 = {} J_41 = {}".format(J_64,J_63,J_62,J_61,J_54,J_53,J_52,J_51,J_44,J_43,J_42,J_41)
# compute the gradient (matrix multiplication)
SEqHatDot_1 = J_14or21 * f_2 - J_11or24 * f_1 - J_41 * f_4 - J_51 * f_5 + J_61 * f_6
SEqHatDot_2 = J_12or23 * f_1 + J_13or22 * f_2 - J_32 * f_3 + J_42 * f_4 + J_52 * f_5 + J_62 * f_6
SEqHatDot_3 = J_12or23 * f_2 - J_33 * f_3 - J_13or22 * f_1 - J_43 * f_4 + J_53 * f_5 + J_63 * f_6
SEqHatDot_4 = J_14or21 * f_1 + J_11or24 * f_2 - J_44 * f_4 - J_54 * f_5 + J_64 * f_6
###
# print SEqHatDot_1
# print SEqHatDot_2
# print SEqHatDot_3
# print SEqHatDot_4
# print
# normalise the gradient to estimate direction of the gyroscope error
norm = math.sqrt(SEqHatDot_1**2 + SEqHatDot_2**2 + SEqHatDot_3**2 + SEqHatDot_4**2)
SEqHatDot_1 = SEqHatDot_1 / norm
SEqHatDot_2 = SEqHatDot_2 / norm
SEqHatDot_3 = SEqHatDot_3 / norm
SEqHatDot_4 = SEqHatDot_4 / norm
###
# print "SEqHatDot_1: {} SEqHatDot_2: {} SEqHatDot_3: {} SEqHatDot_4: {}".format(SEqHatDot_1,SEqHatDot_2,SEqHatDot_3,SEqHatDot_4)
# compute angular estimated direction of the gyroscope error
w_err_x = twoSEq_1 * SEqHatDot_2 - twoSEq_2 * SEqHatDot_1 - twoSEq_3 * SEqHatDot_4 + twoSEq_4 * SEqHatDot_3
w_err_y = twoSEq_1 * SEqHatDot_3 + twoSEq_2 * SEqHatDot_4 - twoSEq_3 * SEqHatDot_1 - twoSEq_4 * SEqHatDot_2
w_err_z = twoSEq_1 * SEqHatDot_4 - twoSEq_2 * SEqHatDot_3 + twoSEq_3 * SEqHatDot_2 - twoSEq_4 * SEqHatDot_1
# print "w_err_x: {}, w_err_y:{}, w_err_z:{}".format(w_err_x, w_err_y, w_err_z)
# print "zeta: {}".format(zeta)
# print "deltat: {}".format(deltat)
# compute and remove the gyroscope baises
# print "w_b1: {}".format(w_b)
w_b[0] += w_err_x * deltat * zeta
w_b[1] += w_err_y * deltat * zeta
w_b[2] += w_err_z * deltat * zeta
# print "w_b2: {}".format(w_b)
gyro_datas[0] -= w_b[0]
gyro_datas[1] -= w_b[1]
gyro_datas[2] -= w_b[2]
###
# compute the quaternion rate measured by gyroscopes
SEqDot_omega_1 = -halfSEq_2 * gyro_datas[0] - halfSEq_3 * gyro_datas[1] - halfSEq_4 * gyro_datas[2]
SEqDot_omega_2 = halfSEq_1 * gyro_datas[0] + halfSEq_3 * gyro_datas[2] - halfSEq_4 * gyro_datas[1]
SEqDot_omega_3 = halfSEq_1 * gyro_datas[1] - halfSEq_2 * gyro_datas[2] + halfSEq_4 * gyro_datas[0]
SEqDot_omega_4 = halfSEq_1 * gyro_datas[2] + halfSEq_2 * gyro_datas[1] - halfSEq_3 * gyro_datas[0]
# compute then integrate the estimated quaternion rate
SEq[0] += (SEqDot_omega_1 - (beta * SEqHatDot_1)) * deltat
SEq[1] += (SEqDot_omega_2 - (beta * SEqHatDot_2)) * deltat
SEq[2] += (SEqDot_omega_3 - (beta * SEqHatDot_3)) * deltat
SEq[3] += (SEqDot_omega_4 - (beta * SEqHatDot_4)) * deltat
# Normalizamos los quaterniones
norm = np.linalg.norm(SEq)
SEq = map(lambda x: x/norm,SEq)
# compute flux in the earth frame
SEq_1SEq_2 = SEq[0] * SEq[1] # recompute axulirary variables
SEq_1SEq_3 = SEq[0] * SEq[2]
SEq_1SEq_4 = SEq[0] * SEq[3]
SEq_3SEq_4 = SEq[2] * SEq[3]
SEq_2SEq_3 = SEq[1] * SEq[2]
SEq_2SEq_4 = SEq[1] * SEq[3]
h_x = twom_x * (0.5 - SEq[2] * SEq[2] - SEq[3] * SEq[3]) + twom_y * (SEq_2SEq_3 - SEq_1SEq_4) + twom_z * (SEq_2SEq_4 + SEq_1SEq_3)
h_y = twom_x * (SEq_2SEq_3 + SEq_1SEq_4) + twom_y * (0.5 - SEq[1] * SEq[1] - SEq[3] * SEq[3]) + twom_z * (SEq_3SEq_4 - SEq_1SEq_2)
h_z = twom_x * (SEq_2SEq_4 - SEq_1SEq_3) + twom_y * (SEq_3SEq_4 + SEq_1SEq_2) + twom_z * (0.5 - SEq[1] * SEq[1] - SEq[2] * SEq[2])
# normalise the flux vector to have only components in the x and z
b_x = math.sqrt((h_x * h_x) + (h_y * h_y))
b_z = h_z
def Quat_to_Euler(quater):
euler = [0,0,0]
euler[0] = math.atan2(2*(quater[0]*quater[1] + quater[2]*quater[3]),quater[0]*quater[0] - quater[1]*quater[1] - quater[2]*quater[2] + quater[3]*quater[3])
euler[1] = math.asin(-2*((quater[0]*quater[2] - quater[1]*quater[3]))/(quater[0]*quater[0] + quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3]))
euler[2] = math.atan2(2*(quater[1]*quater[2] + quater[0]*quater[3]),-quater[0]*quater[0] - quater[1]*quater[1] + quater[2]*quater[2] + quater[3]*quater[3])
euler = map(math.degrees,euler)
return euler
import smbus
import time
import numpy as np
import math
import socket
import fcntl
import struct
#Analizamos la red para encontrar el ip correcto
inter_faces = get_network_config2()
if inter_faces['eth0'] == None: #Le damos prioridad a la conexion ethernet
host = inter_faces['wlan0']
tarjeta = 'wlan0'
else:
host = inter_faces['eth0']
tarjeta = 'eth0'
print("Intentando establecer conexion en interfaz {} con la direccion ip {}".format(tarjeta, host))
#Establecemos la conexion
try:
port = 23322
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(1)
conn,addr = s.accept()
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo establecer la conexion")
exit()
#Abrimos el puerto I2C
ahrs = smbus.SMBus(1)
#Definimos las direcciones de los sensores
gyro_addr = 0x6B
accel_addr = 0x19
magn_addr = 0x1E
#Variables globales
SEq = [0.0,0.0,0.0,1.0] #Quaterniones
b_x = 1 #Earth Flux
b_z = 0
w_b = [0,0,0] #Gyroscopic Bias Error
beta = math.sqrt(3.0/4.0)*math.radians(5) #gyro measurment error rad/s (5 deg/s)
zeta = math.sqrt(3.0/4.0)*math.radians(0.2) #gyro drift error rad/s/s (0.2 deg/s/s)
#Colocamos los valores de configuracion
accel_setup()
magn_setup()
gyro_setup()
#Leemos los datos de los sensores.
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#Variables de tiempo
time_new = 0
time_old = time.time()
#loop de control
while(1):
#sacamos medidas de sensores
accel_data = accel_read()
magn_data = magn_read()
gyro_data = gyro_read()
#medimos tiempo
time_new = time.time()
#corremos el filtro
madgwicks_filter(accel_data, magn_data, gyro_data, time_new - time_old)
#Actualizamos el tiempo
time_old = time_new
#Calculamos los Angulos de Euler
Angulos = Quat_to_Euler(SEq)
#Imprimimos
print("Pitch: {:+.2f}deg Roll: {:+.2f}deg Yaw: {:+.2f}deg Quaternion:({:+.3f}, {:+.3f}, {:+.3f}, {:+.3f})".format(Angulos[0],Angulos[1],Angulos[2], SEq[0], SEq[1], SEq[2], SEq[3] ))
mensaje = "{:+.2f},{:+.2f},{:+.2f}\n".format(Angulos[0],Angulos[1],Angulos[2])
try:
conn.sendall(mensaje) #Enviamos por TCP la informacion
except:
s.close() #Si algo falla, cierra todo.
print("[-] ERROR = No se pudo mandar el paquete")
exit()
time.sleep(0.01)
# print("Accel:({:+.3f},{:+.3f},{:+.3f}) Magn:({:+.3f},{:+.3f},{:+.3f}) Gyro:({:+.3f},{:+.3f},{:+.3f})".format(accel_data[0],accel_data[1],accel_data[2],magn_data[0],magn_data[1],magn_data[2],gyro_data[0],gyro_data[1],gyro_data[2]))
| gyro_setup | identifier_name |
a1.py | # -*- coding: utf-8 -*-
"""csc311_A1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BmCgUTnUIAjM-NZ47tsFFKIXnQ9LkOHA
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import pickle
import sklearn.linear_model as lin
import sklearn.neighbors as ngh
# In the functions below,
# X = input data
# T = data labels
# w = weight vector for decision boundary
# b = bias term for decision boundary
# elevation and azimuth are angles describing the 3D viewing direction
import numpy.random as rnd
rnd.seed(3)
print('\n\nQuestion 1')
print('----------')
print('\nQuestion 1(a):')
B = np.random.rand(4,5)
print(B)
print('\nQuestion 1(b):')
y = np.random.rand(4,1)
print(y)
print('\nQuestion 1(c):')
C = B.reshape((2,10))
print(C)
print('\nQuestion 1(d):')
D = B - y
print(D)
print('\nQuestion 1(e):')
z = y.reshape(4)
print(z)
print('\nQuestion 1(f):')
B[:,3] = z
print(B)
print('\nQuestion 1(g):')
D[:,0] = B[:,2] + z
print(D)
print('\nQuestion 1(h):')
print(B[:3])
print('\nQuestion 1(i):')
print(B[:,[1,3]])
print('\nQuestion 1(j):')
print(np.log(B))
print('\nQuestion 1(k):')
print(np.sum(B))
print('\nQuestion 1(l):')
print(np.amax(B, axis=0))
print('\nQuestion 1(m):')
print(np.max(B.sum(axis=1)))
print('\nQuestion 1(n):')
print(np.matmul(B.transpose(), D))
print('\nQuestion 1(j):')
print(y.transpose()@D@D.transpose()@y)
print('\n\nQuestion 2')
print('----------')
# Q2(a)
def matrix_poly(A):
#helper
def mat_mul(X,Y):
# calculate X * Y
mat = np.zeros(X.shape)
elem_sum = 0
for i in range(X.shape[0]):
for j in range(Y.shape[1]):
for k in range(Y.shape[0]):
elem_sum += X[i,k] * Y[k,j]
mat[i,j] = elem_sum
elem_sum = 0
return mat
# find A*A
final = mat_mul(A,A)
# find A + A*A
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
# find A*(A + A*A)
final = mat_mul(A,final)
# find A + (A*(A + A*A))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
return final
# Q2(b)
def timing(N):
A = np.random.rand(N,N)
loop_start = time.time()
B1 = matrix_poly(A)
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
|
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def E(x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal(test_est_mat,Ttest)
test_acc.append(np.count_nonzero(test_compare==True)/Ttest.shape[0])
E1 = E(unbiased_train,Ttrain,w1)
train_CE.append(E1)
test_CE.append(E(unbiased_test,Ttest,w1))
# Q5(e).
print("Q4 outputs:")
print("Weight: " + str(w))
print("Bias: " + str(bias))
print("Q5 outputs:")
print("Bias: "+str(w1[0]))
print("final weight vector = "+str(w1[1:]))
print("learning rate: " + str(lrate))
# Q5(f).
plt.plot(train_CE)
plt.plot(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(g)
plt.semilogx(train_CE)
plt.semilogx(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(h)
plt.semilogx(train_acc)
plt.semilogx(test_acc,color="r")
plt.suptitle("Question 5: Training and test accuracy v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Accuracy")
plt.show()
# Q5(i).
plt.plot(train_CE[-100:])
plt.suptitle("Question 5: last 100 training cross entropies")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(j).
plt.semilogx(test_CE[50:],color="r")
plt.suptitle("Question 5: test loss from iteration 50 on (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(k).
ax,fig = plot_db(unbiased_train,Ttrain,w1[1:],w1[0],30,5)
fig.suptitle("Question 5: Training data and decision boundary")
return w1
# print("lrate = 10")
# print(gd_logreg(10))
# print("lrate = 3")
# print(gd_logreg(3))
print("\nQuestion 5(e):")
print(gd_logreg(1))
# print("lrate = 0.3")
# print(gd_logreg(0.3))
# print("lrate = 0.1")
# print(gd_logreg(0.1))
with open('mnistTVT.pickle','rb') as f:
Xtrain,Ttrain,Xval,Tval,Xtest,Ttest = pickle.load(f)
# Q6(a).
def reduce_train(Xtrain,Ttrain):
reduced_Ttrain_index = np.where((Ttrain == 5) | (Ttrain == 6), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
return full_reduced_Xtrain, full_reduced_Ttrain
# Q6(b).
def plot_first_16():
full_reduced_Xtrain, full_reduced_Ttrain = reduce_train(Xtrain,Ttrain)
for i in range(16):
plt.subplot(4,4,i+1)
plt.axis(False)
plt.imshow(full_reduced_Xtrain[i].reshape((28,28)),cmap="Greys",interpolation="nearest")
plt.suptitle("Question 6(b): 16 MNIST training images.")
plt.plot()
plot_first_16()
def train_with(target1,target2,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest):
# Note: the reason why I'm including the data-reduction and ploting part in
# here is because if I modify "reduce_train" function from pervious, and call
# it in this function, the one(occasional several) of the return numpy arrays
# will become a tuple, and will even fail to be converted to a numpy array
# using np.array(). I do believe it is a problem caused by the machine, and
# I'm unable to solve it within the time this assignment is due.
# reducing training data
reduced_Ttrain_index = np.where((Ttrain == target1) | (Ttrain == target2), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
small_reduced_Xtrain = full_reduced_Xtrain[:2000]
small_reduced_Ttrain = full_reduced_Ttrain[:2000]
# reducing validation data
reduced_Tval_index = np.where((Tval == target1) | (Tval == target2), True, False)
reduced_Xval = Xval[reduced_Tval_index]
reduced_Tval = Tval[reduced_Tval_index]
# reducing testing data
reduced_Ttest_index = np.where((Ttest == target1) | (Ttest == target2), True, False)
reduced_Xtest = Xtest[reduced_Ttest_index]
reduced_Ttest = Ttest[reduced_Ttest_index]
# print("Done reducing data!")
# fit each k into model
val_acc = []
train_acc = []
best_val_acc, best_k = -1, None
# Q6(c). step i: loop through odd k [1,19] to find best k
for k in range(1,20,2):
knn = ngh.KNeighborsClassifier(k)
knn.fit(full_reduced_Xtrain,full_reduced_Ttrain)
val_acc.append(knn.score(reduced_Xval, reduced_Tval))
train_acc.append(knn.score(small_reduced_Xtrain,small_reduced_Ttrain))
# Q6(c). step iii
if best_val_acc < val_acc[-1]:
best_val_acc = val_acc[-1]
best_k = k
# print("k = " + str(k) + " Done!")
# Q6(c). step ii: plot all k
plt.plot(train_acc)
plt.plot(val_acc,color="r")
plt.xticks([x for x in range(10)],labels=[i for i in range(1,20,2)])
plt.suptitle("Question 6(c): Training and Validation Accuracy for KNN, digits "+str(target1)+" and "+str(target2))
plt.xlabel("Number of Neighbours, K")
plt.ylabel("Accuracy")
# Q6(c). step iv: print out best k output
knn_best = ngh.KNeighborsClassifier(best_k)
knn_best.fit(full_reduced_Xtrain,full_reduced_Ttrain)
knn_best_acc = knn_best.score(reduced_Xtest, reduced_Ttest)
# Q6(c). step v,vi:
print("best k value: " + str(best_k))
print("best k validation accuracy: " + str(val_acc[best_k//2]))
print("best k test accuracy" + str(knn_best_acc))
# train models with 5,6 as target
print("Question 6")
print("----------")
print("\nQuestion 6(c):")
train_with(5,6,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
# Q6(d). train models with 4,7 as target
print("\nQuestion 6(d):")
train_with(4,7,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
| X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz | identifier_body |
a1.py | # -*- coding: utf-8 -*-
"""csc311_A1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BmCgUTnUIAjM-NZ47tsFFKIXnQ9LkOHA
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import pickle
import sklearn.linear_model as lin
import sklearn.neighbors as ngh
# In the functions below,
# X = input data
# T = data labels
# w = weight vector for decision boundary
# b = bias term for decision boundary
# elevation and azimuth are angles describing the 3D viewing direction
import numpy.random as rnd
rnd.seed(3)
print('\n\nQuestion 1')
print('----------')
print('\nQuestion 1(a):')
B = np.random.rand(4,5)
print(B)
print('\nQuestion 1(b):')
y = np.random.rand(4,1)
print(y)
print('\nQuestion 1(c):')
C = B.reshape((2,10))
print(C)
print('\nQuestion 1(d):')
D = B - y
print(D)
print('\nQuestion 1(e):')
z = y.reshape(4)
print(z)
print('\nQuestion 1(f):')
B[:,3] = z
print(B)
print('\nQuestion 1(g):')
D[:,0] = B[:,2] + z
print(D)
print('\nQuestion 1(h):')
print(B[:3])
print('\nQuestion 1(i):')
print(B[:,[1,3]])
print('\nQuestion 1(j):')
print(np.log(B))
print('\nQuestion 1(k):')
print(np.sum(B))
print('\nQuestion 1(l):')
print(np.amax(B, axis=0))
print('\nQuestion 1(m):')
print(np.max(B.sum(axis=1)))
print('\nQuestion 1(n):')
print(np.matmul(B.transpose(), D))
print('\nQuestion 1(j):')
print(y.transpose()@D@D.transpose()@y)
print('\n\nQuestion 2')
print('----------')
# Q2(a)
def matrix_poly(A):
#helper
def mat_mul(X,Y):
# calculate X * Y
mat = np.zeros(X.shape)
elem_sum = 0
for i in range(X.shape[0]):
for j in range(Y.shape[1]):
for k in range(Y.shape[0]):
elem_sum += X[i,k] * Y[k,j]
mat[i,j] = elem_sum
elem_sum = 0
return mat
# find A*A
final = mat_mul(A,A)
# find A + A*A
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
# find A*(A + A*A)
final = mat_mul(A,final)
# find A + (A*(A + A*A))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
return final
# Q2(b)
def timing(N):
A = np.random.rand(N,N)
loop_start = time.time()
B1 = matrix_poly(A)
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz |
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def E(x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal(test_est_mat,Ttest)
test_acc.append(np.count_nonzero(test_compare==True)/Ttest.shape[0])
E1 = E(unbiased_train,Ttrain,w1)
train_CE.append(E1)
test_CE.append(E(unbiased_test,Ttest,w1))
# Q5(e).
print("Q4 outputs:")
print("Weight: " + str(w))
print("Bias: " + str(bias))
print("Q5 outputs:")
print("Bias: "+str(w1[0]))
print("final weight vector = "+str(w1[1:]))
print("learning rate: " + str(lrate))
# Q5(f).
plt.plot(train_CE)
plt.plot(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(g)
plt.semilogx(train_CE)
plt.semilogx(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(h)
plt.semilogx(train_acc)
plt.semilogx(test_acc,color="r")
plt.suptitle("Question 5: Training and test accuracy v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Accuracy")
plt.show()
# Q5(i).
plt.plot(train_CE[-100:])
plt.suptitle("Question 5: last 100 training cross entropies")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(j).
plt.semilogx(test_CE[50:],color="r")
plt.suptitle("Question 5: test loss from iteration 50 on (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(k).
ax,fig = plot_db(unbiased_train,Ttrain,w1[1:],w1[0],30,5)
fig.suptitle("Question 5: Training data and decision boundary")
return w1
# print("lrate = 10")
# print(gd_logreg(10))
# print("lrate = 3")
# print(gd_logreg(3))
print("\nQuestion 5(e):")
print(gd_logreg(1))
# print("lrate = 0.3")
# print(gd_logreg(0.3))
# print("lrate = 0.1")
# print(gd_logreg(0.1))
with open('mnistTVT.pickle','rb') as f:
Xtrain,Ttrain,Xval,Tval,Xtest,Ttest = pickle.load(f)
# Q6(a).
def reduce_train(Xtrain,Ttrain):
reduced_Ttrain_index = np.where((Ttrain == 5) | (Ttrain == 6), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
return full_reduced_Xtrain, full_reduced_Ttrain
# Q6(b).
def plot_first_16():
full_reduced_Xtrain, full_reduced_Ttrain = reduce_train(Xtrain,Ttrain)
for i in range(16):
plt.subplot(4,4,i+1)
plt.axis(False)
plt.imshow(full_reduced_Xtrain[i].reshape((28,28)),cmap="Greys",interpolation="nearest")
plt.suptitle("Question 6(b): 16 MNIST training images.")
plt.plot()
plot_first_16()
def train_with(target1,target2,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest):
# Note: the reason why I'm including the data-reduction and ploting part in
# here is because if I modify "reduce_train" function from pervious, and call
# it in this function, the one(occasional several) of the return numpy arrays
# will become a tuple, and will even fail to be converted to a numpy array
# using np.array(). I do believe it is a problem caused by the machine, and
# I'm unable to solve it within the time this assignment is due.
# reducing training data
reduced_Ttrain_index = np.where((Ttrain == target1) | (Ttrain == target2), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
small_reduced_Xtrain = full_reduced_Xtrain[:2000]
small_reduced_Ttrain = full_reduced_Ttrain[:2000]
# reducing validation data
reduced_Tval_index = np.where((Tval == target1) | (Tval == target2), True, False)
reduced_Xval = Xval[reduced_Tval_index]
reduced_Tval = Tval[reduced_Tval_index]
# reducing testing data
reduced_Ttest_index = np.where((Ttest == target1) | (Ttest == target2), True, False)
reduced_Xtest = Xtest[reduced_Ttest_index]
reduced_Ttest = Ttest[reduced_Ttest_index]
# print("Done reducing data!")
# fit each k into model
val_acc = []
train_acc = []
best_val_acc, best_k = -1, None
# Q6(c). step i: loop through odd k [1,19] to find best k
for k in range(1,20,2):
knn = ngh.KNeighborsClassifier(k)
knn.fit(full_reduced_Xtrain,full_reduced_Ttrain)
val_acc.append(knn.score(reduced_Xval, reduced_Tval))
train_acc.append(knn.score(small_reduced_Xtrain,small_reduced_Ttrain))
# Q6(c). step iii
if best_val_acc < val_acc[-1]:
best_val_acc = val_acc[-1]
best_k = k
# print("k = " + str(k) + " Done!")
# Q6(c). step ii: plot all k
plt.plot(train_acc)
plt.plot(val_acc,color="r")
plt.xticks([x for x in range(10)],labels=[i for i in range(1,20,2)])
plt.suptitle("Question 6(c): Training and Validation Accuracy for KNN, digits "+str(target1)+" and "+str(target2))
plt.xlabel("Number of Neighbours, K")
plt.ylabel("Accuracy")
# Q6(c). step iv: print out best k output
knn_best = ngh.KNeighborsClassifier(best_k)
knn_best.fit(full_reduced_Xtrain,full_reduced_Ttrain)
knn_best_acc = knn_best.score(reduced_Xtest, reduced_Ttest)
# Q6(c). step v,vi:
print("best k value: " + str(best_k))
print("best k validation accuracy: " + str(val_acc[best_k//2]))
print("best k test accuracy" + str(knn_best_acc))
# train models with 5,6 as target
print("Question 6")
print("----------")
print("\nQuestion 6(c):")
train_with(5,6,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
# Q6(d). train models with 4,7 as target
print("\nQuestion 6(d):")
train_with(4,7,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest) | random_line_split | |
a1.py | # -*- coding: utf-8 -*-
"""csc311_A1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BmCgUTnUIAjM-NZ47tsFFKIXnQ9LkOHA
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import pickle
import sklearn.linear_model as lin
import sklearn.neighbors as ngh
# In the functions below,
# X = input data
# T = data labels
# w = weight vector for decision boundary
# b = bias term for decision boundary
# elevation and azimuth are angles describing the 3D viewing direction
import numpy.random as rnd
rnd.seed(3)
print('\n\nQuestion 1')
print('----------')
print('\nQuestion 1(a):')
B = np.random.rand(4,5)
print(B)
print('\nQuestion 1(b):')
y = np.random.rand(4,1)
print(y)
print('\nQuestion 1(c):')
C = B.reshape((2,10))
print(C)
print('\nQuestion 1(d):')
D = B - y
print(D)
print('\nQuestion 1(e):')
z = y.reshape(4)
print(z)
print('\nQuestion 1(f):')
B[:,3] = z
print(B)
print('\nQuestion 1(g):')
D[:,0] = B[:,2] + z
print(D)
print('\nQuestion 1(h):')
print(B[:3])
print('\nQuestion 1(i):')
print(B[:,[1,3]])
print('\nQuestion 1(j):')
print(np.log(B))
print('\nQuestion 1(k):')
print(np.sum(B))
print('\nQuestion 1(l):')
print(np.amax(B, axis=0))
print('\nQuestion 1(m):')
print(np.max(B.sum(axis=1)))
print('\nQuestion 1(n):')
print(np.matmul(B.transpose(), D))
print('\nQuestion 1(j):')
print(y.transpose()@D@D.transpose()@y)
print('\n\nQuestion 2')
print('----------')
# Q2(a)
def matrix_poly(A):
#helper
def mat_mul(X,Y):
# calculate X * Y
mat = np.zeros(X.shape)
elem_sum = 0
for i in range(X.shape[0]):
for j in range(Y.shape[1]):
for k in range(Y.shape[0]):
elem_sum += X[i,k] * Y[k,j]
mat[i,j] = elem_sum
elem_sum = 0
return mat
# find A*A
final = mat_mul(A,A)
# find A + A*A
for i in range(A.shape[0]):
for j in range(A.shape[1]):
|
# find A*(A + A*A)
final = mat_mul(A,final)
# find A + (A*(A + A*A))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
return final
# Q2(b)
def timing(N):
A = np.random.rand(N,N)
loop_start = time.time()
B1 = matrix_poly(A)
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def E(x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal(test_est_mat,Ttest)
test_acc.append(np.count_nonzero(test_compare==True)/Ttest.shape[0])
E1 = E(unbiased_train,Ttrain,w1)
train_CE.append(E1)
test_CE.append(E(unbiased_test,Ttest,w1))
# Q5(e).
print("Q4 outputs:")
print("Weight: " + str(w))
print("Bias: " + str(bias))
print("Q5 outputs:")
print("Bias: "+str(w1[0]))
print("final weight vector = "+str(w1[1:]))
print("learning rate: " + str(lrate))
# Q5(f).
plt.plot(train_CE)
plt.plot(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(g)
plt.semilogx(train_CE)
plt.semilogx(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(h)
plt.semilogx(train_acc)
plt.semilogx(test_acc,color="r")
plt.suptitle("Question 5: Training and test accuracy v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Accuracy")
plt.show()
# Q5(i).
plt.plot(train_CE[-100:])
plt.suptitle("Question 5: last 100 training cross entropies")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(j).
plt.semilogx(test_CE[50:],color="r")
plt.suptitle("Question 5: test loss from iteration 50 on (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(k).
ax,fig = plot_db(unbiased_train,Ttrain,w1[1:],w1[0],30,5)
fig.suptitle("Question 5: Training data and decision boundary")
return w1
# print("lrate = 10")
# print(gd_logreg(10))
# print("lrate = 3")
# print(gd_logreg(3))
print("\nQuestion 5(e):")
print(gd_logreg(1))
# print("lrate = 0.3")
# print(gd_logreg(0.3))
# print("lrate = 0.1")
# print(gd_logreg(0.1))
with open('mnistTVT.pickle','rb') as f:
Xtrain,Ttrain,Xval,Tval,Xtest,Ttest = pickle.load(f)
# Q6(a).
def reduce_train(Xtrain,Ttrain):
reduced_Ttrain_index = np.where((Ttrain == 5) | (Ttrain == 6), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
return full_reduced_Xtrain, full_reduced_Ttrain
# Q6(b).
def plot_first_16():
full_reduced_Xtrain, full_reduced_Ttrain = reduce_train(Xtrain,Ttrain)
for i in range(16):
plt.subplot(4,4,i+1)
plt.axis(False)
plt.imshow(full_reduced_Xtrain[i].reshape((28,28)),cmap="Greys",interpolation="nearest")
plt.suptitle("Question 6(b): 16 MNIST training images.")
plt.plot()
plot_first_16()
def train_with(target1,target2,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest):
# Note: the reason why I'm including the data-reduction and ploting part in
# here is because if I modify "reduce_train" function from pervious, and call
# it in this function, the one(occasional several) of the return numpy arrays
# will become a tuple, and will even fail to be converted to a numpy array
# using np.array(). I do believe it is a problem caused by the machine, and
# I'm unable to solve it within the time this assignment is due.
# reducing training data
reduced_Ttrain_index = np.where((Ttrain == target1) | (Ttrain == target2), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
small_reduced_Xtrain = full_reduced_Xtrain[:2000]
small_reduced_Ttrain = full_reduced_Ttrain[:2000]
# reducing validation data
reduced_Tval_index = np.where((Tval == target1) | (Tval == target2), True, False)
reduced_Xval = Xval[reduced_Tval_index]
reduced_Tval = Tval[reduced_Tval_index]
# reducing testing data
reduced_Ttest_index = np.where((Ttest == target1) | (Ttest == target2), True, False)
reduced_Xtest = Xtest[reduced_Ttest_index]
reduced_Ttest = Ttest[reduced_Ttest_index]
# print("Done reducing data!")
# fit each k into model
val_acc = []
train_acc = []
best_val_acc, best_k = -1, None
# Q6(c). step i: loop through odd k [1,19] to find best k
for k in range(1,20,2):
knn = ngh.KNeighborsClassifier(k)
knn.fit(full_reduced_Xtrain,full_reduced_Ttrain)
val_acc.append(knn.score(reduced_Xval, reduced_Tval))
train_acc.append(knn.score(small_reduced_Xtrain,small_reduced_Ttrain))
# Q6(c). step iii
if best_val_acc < val_acc[-1]:
best_val_acc = val_acc[-1]
best_k = k
# print("k = " + str(k) + " Done!")
# Q6(c). step ii: plot all k
plt.plot(train_acc)
plt.plot(val_acc,color="r")
plt.xticks([x for x in range(10)],labels=[i for i in range(1,20,2)])
plt.suptitle("Question 6(c): Training and Validation Accuracy for KNN, digits "+str(target1)+" and "+str(target2))
plt.xlabel("Number of Neighbours, K")
plt.ylabel("Accuracy")
# Q6(c). step iv: print out best k output
knn_best = ngh.KNeighborsClassifier(best_k)
knn_best.fit(full_reduced_Xtrain,full_reduced_Ttrain)
knn_best_acc = knn_best.score(reduced_Xtest, reduced_Ttest)
# Q6(c). step v,vi:
print("best k value: " + str(best_k))
print("best k validation accuracy: " + str(val_acc[best_k//2]))
print("best k test accuracy" + str(knn_best_acc))
# train models with 5,6 as target
print("Question 6")
print("----------")
print("\nQuestion 6(c):")
train_with(5,6,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
# Q6(d). train models with 4,7 as target
print("\nQuestion 6(d):")
train_with(4,7,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
| final[i,j] += A[i,j] | conditional_block |
a1.py | # -*- coding: utf-8 -*-
"""csc311_A1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BmCgUTnUIAjM-NZ47tsFFKIXnQ9LkOHA
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import pickle
import sklearn.linear_model as lin
import sklearn.neighbors as ngh
# In the functions below,
# X = input data
# T = data labels
# w = weight vector for decision boundary
# b = bias term for decision boundary
# elevation and azimuth are angles describing the 3D viewing direction
import numpy.random as rnd
rnd.seed(3)
print('\n\nQuestion 1')
print('----------')
print('\nQuestion 1(a):')
B = np.random.rand(4,5)
print(B)
print('\nQuestion 1(b):')
y = np.random.rand(4,1)
print(y)
print('\nQuestion 1(c):')
C = B.reshape((2,10))
print(C)
print('\nQuestion 1(d):')
D = B - y
print(D)
print('\nQuestion 1(e):')
z = y.reshape(4)
print(z)
print('\nQuestion 1(f):')
B[:,3] = z
print(B)
print('\nQuestion 1(g):')
D[:,0] = B[:,2] + z
print(D)
print('\nQuestion 1(h):')
print(B[:3])
print('\nQuestion 1(i):')
print(B[:,[1,3]])
print('\nQuestion 1(j):')
print(np.log(B))
print('\nQuestion 1(k):')
print(np.sum(B))
print('\nQuestion 1(l):')
print(np.amax(B, axis=0))
print('\nQuestion 1(m):')
print(np.max(B.sum(axis=1)))
print('\nQuestion 1(n):')
print(np.matmul(B.transpose(), D))
print('\nQuestion 1(j):')
print(y.transpose()@D@D.transpose()@y)
print('\n\nQuestion 2')
print('----------')
# Q2(a)
def matrix_poly(A):
#helper
def mat_mul(X,Y):
# calculate X * Y
mat = np.zeros(X.shape)
elem_sum = 0
for i in range(X.shape[0]):
for j in range(Y.shape[1]):
for k in range(Y.shape[0]):
elem_sum += X[i,k] * Y[k,j]
mat[i,j] = elem_sum
elem_sum = 0
return mat
# find A*A
final = mat_mul(A,A)
# find A + A*A
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
# find A*(A + A*A)
final = mat_mul(A,final)
# find A + (A*(A + A*A))
for i in range(A.shape[0]):
for j in range(A.shape[1]):
final[i,j] += A[i,j]
return final
# Q2(b)
def timing(N):
A = np.random.rand(N,N)
loop_start = time.time()
B1 = matrix_poly(A)
loop_end = time.time()
np_start = time.time()
B2 = A + (A@(A+(A@A)))
np_end = time.time()
print("Magnitude of B1-B2: " + str(np.linalg.norm(B1-B2, 2)))
print("Execution time for naive iterative method with N = " + str(N) + " is " + str(loop_end - loop_start))
print("Execution time for vectorized method with N = " + str(N) + " is " + str(np_end - np_start))
# test = np.arange(9).reshape(3,3)
# print(matrix_poly(test))
# print(test + (test@(test + (test @ test))))
print("\nQuestion 2(c):")
print("N = 100:")
timing(100)
print("N = 300:")
timing(300)
print("N = 1000:")
timing(1000)
# Q3(a)
def least_squares(x,t):
X = np.ones((x.shape[0], 2))
X[:,1] = x
w = np.linalg.inv(X.transpose()@X) @ X.transpose() @ t
return w
# print(least_squares(dataTrain[0],dataTrain[1]))
# Q3(b)
def plot_data(x,t):
b, a = least_squares(x,t)
min_x, max_x = np.min(x), np.max(x)
pt1 = [min_x, max_x]
pt2 = [a*min_x+b, a*max_x+b]
plt.scatter(x,t)
plt.plot(pt1,pt2,color="r")
plt.title("Question 3(b): the fitted line")
plt.show()
return a,b
# plot_data(dataTrain[0],dataTrain[1])
# Q3(c)
def error(a,b,X,T):
est_mat = a*X+b
mse = np.mean(np.square(T-est_mat))
return mse
# a,b = least_squares(dataTrain[0],dataTrain[1])
# error(a,b,dataTrain[0],dataTrain[1])
print('\n\nQuestion 3')
print('----------')
# Q3(d)
# Read the training and test data from the file dataA1Q3.pickle
with open('dataA1Q3.pickle','rb') as f:
dataTrain, dataTest = pickle.load(f)
# Call plot_data to fit a line to the training data
train_a,train_b = plot_data(dataTrain[0],dataTrain[1])
print("\nQuestion 3(d):")
# Print the values of a and b for the fitted line
print("a: "+str(train_a))
print("b: "+str(train_b))
# Compute and print the training error
print("Mean Square Error of training data: " + str(error(train_a,train_b,dataTrain[0],dataTrain[1])))
# Compute and print the test error
print("Mean Square Error of test data: " + str(error(train_a, train_b, dataTest[0],dataTest[1])))
def boundary_mesh(X,w,w0):
# decision boundary
X = X.T
xmin = np.min(X[0])
xmax = np.max(X[0])
zmin = np.min(X[2])
zmax = np.max(X[2])
x = np.linspace(xmin,xmax,2)
z = np.linspace(zmin,zmax,2)
xx,zz = np.meshgrid(x,z)
yy = -(xx*w[0] + zz*w[2] + w0)/w[1]
return xx,yy,zz
def plot_data(X,T,elevation=30,azimuth=30):
colors = np.array(['r','b']) # red for class 0 , blue for class 1
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = np.array(['r','b']) # red for class 0 , blue for class 1
X = X.T
ax.scatter(X[0],X[1],X[2],color=colors[T],s=1)
ax.view_init(elevation,azimuth)
plt.draw()
return ax,fig
def plot_db(X,T,w,w0,elevation=30,azimuth=30):
xx,yy,zz, = boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,elevation,azimuth)
ax.plot_surface(xx,yy,zz,alpha=0.5,color='green')
return ax,fig
def plot_db3(X,T,w,w0):
_,fig1 = plot_db(X,T,w,w0,30,0)
_,fig2 = plot_db(X,T,w,w0,30,45)
_,fig3 = plot_db(X,T,w,w0,30,175)
return fig1,fig2,fig3
def movie_data(X,T):
ax,fig = plot_data(X,T,30,-20)
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
def movie_db(X,T,w,w0):
xx,yy,zz,= boundary_mesh(X,w,w0)
ax,fig = plot_data(X,T,30,-20)
ax.plot_surface(xx,yy,zz,alpha=0.3,color='green')
plt.pause(1)
for angle in range(-20,200):
ax.view_init(30, angle)
plt.draw()
plt.pause(0.0001)
return ax
with open("dataA1Q4v2.pickle","rb") as f:
Xtrain,Ttrain,Xtest,Ttest = pickle.load(f)
clf = lin.LogisticRegression()
clf.fit(Xtrain, Ttrain)
w = clf.coef_[0]
bias = clf.intercept_[0]
print("\nQuestion 4")
print("----------")
print('\nQuestion 4(a):')
print("Weight: " + str(w))
print("Bias: " + str(bias))
print('\nQuestion 4(b):')
accuracy1 = clf.score(Xtest,Ttest)
comparison = np.equal(clf.predict(Xtest), Ttest)
accuracy2 = np.count_nonzero(comparison == True) / Ttest.shape[0]
print("accuracy1: " + str(accuracy1))
print("accuracy2: " + str(accuracy2))
print("accuracy1 - accuracy2: " + str(accuracy1 - accuracy2))
# Q4(c).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,5)
fig.suptitle("Question 4(c): Training data and decision boundary")
# Q4(d).
ax,fig = plot_db(Xtrain,Ttrain,w,bias,30,20)
fig.suptitle("Question 4(d): Training data and decision boundary")
# plot_data(Xtrain, Ttrain,30,10)
print('\n\nQuestion 6')
print('----------')
# Q5 (a)-(k)
def gd_logreg(lrate):
# Q5(a). initialize weight
np.random.seed(3)
# Q5(b).
w0 = np.random.randn(Xtrain.shape[1]+1)/1000
w1 = w0.copy()
# add x0=1 to Xtrain and Ttrain
unbiased_train = np.ones((Xtrain.shape[0],Xtrain.shape[1]+1))
unbiased_train[:,1:] = Xtrain
unbiased_test = np.ones((Xtest.shape[0],Xtest.shape[1]+1))
unbiased_test[:,1:] = Xtest
# Q5(c). all helper functions below are needed
def sigma(z):
return 1/(1+np.exp(-z))
def z(x,w):
return x@w
def h(x,w):
return sigma(z(x,w))
def gd(x,t,w):
# gradient of L_ce = [X^T(y-t)]
return 1/(Ttrain.shape[0]) * x.transpose()@(h(x,w)-t)
def | (x,t,w):
# logistic-cross-entropy
return (t@np.logaddexp(0,-z(x,w))+(1-t)@np.logaddexp(0,z(x,w)))/t.shape[0]
train_CE = []
test_CE = []
train_acc = []
test_acc = []
E0 = E(unbiased_train,Ttrain,w0)
E1 = 1
# Q5(d).
while abs(E0-E1) >= np.float64(10**-10):
# for i in range(200):
E0 = E1
w0 = w1.copy()
weight_update = gd(unbiased_train,Ttrain,w1)
w1 -= lrate * weight_update
train_est_mat = np.where(z(unbiased_train,w1)>=0,1,0)
test_est_mat = np.where(z(unbiased_test,w1)>=0,1,0)
train_compare = np.equal(train_est_mat,Ttrain)
train_acc.append(np.count_nonzero(train_compare==True)/Ttrain.shape[0])
test_compare = np.equal(test_est_mat,Ttest)
test_acc.append(np.count_nonzero(test_compare==True)/Ttest.shape[0])
E1 = E(unbiased_train,Ttrain,w1)
train_CE.append(E1)
test_CE.append(E(unbiased_test,Ttest,w1))
# Q5(e).
print("Q4 outputs:")
print("Weight: " + str(w))
print("Bias: " + str(bias))
print("Q5 outputs:")
print("Bias: "+str(w1[0]))
print("final weight vector = "+str(w1[1:]))
print("learning rate: " + str(lrate))
# Q5(f).
plt.plot(train_CE)
plt.plot(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(g)
plt.semilogx(train_CE)
plt.semilogx(test_CE,color="r")
plt.suptitle("Question 5: Training and test loss v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(h)
plt.semilogx(train_acc)
plt.semilogx(test_acc,color="r")
plt.suptitle("Question 5: Training and test accuracy v.s. iterations (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Accuracy")
plt.show()
# Q5(i).
plt.plot(train_CE[-100:])
plt.suptitle("Question 5: last 100 training cross entropies")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(j).
plt.semilogx(test_CE[50:],color="r")
plt.suptitle("Question 5: test loss from iteration 50 on (log scale)")
plt.xlabel("Iteration number")
plt.ylabel("Cross entropy")
plt.show()
# Q5(k).
ax,fig = plot_db(unbiased_train,Ttrain,w1[1:],w1[0],30,5)
fig.suptitle("Question 5: Training data and decision boundary")
return w1
# print("lrate = 10")
# print(gd_logreg(10))
# print("lrate = 3")
# print(gd_logreg(3))
print("\nQuestion 5(e):")
print(gd_logreg(1))
# print("lrate = 0.3")
# print(gd_logreg(0.3))
# print("lrate = 0.1")
# print(gd_logreg(0.1))
with open('mnistTVT.pickle','rb') as f:
Xtrain,Ttrain,Xval,Tval,Xtest,Ttest = pickle.load(f)
# Q6(a).
def reduce_train(Xtrain,Ttrain):
reduced_Ttrain_index = np.where((Ttrain == 5) | (Ttrain == 6), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
return full_reduced_Xtrain, full_reduced_Ttrain
# Q6(b).
def plot_first_16():
full_reduced_Xtrain, full_reduced_Ttrain = reduce_train(Xtrain,Ttrain)
for i in range(16):
plt.subplot(4,4,i+1)
plt.axis(False)
plt.imshow(full_reduced_Xtrain[i].reshape((28,28)),cmap="Greys",interpolation="nearest")
plt.suptitle("Question 6(b): 16 MNIST training images.")
plt.plot()
plot_first_16()
def train_with(target1,target2,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest):
# Note: the reason why I'm including the data-reduction and ploting part in
# here is because if I modify "reduce_train" function from pervious, and call
# it in this function, the one(occasional several) of the return numpy arrays
# will become a tuple, and will even fail to be converted to a numpy array
# using np.array(). I do believe it is a problem caused by the machine, and
# I'm unable to solve it within the time this assignment is due.
# reducing training data
reduced_Ttrain_index = np.where((Ttrain == target1) | (Ttrain == target2), True, False)
full_reduced_Xtrain = Xtrain[reduced_Ttrain_index]
full_reduced_Ttrain = Ttrain[reduced_Ttrain_index]
small_reduced_Xtrain = full_reduced_Xtrain[:2000]
small_reduced_Ttrain = full_reduced_Ttrain[:2000]
# reducing validation data
reduced_Tval_index = np.where((Tval == target1) | (Tval == target2), True, False)
reduced_Xval = Xval[reduced_Tval_index]
reduced_Tval = Tval[reduced_Tval_index]
# reducing testing data
reduced_Ttest_index = np.where((Ttest == target1) | (Ttest == target2), True, False)
reduced_Xtest = Xtest[reduced_Ttest_index]
reduced_Ttest = Ttest[reduced_Ttest_index]
# print("Done reducing data!")
# fit each k into model
val_acc = []
train_acc = []
best_val_acc, best_k = -1, None
# Q6(c). step i: loop through odd k [1,19] to find best k
for k in range(1,20,2):
knn = ngh.KNeighborsClassifier(k)
knn.fit(full_reduced_Xtrain,full_reduced_Ttrain)
val_acc.append(knn.score(reduced_Xval, reduced_Tval))
train_acc.append(knn.score(small_reduced_Xtrain,small_reduced_Ttrain))
# Q6(c). step iii
if best_val_acc < val_acc[-1]:
best_val_acc = val_acc[-1]
best_k = k
# print("k = " + str(k) + " Done!")
# Q6(c). step ii: plot all k
plt.plot(train_acc)
plt.plot(val_acc,color="r")
plt.xticks([x for x in range(10)],labels=[i for i in range(1,20,2)])
plt.suptitle("Question 6(c): Training and Validation Accuracy for KNN, digits "+str(target1)+" and "+str(target2))
plt.xlabel("Number of Neighbours, K")
plt.ylabel("Accuracy")
# Q6(c). step iv: print out best k output
knn_best = ngh.KNeighborsClassifier(best_k)
knn_best.fit(full_reduced_Xtrain,full_reduced_Ttrain)
knn_best_acc = knn_best.score(reduced_Xtest, reduced_Ttest)
# Q6(c). step v,vi:
print("best k value: " + str(best_k))
print("best k validation accuracy: " + str(val_acc[best_k//2]))
print("best k test accuracy" + str(knn_best_acc))
# train models with 5,6 as target
print("Question 6")
print("----------")
print("\nQuestion 6(c):")
train_with(5,6,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
# Q6(d). train models with 4,7 as target
print("\nQuestion 6(d):")
train_with(4,7,Xtrain,Ttrain,Xval,Tval,Xtest,Ttest)
| E | identifier_name |
validation.go | package codegen
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
"goa.design/goa/v3/expr"
)
var (
enumValT *template.Template
formatValT *template.Template
patternValT *template.Template
exclMinMaxValT *template.Template
minMaxValT *template.Template
lengthValT *template.Template
requiredValT *template.Template
arrayValT *template.Template
mapValT *template.Template
unionValT *template.Template
userValT *template.Template
)
func init() {
fm := template.FuncMap{
"slice": toSlice,
"oneof": oneof,
"constant": constant,
"add": func(a, b int) int { return a + b },
}
enumValT = template.Must(template.New("enum").Funcs(fm).Parse(enumValTmpl))
formatValT = template.Must(template.New("format").Funcs(fm).Parse(formatValTmpl))
patternValT = template.Must(template.New("pattern").Funcs(fm).Parse(patternValTmpl))
exclMinMaxValT = template.Must(template.New("exclMinMax").Funcs(fm).Parse(exclMinMaxValTmpl))
minMaxValT = template.Must(template.New("minMax").Funcs(fm).Parse(minMaxValTmpl))
lengthValT = template.Must(template.New("length").Funcs(fm).Parse(lengthValTmpl))
requiredValT = template.Must(template.New("req").Funcs(fm).Parse(requiredValTmpl))
arrayValT = template.Must(template.New("array").Funcs(fm).Parse(arrayValTmpl))
mapValT = template.Must(template.New("map").Funcs(fm).Parse(mapValTmpl))
unionValT = template.Must(template.New("union").Funcs(fm).Parse(unionValTmpl))
userValT = template.Must(template.New("user").Funcs(fm).Parse(userValTmpl))
}
// AttributeValidationCode produces Go code that runs the validations defined
// in the given attribute against the value held by the variable named target.
//
// See ValidationCode for a description of the arguments.
func AttributeValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, attName string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, attName, seen).String()
}
// ValidationCode produces Go code that runs the validations defined in the
// given attribute and its children recursively against the value held by the
// variable named target.
//
// put is the parent UserType if any. It is used to compute proto oneof type names.
//
// attCtx is the attribute context used to generate attribute name and reference
// in the validation code.
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func ValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target string) string |
func recurseValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, context string, seen map[string]*bytes.Buffer) *bytes.Buffer {
var (
buf = new(bytes.Buffer)
first = true
ut, isUT = att.Type.(expr.UserType)
)
// Break infinite recursions
if isUT {
if buf, ok := seen[ut.ID()]; ok {
return buf
}
seen[ut.ID()] = buf
}
flattenValidations(att, make(map[string]struct{}))
newline := func() {
if !first {
buf.WriteByte('\n')
} else {
first = false
}
}
// Write validations on attribute if any.
validation := validationCode(att, attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func validationCode(att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
}
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := validation.MaxLength; maxLength != nil {
data["maxLength"] = maxLength
data["isMinLength"] = false
delete(data, "minLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
reqs := generatedRequiredValidation(att, attCtx)
obj := expr.AsObject(att.Type)
for _, r := range reqs {
reqAtt := obj.Attribute(r)
data["req"] = r
data["reqAtt"] = reqAtt
res = append(res, runTemplate(requiredValT, data))
}
return strings.Join(res, "\n")
}
// hasValidations returns true if a UserType contains validations.
func hasValidations(attCtx *AttributeContext, ut expr.UserType) bool {
// We need to check empirically whether there are validations to be
// generated, we can't just generate and check whether something was
// generated to avoid infinite recursions.
res := false
done := errors.New("done")
Walk(ut.Attribute(), func(a *expr.AttributeExpr) error {
if a.Validation == nil {
return nil
}
if attCtx.Pointer || !a.Validation.HasRequiredOnly() {
res = true
return done
}
res = len(generatedRequiredValidation(a, attCtx)) > 0
if res {
return done
}
return nil
})
return res
}
// There is a case where there is validation but no actual validation code: if
// the validation is a required validation that applies to attributes that
// cannot be nil i.e. primitive types.
func generatedRequiredValidation(att *expr.AttributeExpr, attCtx *AttributeContext) (res []string) {
if att.Validation == nil {
return
}
obj := expr.AsObject(att.Type)
for _, req := range att.Validation.Required {
reqAtt := obj.Attribute(req)
if reqAtt == nil {
continue
}
if !attCtx.Pointer && expr.IsPrimitive(reqAtt.Type) &&
reqAtt.Type.Kind() != expr.BytesKind &&
reqAtt.Type.Kind() != expr.AnyKind {
continue
}
if attCtx.IgnoreRequired && expr.IsPrimitive(reqAtt.Type) {
continue
}
res = append(res, req)
}
return
}
func flattenValidations(att *expr.AttributeExpr, seen map[string]struct{}) {
switch actual := att.Type.(type) {
case *expr.Array:
flattenValidations(actual.ElemType, seen)
case *expr.Map:
flattenValidations(actual.KeyType, seen)
flattenValidations(actual.ElemType, seen)
case *expr.Object:
for _, nat := range *actual {
flattenValidations(nat.Attribute, seen)
}
case *expr.Union:
for _, nat := range actual.Values {
flattenValidations(nat.Attribute, seen)
}
case expr.UserType:
if _, ok := seen[actual.ID()]; ok {
return
}
seen[actual.ID()] = struct{}{}
v := att.Validation
ut, ok := actual.Attribute().Type.(expr.UserType)
for ok {
if val := ut.Attribute().Validation; val != nil {
if v == nil {
v = val
} else {
v.Merge(val)
}
}
ut, ok = ut.Attribute().Type.(expr.UserType)
}
att.Validation = v
flattenValidations(actual.Attribute(), seen)
}
}
// toSlice returns Go code that represents the given slice.
func toSlice(val []any) string {
elems := make([]string, len(val))
for i, v := range val {
elems[i] = fmt.Sprintf("%#v", v)
}
return fmt.Sprintf("[]any{%s}", strings.Join(elems, ", "))
}
// oneof produces code that compares target with each element of vals and ORs
// the result, e.g. "target == 1 || target == 2".
func oneof(target string, vals []any) string {
elems := make([]string, len(vals))
for i, v := range vals {
elems[i] = fmt.Sprintf("%s == %#v", target, v)
}
return strings.Join(elems, " || ")
}
// constant returns the Go constant name of the format with the given value.
func constant(formatName string) string {
switch formatName {
case "date":
return "goa.FormatDate"
case "date-time":
return "goa.FormatDateTime"
case "uuid":
return "goa.FormatUUID"
case "email":
return "goa.FormatEmail"
case "hostname":
return "goa.FormatHostname"
case "ipv4":
return "goa.FormatIPv4"
case "ipv6":
return "goa.FormatIPv6"
case "ip":
return "goa.FormatIP"
case "uri":
return "goa.FormatURI"
case "mac":
return "goa.FormatMAC"
case "cidr":
return "goa.FormatCIDR"
case "regexp":
return "goa.FormatRegexp"
case "json":
return "goa.FormatJSON"
case "rfc1123":
return "goa.FormatRFC1123"
}
panic("unknown format") // bug
}
const (
arrayValTmpl = `for _, e := range {{ .target }} {
{{ .validation }}
}`
mapValTmpl = `for {{if .keyValidation }}k{{ else }}_{{ end }}, {{ if .valueValidation }}v{{ else }}_{{ end }} := range {{ .target }} {
{{- .keyValidation }}
{{- .valueValidation }}
}`
unionValTmpl = `switch v := {{ .target }}.(type) {
{{- range $i, $val := .values }}
case {{ index $.types $i }}:
{{ $val }}
{{ end -}}
}`
userValTmpl = `if err2 := Validate{{ .name }}({{ .target }}); err2 != nil {
err = goa.MergeErrors(err, err2)
}`
enumValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if !({{ oneof .targetVal .values }}) {
err = goa.MergeErrors(err, goa.InvalidEnumValueError({{ printf "%q" .context }}, {{ .targetVal }}, {{ slice .values }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
patternValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidatePattern({{ printf "%q" .context }}, {{ .targetVal }}, {{ printf "%q" .pattern }}))
{{- if .isPointer }}
}
{{- end }}`
formatValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidateFormat({{ printf "%q" .context }}, {{ .targetVal}}, {{ constant .format }}))
{{- if .isPointer }}
}
{{- end }}`
exclMinMaxValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isExclMin }}<={{ else }}>={{ end }} {{ if .isExclMin }}{{ .exclMin }}{{ else }}{{ .exclMax }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isExclMin }}{{ .exclMin }}, true{{ else }}{{ .exclMax }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
minMaxValTmpl = `{{ if .isPointer -}}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isMin }}<{{ else }}>{{ end }} {{ if .isMin }}{{ .min }}{{ else }}{{ .max }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isMin }}{{ .min }}, true{{ else }}{{ .max }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
lengthValTmpl = `{{ $target := or (and (or (or .array .map) .nonzero) .target) .targetVal -}}
{{ if and .isPointer .string -}}
if {{ .target }} != nil {
{{ end -}}
if {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }} {{ if .isMinLength }}<{{ else }}>{{ end }} {{ if .isMinLength }}{{ .minLength }}{{ else }}{{ .maxLength }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidLengthError({{ printf "%q" .context }}, {{ $target }}, {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }}, {{ if .isMinLength }}{{ .minLength }}, true{{ else }}{{ .maxLength }}, false{{ end }}))
}{{- if and .isPointer .string }}
}
{{- end }}`
requiredValTmpl = `if {{ $.target }}.{{ .attCtx.Scope.Field $.reqAtt .req true }} == nil {
err = goa.MergeErrors(err, goa.MissingFieldError("{{ .req }}", {{ printf "%q" $.context }}))
}`
)
| {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, target, seen).String()
} | identifier_body |
validation.go | package codegen
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
"goa.design/goa/v3/expr"
)
var (
enumValT *template.Template
formatValT *template.Template
patternValT *template.Template
exclMinMaxValT *template.Template
minMaxValT *template.Template
lengthValT *template.Template
requiredValT *template.Template
arrayValT *template.Template
mapValT *template.Template
unionValT *template.Template
userValT *template.Template
)
func init() {
fm := template.FuncMap{
"slice": toSlice,
"oneof": oneof,
"constant": constant,
"add": func(a, b int) int { return a + b },
}
enumValT = template.Must(template.New("enum").Funcs(fm).Parse(enumValTmpl))
formatValT = template.Must(template.New("format").Funcs(fm).Parse(formatValTmpl))
patternValT = template.Must(template.New("pattern").Funcs(fm).Parse(patternValTmpl))
exclMinMaxValT = template.Must(template.New("exclMinMax").Funcs(fm).Parse(exclMinMaxValTmpl))
minMaxValT = template.Must(template.New("minMax").Funcs(fm).Parse(minMaxValTmpl))
lengthValT = template.Must(template.New("length").Funcs(fm).Parse(lengthValTmpl))
requiredValT = template.Must(template.New("req").Funcs(fm).Parse(requiredValTmpl))
arrayValT = template.Must(template.New("array").Funcs(fm).Parse(arrayValTmpl))
mapValT = template.Must(template.New("map").Funcs(fm).Parse(mapValTmpl))
unionValT = template.Must(template.New("union").Funcs(fm).Parse(unionValTmpl))
userValT = template.Must(template.New("user").Funcs(fm).Parse(userValTmpl))
}
// AttributeValidationCode produces Go code that runs the validations defined
// in the given attribute against the value held by the variable named target.
//
// See ValidationCode for a description of the arguments.
func AttributeValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, attName string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, attName, seen).String()
}
// ValidationCode produces Go code that runs the validations defined in the
// given attribute and its children recursively against the value held by the
// variable named target.
//
// put is the parent UserType if any. It is used to compute proto oneof type names.
//
// attCtx is the attribute context used to generate attribute name and reference
// in the validation code.
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func ValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, target, seen).String()
}
func recurseValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, context string, seen map[string]*bytes.Buffer) *bytes.Buffer {
var (
buf = new(bytes.Buffer)
first = true
ut, isUT = att.Type.(expr.UserType)
)
// Break infinite recursions
if isUT {
if buf, ok := seen[ut.ID()]; ok {
return buf
}
seen[ut.ID()] = buf
}
flattenValidations(att, make(map[string]struct{}))
newline := func() {
if !first {
buf.WriteByte('\n')
} else {
first = false
}
}
// Write validations on attribute if any.
validation := validationCode(att, attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func validationCode(att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil |
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := validation.MaxLength; maxLength != nil {
data["maxLength"] = maxLength
data["isMinLength"] = false
delete(data, "minLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
reqs := generatedRequiredValidation(att, attCtx)
obj := expr.AsObject(att.Type)
for _, r := range reqs {
reqAtt := obj.Attribute(r)
data["req"] = r
data["reqAtt"] = reqAtt
res = append(res, runTemplate(requiredValT, data))
}
return strings.Join(res, "\n")
}
// hasValidations returns true if a UserType contains validations.
func hasValidations(attCtx *AttributeContext, ut expr.UserType) bool {
// We need to check empirically whether there are validations to be
// generated, we can't just generate and check whether something was
// generated to avoid infinite recursions.
res := false
done := errors.New("done")
Walk(ut.Attribute(), func(a *expr.AttributeExpr) error {
if a.Validation == nil {
return nil
}
if attCtx.Pointer || !a.Validation.HasRequiredOnly() {
res = true
return done
}
res = len(generatedRequiredValidation(a, attCtx)) > 0
if res {
return done
}
return nil
})
return res
}
// There is a case where there is validation but no actual validation code: if
// the validation is a required validation that applies to attributes that
// cannot be nil i.e. primitive types.
func generatedRequiredValidation(att *expr.AttributeExpr, attCtx *AttributeContext) (res []string) {
if att.Validation == nil {
return
}
obj := expr.AsObject(att.Type)
for _, req := range att.Validation.Required {
reqAtt := obj.Attribute(req)
if reqAtt == nil {
continue
}
if !attCtx.Pointer && expr.IsPrimitive(reqAtt.Type) &&
reqAtt.Type.Kind() != expr.BytesKind &&
reqAtt.Type.Kind() != expr.AnyKind {
continue
}
if attCtx.IgnoreRequired && expr.IsPrimitive(reqAtt.Type) {
continue
}
res = append(res, req)
}
return
}
func flattenValidations(att *expr.AttributeExpr, seen map[string]struct{}) {
switch actual := att.Type.(type) {
case *expr.Array:
flattenValidations(actual.ElemType, seen)
case *expr.Map:
flattenValidations(actual.KeyType, seen)
flattenValidations(actual.ElemType, seen)
case *expr.Object:
for _, nat := range *actual {
flattenValidations(nat.Attribute, seen)
}
case *expr.Union:
for _, nat := range actual.Values {
flattenValidations(nat.Attribute, seen)
}
case expr.UserType:
if _, ok := seen[actual.ID()]; ok {
return
}
seen[actual.ID()] = struct{}{}
v := att.Validation
ut, ok := actual.Attribute().Type.(expr.UserType)
for ok {
if val := ut.Attribute().Validation; val != nil {
if v == nil {
v = val
} else {
v.Merge(val)
}
}
ut, ok = ut.Attribute().Type.(expr.UserType)
}
att.Validation = v
flattenValidations(actual.Attribute(), seen)
}
}
// toSlice returns Go code that represents the given slice.
func toSlice(val []any) string {
elems := make([]string, len(val))
for i, v := range val {
elems[i] = fmt.Sprintf("%#v", v)
}
return fmt.Sprintf("[]any{%s}", strings.Join(elems, ", "))
}
// oneof produces code that compares target with each element of vals and ORs
// the result, e.g. "target == 1 || target == 2".
func oneof(target string, vals []any) string {
elems := make([]string, len(vals))
for i, v := range vals {
elems[i] = fmt.Sprintf("%s == %#v", target, v)
}
return strings.Join(elems, " || ")
}
// constant returns the Go constant name of the format with the given value.
func constant(formatName string) string {
switch formatName {
case "date":
return "goa.FormatDate"
case "date-time":
return "goa.FormatDateTime"
case "uuid":
return "goa.FormatUUID"
case "email":
return "goa.FormatEmail"
case "hostname":
return "goa.FormatHostname"
case "ipv4":
return "goa.FormatIPv4"
case "ipv6":
return "goa.FormatIPv6"
case "ip":
return "goa.FormatIP"
case "uri":
return "goa.FormatURI"
case "mac":
return "goa.FormatMAC"
case "cidr":
return "goa.FormatCIDR"
case "regexp":
return "goa.FormatRegexp"
case "json":
return "goa.FormatJSON"
case "rfc1123":
return "goa.FormatRFC1123"
}
panic("unknown format") // bug
}
const (
arrayValTmpl = `for _, e := range {{ .target }} {
{{ .validation }}
}`
mapValTmpl = `for {{if .keyValidation }}k{{ else }}_{{ end }}, {{ if .valueValidation }}v{{ else }}_{{ end }} := range {{ .target }} {
{{- .keyValidation }}
{{- .valueValidation }}
}`
unionValTmpl = `switch v := {{ .target }}.(type) {
{{- range $i, $val := .values }}
case {{ index $.types $i }}:
{{ $val }}
{{ end -}}
}`
userValTmpl = `if err2 := Validate{{ .name }}({{ .target }}); err2 != nil {
err = goa.MergeErrors(err, err2)
}`
enumValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if !({{ oneof .targetVal .values }}) {
err = goa.MergeErrors(err, goa.InvalidEnumValueError({{ printf "%q" .context }}, {{ .targetVal }}, {{ slice .values }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
patternValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidatePattern({{ printf "%q" .context }}, {{ .targetVal }}, {{ printf "%q" .pattern }}))
{{- if .isPointer }}
}
{{- end }}`
formatValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidateFormat({{ printf "%q" .context }}, {{ .targetVal}}, {{ constant .format }}))
{{- if .isPointer }}
}
{{- end }}`
exclMinMaxValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isExclMin }}<={{ else }}>={{ end }} {{ if .isExclMin }}{{ .exclMin }}{{ else }}{{ .exclMax }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isExclMin }}{{ .exclMin }}, true{{ else }}{{ .exclMax }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
minMaxValTmpl = `{{ if .isPointer -}}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isMin }}<{{ else }}>{{ end }} {{ if .isMin }}{{ .min }}{{ else }}{{ .max }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isMin }}{{ .min }}, true{{ else }}{{ .max }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
lengthValTmpl = `{{ $target := or (and (or (or .array .map) .nonzero) .target) .targetVal -}}
{{ if and .isPointer .string -}}
if {{ .target }} != nil {
{{ end -}}
if {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }} {{ if .isMinLength }}<{{ else }}>{{ end }} {{ if .isMinLength }}{{ .minLength }}{{ else }}{{ .maxLength }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidLengthError({{ printf "%q" .context }}, {{ $target }}, {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }}, {{ if .isMinLength }}{{ .minLength }}, true{{ else }}{{ .maxLength }}, false{{ end }}))
}{{- if and .isPointer .string }}
}
{{- end }}`
requiredValTmpl = `if {{ $.target }}.{{ .attCtx.Scope.Field $.reqAtt .req true }} == nil {
err = goa.MergeErrors(err, goa.MissingFieldError("{{ .req }}", {{ printf "%q" $.context }}))
}`
)
| {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
} | conditional_block |
validation.go | package codegen
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
"goa.design/goa/v3/expr"
)
var (
enumValT *template.Template
formatValT *template.Template
patternValT *template.Template
exclMinMaxValT *template.Template
minMaxValT *template.Template
lengthValT *template.Template
requiredValT *template.Template
arrayValT *template.Template
mapValT *template.Template
unionValT *template.Template
userValT *template.Template
)
func init() {
fm := template.FuncMap{
"slice": toSlice,
"oneof": oneof,
"constant": constant,
"add": func(a, b int) int { return a + b },
}
enumValT = template.Must(template.New("enum").Funcs(fm).Parse(enumValTmpl))
formatValT = template.Must(template.New("format").Funcs(fm).Parse(formatValTmpl))
patternValT = template.Must(template.New("pattern").Funcs(fm).Parse(patternValTmpl))
exclMinMaxValT = template.Must(template.New("exclMinMax").Funcs(fm).Parse(exclMinMaxValTmpl))
minMaxValT = template.Must(template.New("minMax").Funcs(fm).Parse(minMaxValTmpl))
lengthValT = template.Must(template.New("length").Funcs(fm).Parse(lengthValTmpl))
requiredValT = template.Must(template.New("req").Funcs(fm).Parse(requiredValTmpl))
arrayValT = template.Must(template.New("array").Funcs(fm).Parse(arrayValTmpl))
mapValT = template.Must(template.New("map").Funcs(fm).Parse(mapValTmpl))
unionValT = template.Must(template.New("union").Funcs(fm).Parse(unionValTmpl))
userValT = template.Must(template.New("user").Funcs(fm).Parse(userValTmpl))
}
// AttributeValidationCode produces Go code that runs the validations defined
// in the given attribute against the value held by the variable named target.
//
// See ValidationCode for a description of the arguments.
func AttributeValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, attName string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, attName, seen).String()
}
// ValidationCode produces Go code that runs the validations defined in the
// given attribute and its children recursively against the value held by the
// variable named target.
//
// put is the parent UserType if any. It is used to compute proto oneof type names.
//
// attCtx is the attribute context used to generate attribute name and reference
// in the validation code.
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func ValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, target, seen).String()
}
func recurseValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, context string, seen map[string]*bytes.Buffer) *bytes.Buffer {
var (
buf = new(bytes.Buffer)
first = true
ut, isUT = att.Type.(expr.UserType)
)
// Break infinite recursions
if isUT {
if buf, ok := seen[ut.ID()]; ok {
return buf
}
seen[ut.ID()] = buf
}
flattenValidations(att, make(map[string]struct{}))
newline := func() {
if !first {
buf.WriteByte('\n')
} else {
first = false
}
}
// Write validations on attribute if any.
validation := validationCode(att, attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func | (att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
}
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := validation.MaxLength; maxLength != nil {
data["maxLength"] = maxLength
data["isMinLength"] = false
delete(data, "minLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
reqs := generatedRequiredValidation(att, attCtx)
obj := expr.AsObject(att.Type)
for _, r := range reqs {
reqAtt := obj.Attribute(r)
data["req"] = r
data["reqAtt"] = reqAtt
res = append(res, runTemplate(requiredValT, data))
}
return strings.Join(res, "\n")
}
// hasValidations returns true if a UserType contains validations.
func hasValidations(attCtx *AttributeContext, ut expr.UserType) bool {
// We need to check empirically whether there are validations to be
// generated, we can't just generate and check whether something was
// generated to avoid infinite recursions.
res := false
done := errors.New("done")
Walk(ut.Attribute(), func(a *expr.AttributeExpr) error {
if a.Validation == nil {
return nil
}
if attCtx.Pointer || !a.Validation.HasRequiredOnly() {
res = true
return done
}
res = len(generatedRequiredValidation(a, attCtx)) > 0
if res {
return done
}
return nil
})
return res
}
// There is a case where there is validation but no actual validation code: if
// the validation is a required validation that applies to attributes that
// cannot be nil i.e. primitive types.
func generatedRequiredValidation(att *expr.AttributeExpr, attCtx *AttributeContext) (res []string) {
if att.Validation == nil {
return
}
obj := expr.AsObject(att.Type)
for _, req := range att.Validation.Required {
reqAtt := obj.Attribute(req)
if reqAtt == nil {
continue
}
if !attCtx.Pointer && expr.IsPrimitive(reqAtt.Type) &&
reqAtt.Type.Kind() != expr.BytesKind &&
reqAtt.Type.Kind() != expr.AnyKind {
continue
}
if attCtx.IgnoreRequired && expr.IsPrimitive(reqAtt.Type) {
continue
}
res = append(res, req)
}
return
}
func flattenValidations(att *expr.AttributeExpr, seen map[string]struct{}) {
switch actual := att.Type.(type) {
case *expr.Array:
flattenValidations(actual.ElemType, seen)
case *expr.Map:
flattenValidations(actual.KeyType, seen)
flattenValidations(actual.ElemType, seen)
case *expr.Object:
for _, nat := range *actual {
flattenValidations(nat.Attribute, seen)
}
case *expr.Union:
for _, nat := range actual.Values {
flattenValidations(nat.Attribute, seen)
}
case expr.UserType:
if _, ok := seen[actual.ID()]; ok {
return
}
seen[actual.ID()] = struct{}{}
v := att.Validation
ut, ok := actual.Attribute().Type.(expr.UserType)
for ok {
if val := ut.Attribute().Validation; val != nil {
if v == nil {
v = val
} else {
v.Merge(val)
}
}
ut, ok = ut.Attribute().Type.(expr.UserType)
}
att.Validation = v
flattenValidations(actual.Attribute(), seen)
}
}
// toSlice returns Go code that represents the given slice.
func toSlice(val []any) string {
elems := make([]string, len(val))
for i, v := range val {
elems[i] = fmt.Sprintf("%#v", v)
}
return fmt.Sprintf("[]any{%s}", strings.Join(elems, ", "))
}
// oneof produces code that compares target with each element of vals and ORs
// the result, e.g. "target == 1 || target == 2".
func oneof(target string, vals []any) string {
elems := make([]string, len(vals))
for i, v := range vals {
elems[i] = fmt.Sprintf("%s == %#v", target, v)
}
return strings.Join(elems, " || ")
}
// constant returns the Go constant name of the format with the given value.
func constant(formatName string) string {
switch formatName {
case "date":
return "goa.FormatDate"
case "date-time":
return "goa.FormatDateTime"
case "uuid":
return "goa.FormatUUID"
case "email":
return "goa.FormatEmail"
case "hostname":
return "goa.FormatHostname"
case "ipv4":
return "goa.FormatIPv4"
case "ipv6":
return "goa.FormatIPv6"
case "ip":
return "goa.FormatIP"
case "uri":
return "goa.FormatURI"
case "mac":
return "goa.FormatMAC"
case "cidr":
return "goa.FormatCIDR"
case "regexp":
return "goa.FormatRegexp"
case "json":
return "goa.FormatJSON"
case "rfc1123":
return "goa.FormatRFC1123"
}
panic("unknown format") // bug
}
const (
arrayValTmpl = `for _, e := range {{ .target }} {
{{ .validation }}
}`
mapValTmpl = `for {{if .keyValidation }}k{{ else }}_{{ end }}, {{ if .valueValidation }}v{{ else }}_{{ end }} := range {{ .target }} {
{{- .keyValidation }}
{{- .valueValidation }}
}`
unionValTmpl = `switch v := {{ .target }}.(type) {
{{- range $i, $val := .values }}
case {{ index $.types $i }}:
{{ $val }}
{{ end -}}
}`
userValTmpl = `if err2 := Validate{{ .name }}({{ .target }}); err2 != nil {
err = goa.MergeErrors(err, err2)
}`
enumValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if !({{ oneof .targetVal .values }}) {
err = goa.MergeErrors(err, goa.InvalidEnumValueError({{ printf "%q" .context }}, {{ .targetVal }}, {{ slice .values }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
patternValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidatePattern({{ printf "%q" .context }}, {{ .targetVal }}, {{ printf "%q" .pattern }}))
{{- if .isPointer }}
}
{{- end }}`
formatValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidateFormat({{ printf "%q" .context }}, {{ .targetVal}}, {{ constant .format }}))
{{- if .isPointer }}
}
{{- end }}`
exclMinMaxValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isExclMin }}<={{ else }}>={{ end }} {{ if .isExclMin }}{{ .exclMin }}{{ else }}{{ .exclMax }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isExclMin }}{{ .exclMin }}, true{{ else }}{{ .exclMax }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
minMaxValTmpl = `{{ if .isPointer -}}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isMin }}<{{ else }}>{{ end }} {{ if .isMin }}{{ .min }}{{ else }}{{ .max }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isMin }}{{ .min }}, true{{ else }}{{ .max }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
lengthValTmpl = `{{ $target := or (and (or (or .array .map) .nonzero) .target) .targetVal -}}
{{ if and .isPointer .string -}}
if {{ .target }} != nil {
{{ end -}}
if {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }} {{ if .isMinLength }}<{{ else }}>{{ end }} {{ if .isMinLength }}{{ .minLength }}{{ else }}{{ .maxLength }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidLengthError({{ printf "%q" .context }}, {{ $target }}, {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }}, {{ if .isMinLength }}{{ .minLength }}, true{{ else }}{{ .maxLength }}, false{{ end }}))
}{{- if and .isPointer .string }}
}
{{- end }}`
requiredValTmpl = `if {{ $.target }}.{{ .attCtx.Scope.Field $.reqAtt .req true }} == nil {
err = goa.MergeErrors(err, goa.MissingFieldError("{{ .req }}", {{ printf "%q" $.context }}))
}`
)
| validationCode | identifier_name |
validation.go | package codegen
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
"goa.design/goa/v3/expr"
)
var (
enumValT *template.Template
formatValT *template.Template
patternValT *template.Template
exclMinMaxValT *template.Template
minMaxValT *template.Template
lengthValT *template.Template
requiredValT *template.Template
arrayValT *template.Template
mapValT *template.Template
unionValT *template.Template
userValT *template.Template
)
func init() {
fm := template.FuncMap{
"slice": toSlice,
"oneof": oneof,
"constant": constant,
"add": func(a, b int) int { return a + b },
}
enumValT = template.Must(template.New("enum").Funcs(fm).Parse(enumValTmpl))
formatValT = template.Must(template.New("format").Funcs(fm).Parse(formatValTmpl))
patternValT = template.Must(template.New("pattern").Funcs(fm).Parse(patternValTmpl))
exclMinMaxValT = template.Must(template.New("exclMinMax").Funcs(fm).Parse(exclMinMaxValTmpl))
minMaxValT = template.Must(template.New("minMax").Funcs(fm).Parse(minMaxValTmpl))
lengthValT = template.Must(template.New("length").Funcs(fm).Parse(lengthValTmpl))
requiredValT = template.Must(template.New("req").Funcs(fm).Parse(requiredValTmpl))
arrayValT = template.Must(template.New("array").Funcs(fm).Parse(arrayValTmpl))
mapValT = template.Must(template.New("map").Funcs(fm).Parse(mapValTmpl))
unionValT = template.Must(template.New("union").Funcs(fm).Parse(unionValTmpl))
userValT = template.Must(template.New("user").Funcs(fm).Parse(userValTmpl))
}
// AttributeValidationCode produces Go code that runs the validations defined
// in the given attribute against the value held by the variable named target.
//
// See ValidationCode for a description of the arguments.
func AttributeValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, attName string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, attName, seen).String()
}
// ValidationCode produces Go code that runs the validations defined in the
// given attribute and its children recursively against the value held by the
// variable named target.
//
// put is the parent UserType if any. It is used to compute proto oneof type names.
//
// attCtx is the attribute context used to generate attribute name and reference
// in the validation code.
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func ValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target string) string {
seen := make(map[string]*bytes.Buffer)
return recurseValidationCode(att, put, attCtx, req, alias, target, target, seen).String()
}
func recurseValidationCode(att *expr.AttributeExpr, put expr.UserType, attCtx *AttributeContext, req, alias bool, target, context string, seen map[string]*bytes.Buffer) *bytes.Buffer {
var (
buf = new(bytes.Buffer)
first = true
ut, isUT = att.Type.(expr.UserType)
)
// Break infinite recursions
if isUT {
if buf, ok := seen[ut.ID()]; ok {
return buf
}
seen[ut.ID()] = buf
}
flattenValidations(att, make(map[string]struct{}))
newline := func() {
if !first {
buf.WriteByte('\n')
} else {
first = false
}
}
// Write validations on attribute if any.
validation := validationCode(att, attCtx, req, alias, target, context)
if validation != "" {
buf.WriteString(validation)
first = false
}
// Recurse down depending on attribute type.
switch {
case expr.IsObject(att.Type):
if isUT {
put = ut
}
for _, nat := range *(expr.AsObject(att.Type)) {
tgt := fmt.Sprintf("%s.%s", target, attCtx.Scope.Field(nat.Attribute, nat.Name, true))
ctx := fmt.Sprintf("%s.%s", context, nat.Name)
val := validateAttribute(attCtx, nat.Attribute, put, tgt, ctx, att.IsRequired(nat.Name))
if val != "" {
newline()
buf.WriteString(val)
}
}
case expr.IsArray(att.Type):
elem := expr.AsArray(att.Type).ElemType
ctx := attCtx
if ctx.Pointer && expr.IsPrimitive(elem.Type) {
// Array elements of primitive type are never pointers
ctx = attCtx.Dup()
ctx.Pointer = false
}
val := validateAttribute(ctx, elem, put, "e", context+"[*]", true)
if val != "" {
newline()
data := map[string]any{"target": target, "validation": val}
if err := arrayValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
case expr.IsMap(att.Type):
m := expr.AsMap(att.Type)
ctx := attCtx.Dup()
ctx.Pointer = false
keyVal := validateAttribute(ctx, m.KeyType, put, "k", context+".key", true)
if keyVal != "" {
keyVal = "\n" + keyVal
}
valueVal := validateAttribute(ctx, m.ElemType, put, "v", context+"[key]", true)
if valueVal != "" {
valueVal = "\n" + valueVal
}
if keyVal != "" || valueVal != "" {
newline()
data := map[string]any{"target": target, "keyValidation": keyVal, "valueValidation": valueVal}
if err := mapValT.Execute(buf, data); err != nil { | panic(err) // bug
}
}
case expr.IsUnion(att.Type):
// NOTE: the only time we validate a union is when we are
// validating a proto-generated type since the HTTP
// serialization transforms unions into objects.
u := expr.AsUnion(att.Type)
tref := attCtx.Scope.Ref(&expr.AttributeExpr{Type: put}, attCtx.DefaultPkg)
var vals []string
var types []string
for _, v := range u.Values {
vatt := v.Attribute
fieldName := attCtx.Scope.Field(vatt, v.Name, true)
val := validateAttribute(attCtx, vatt, put, "v."+fieldName, context+".value", true)
if val != "" {
types = append(types, tref+"_"+fieldName)
vals = append(vals, val)
}
}
if len(vals) > 0 {
newline()
data := map[string]any{
"target": target,
"types": types,
"values": vals,
}
if err := unionValT.Execute(buf, data); err != nil {
panic(err) // bug
}
}
}
return buf
}
func validateAttribute(ctx *AttributeContext, att *expr.AttributeExpr, put expr.UserType, target, context string, req bool) string {
ut, isUT := att.Type.(expr.UserType)
if !isUT {
code := recurseValidationCode(att, put, ctx, req, false, target, context, nil).String()
if code == "" {
return ""
}
if expr.IsArray(att.Type) || expr.IsMap(att.Type) || expr.IsUnion(att.Type) {
return code
}
if !ctx.Pointer && (req || (att.DefaultValue != nil && ctx.UseDefault)) {
return code
}
cond := fmt.Sprintf("if %s != nil {\n", target)
if strings.HasPrefix(code, cond) {
return code
}
return fmt.Sprintf("%s%s\n}", cond, code)
}
if expr.IsAlias(ut) {
return recurseValidationCode(ut.Attribute(), put, ctx, req, true, target, context, nil).String()
}
if !hasValidations(ctx, ut) {
return ""
}
var buf bytes.Buffer
name := ctx.Scope.Name(att, "", ctx.Pointer, ctx.UseDefault)
data := map[string]any{"name": Goify(name, true), "target": target}
if err := userValT.Execute(&buf, data); err != nil {
panic(err) // bug
}
return fmt.Sprintf("if %s != nil {\n\t%s\n}", target, buf.String())
}
// validationCode produces Go code that runs the validations defined in the
// given attribute definition if any against the content of the variable named
// target. The generated code assumes that there is a pre-existing "err"
// variable of type error. It initializes that variable in case a validation
// fails.
//
// attCtx is the attribute context
//
// req indicates whether the attribute is required (true) or optional (false)
//
// alias indicates whether the attribute is an alias user type attribute.
//
// target is the variable name against which the validation code is generated
//
// context is used to produce helpful messages in case of error.
func validationCode(att *expr.AttributeExpr, attCtx *AttributeContext, req, alias bool, target, context string) string {
validation := att.Validation
if ut, ok := att.Type.(expr.UserType); ok {
val := ut.Attribute().Validation
if val != nil {
if validation == nil {
validation = val
} else {
validation.Merge(val)
}
att.Validation = validation
}
}
if validation == nil {
return ""
}
var (
kind = att.Type.Kind()
isNativePointer = kind == expr.BytesKind || kind == expr.AnyKind
isPointer = attCtx.Pointer || (!req && (att.DefaultValue == nil || !attCtx.UseDefault))
tval = target
)
if isPointer && expr.IsPrimitive(att.Type) && !isNativePointer {
tval = "*" + tval
}
if alias {
tval = fmt.Sprintf("%s(%s)", att.Type.Name(), tval)
}
data := map[string]any{
"attribute": att,
"attCtx": attCtx,
"isPointer": isPointer,
"context": context,
"target": target,
"targetVal": tval,
"string": kind == expr.StringKind,
"array": expr.IsArray(att.Type),
"map": expr.IsMap(att.Type),
}
runTemplate := func(tmpl *template.Template, data any) string {
var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
panic(err) // bug
}
return buf.String()
}
var res []string
if values := validation.Values; values != nil {
data["values"] = values
if val := runTemplate(enumValT, data); val != "" {
res = append(res, val)
}
}
if format := validation.Format; format != "" {
data["format"] = string(format)
if val := runTemplate(formatValT, data); val != "" {
res = append(res, val)
}
}
if pattern := validation.Pattern; pattern != "" {
data["pattern"] = pattern
if val := runTemplate(patternValT, data); val != "" {
res = append(res, val)
}
}
if exclMin := validation.ExclusiveMinimum; exclMin != nil {
data["exclMin"] = *exclMin
data["isExclMin"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if min := validation.Minimum; min != nil {
data["min"] = *min
data["isMin"] = true
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if exclMax := validation.ExclusiveMaximum; exclMax != nil {
data["exclMax"] = *exclMax
data["isExclMax"] = true
if val := runTemplate(exclMinMaxValT, data); val != "" {
res = append(res, val)
}
}
if max := validation.Maximum; max != nil {
data["max"] = *max
data["isMin"] = false
if val := runTemplate(minMaxValT, data); val != "" {
res = append(res, val)
}
}
if minLength := validation.MinLength; minLength != nil {
data["minLength"] = minLength
data["isMinLength"] = true
delete(data, "maxLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
if maxLength := validation.MaxLength; maxLength != nil {
data["maxLength"] = maxLength
data["isMinLength"] = false
delete(data, "minLength")
if val := runTemplate(lengthValT, data); val != "" {
res = append(res, val)
}
}
reqs := generatedRequiredValidation(att, attCtx)
obj := expr.AsObject(att.Type)
for _, r := range reqs {
reqAtt := obj.Attribute(r)
data["req"] = r
data["reqAtt"] = reqAtt
res = append(res, runTemplate(requiredValT, data))
}
return strings.Join(res, "\n")
}
// hasValidations returns true if a UserType contains validations.
func hasValidations(attCtx *AttributeContext, ut expr.UserType) bool {
// We need to check empirically whether there are validations to be
// generated, we can't just generate and check whether something was
// generated to avoid infinite recursions.
res := false
done := errors.New("done")
Walk(ut.Attribute(), func(a *expr.AttributeExpr) error {
if a.Validation == nil {
return nil
}
if attCtx.Pointer || !a.Validation.HasRequiredOnly() {
res = true
return done
}
res = len(generatedRequiredValidation(a, attCtx)) > 0
if res {
return done
}
return nil
})
return res
}
// There is a case where there is validation but no actual validation code: if
// the validation is a required validation that applies to attributes that
// cannot be nil i.e. primitive types.
func generatedRequiredValidation(att *expr.AttributeExpr, attCtx *AttributeContext) (res []string) {
if att.Validation == nil {
return
}
obj := expr.AsObject(att.Type)
for _, req := range att.Validation.Required {
reqAtt := obj.Attribute(req)
if reqAtt == nil {
continue
}
if !attCtx.Pointer && expr.IsPrimitive(reqAtt.Type) &&
reqAtt.Type.Kind() != expr.BytesKind &&
reqAtt.Type.Kind() != expr.AnyKind {
continue
}
if attCtx.IgnoreRequired && expr.IsPrimitive(reqAtt.Type) {
continue
}
res = append(res, req)
}
return
}
func flattenValidations(att *expr.AttributeExpr, seen map[string]struct{}) {
switch actual := att.Type.(type) {
case *expr.Array:
flattenValidations(actual.ElemType, seen)
case *expr.Map:
flattenValidations(actual.KeyType, seen)
flattenValidations(actual.ElemType, seen)
case *expr.Object:
for _, nat := range *actual {
flattenValidations(nat.Attribute, seen)
}
case *expr.Union:
for _, nat := range actual.Values {
flattenValidations(nat.Attribute, seen)
}
case expr.UserType:
if _, ok := seen[actual.ID()]; ok {
return
}
seen[actual.ID()] = struct{}{}
v := att.Validation
ut, ok := actual.Attribute().Type.(expr.UserType)
for ok {
if val := ut.Attribute().Validation; val != nil {
if v == nil {
v = val
} else {
v.Merge(val)
}
}
ut, ok = ut.Attribute().Type.(expr.UserType)
}
att.Validation = v
flattenValidations(actual.Attribute(), seen)
}
}
// toSlice returns Go code that represents the given slice.
func toSlice(val []any) string {
elems := make([]string, len(val))
for i, v := range val {
elems[i] = fmt.Sprintf("%#v", v)
}
return fmt.Sprintf("[]any{%s}", strings.Join(elems, ", "))
}
// oneof produces code that compares target with each element of vals and ORs
// the result, e.g. "target == 1 || target == 2".
func oneof(target string, vals []any) string {
elems := make([]string, len(vals))
for i, v := range vals {
elems[i] = fmt.Sprintf("%s == %#v", target, v)
}
return strings.Join(elems, " || ")
}
// constant returns the Go constant name of the format with the given value.
func constant(formatName string) string {
switch formatName {
case "date":
return "goa.FormatDate"
case "date-time":
return "goa.FormatDateTime"
case "uuid":
return "goa.FormatUUID"
case "email":
return "goa.FormatEmail"
case "hostname":
return "goa.FormatHostname"
case "ipv4":
return "goa.FormatIPv4"
case "ipv6":
return "goa.FormatIPv6"
case "ip":
return "goa.FormatIP"
case "uri":
return "goa.FormatURI"
case "mac":
return "goa.FormatMAC"
case "cidr":
return "goa.FormatCIDR"
case "regexp":
return "goa.FormatRegexp"
case "json":
return "goa.FormatJSON"
case "rfc1123":
return "goa.FormatRFC1123"
}
panic("unknown format") // bug
}
const (
arrayValTmpl = `for _, e := range {{ .target }} {
{{ .validation }}
}`
mapValTmpl = `for {{if .keyValidation }}k{{ else }}_{{ end }}, {{ if .valueValidation }}v{{ else }}_{{ end }} := range {{ .target }} {
{{- .keyValidation }}
{{- .valueValidation }}
}`
unionValTmpl = `switch v := {{ .target }}.(type) {
{{- range $i, $val := .values }}
case {{ index $.types $i }}:
{{ $val }}
{{ end -}}
}`
userValTmpl = `if err2 := Validate{{ .name }}({{ .target }}); err2 != nil {
err = goa.MergeErrors(err, err2)
}`
enumValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if !({{ oneof .targetVal .values }}) {
err = goa.MergeErrors(err, goa.InvalidEnumValueError({{ printf "%q" .context }}, {{ .targetVal }}, {{ slice .values }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
patternValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidatePattern({{ printf "%q" .context }}, {{ .targetVal }}, {{ printf "%q" .pattern }}))
{{- if .isPointer }}
}
{{- end }}`
formatValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
err = goa.MergeErrors(err, goa.ValidateFormat({{ printf "%q" .context }}, {{ .targetVal}}, {{ constant .format }}))
{{- if .isPointer }}
}
{{- end }}`
exclMinMaxValTmpl = `{{ if .isPointer }}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isExclMin }}<={{ else }}>={{ end }} {{ if .isExclMin }}{{ .exclMin }}{{ else }}{{ .exclMax }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isExclMin }}{{ .exclMin }}, true{{ else }}{{ .exclMax }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
minMaxValTmpl = `{{ if .isPointer -}}if {{ .target }} != nil {
{{ end -}}
if {{ .targetVal }} {{ if .isMin }}<{{ else }}>{{ end }} {{ if .isMin }}{{ .min }}{{ else }}{{ .max }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidRangeError({{ printf "%q" .context }}, {{ .targetVal }}, {{ if .isMin }}{{ .min }}, true{{ else }}{{ .max }}, false{{ end }}))
{{ if .isPointer -}}
}
{{ end -}}
}`
lengthValTmpl = `{{ $target := or (and (or (or .array .map) .nonzero) .target) .targetVal -}}
{{ if and .isPointer .string -}}
if {{ .target }} != nil {
{{ end -}}
if {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }} {{ if .isMinLength }}<{{ else }}>{{ end }} {{ if .isMinLength }}{{ .minLength }}{{ else }}{{ .maxLength }}{{ end }} {
err = goa.MergeErrors(err, goa.InvalidLengthError({{ printf "%q" .context }}, {{ $target }}, {{ if .string }}utf8.RuneCountInString({{ $target }}){{ else }}len({{ $target }}){{ end }}, {{ if .isMinLength }}{{ .minLength }}, true{{ else }}{{ .maxLength }}, false{{ end }}))
}{{- if and .isPointer .string }}
}
{{- end }}`
requiredValTmpl = `if {{ $.target }}.{{ .attCtx.Scope.Field $.reqAtt .req true }} == nil {
err = goa.MergeErrors(err, goa.MissingFieldError("{{ .req }}", {{ printf "%q" $.context }}))
}`
) | random_line_split | |
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 |
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `scores` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(data.len()).min(data.len());
let total = &data[..k].iter().sum::<f32>();
total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn test_get_percentiles() {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
}
| {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
// Sort them in ascending order
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
} | identifier_body |
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
// Sort them in ascending order
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
}
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `scores` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(data.len()).min(data.len());
let total = &data[..k].iter().sum::<f32>();
total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn | () {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
}
| test_get_percentiles | identifier_name |
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
// Sort them in ascending order
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
}
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 {
return weights;
}
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize]; | left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean
/// # Arguments
///
/// * `scores` list of numbers to average
/// * `k_opt` number of top docs to include. If none is provided, uses all docs
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(data.len()).min(data.len());
let total = &data[..k].iter().sum::<f32>();
total / (k as f32)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_mean() {
let data = [1., 2., 6.];
assert_eq!(get_mean(&data, None), 3.);
assert_eq!(get_mean(&data, Some(2)), 1.5);
assert_eq!(get_mean(&data, Some(10)), 3.);
}
#[test]
fn test_ndcg() {
let mut t1 = vec![4., 0., 2., 1., 2.];
assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
}
#[test]
fn test_err() {
let scores = vec![4., 0., 2., 1., 2.];
assert_eq!(get_err(&scores, Some(0)), 0f32);
assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
assert_eq!(
get_err(&scores, Some(10)),
get_err(&scores, Some(scores.len()))
);
}
#[test]
fn test_gini() {
{
let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
}
{
let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
}
}
#[test]
fn test_get_subtopic_weights() {
let mut str_data = Vec::new();
let mut expected = HashMap::new();
for i in 0..10 {
{
let mut metadata = Metadata::new();
metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
str_data.push(metadata);
expected.insert(i, 1. / 30.);
}
{
let mut metadata = Metadata::new();
metadata.insert(
"taxonomy".to_string(),
MetaType::Str(format!("2{:?}", i / 10)),
);
str_data.push(metadata);
expected.insert(20 + i / 10, 1. / 3.);
}
{
let metadata = Metadata::new();
str_data.push(metadata);
expected.insert(std::u32::MAX, 1. / 3.);
}
}
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let sub: Vec<_> = str_data.iter().collect();
let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(subtopics.len(), sub.len());
println!("Weights: {:?}", weights);
println!("expected: {:?}", expected);
assert_eq!(weights.len(), expected.len());
for (key, val) in expected.iter() {
assert!(weights.contains_key(key));
let actual_val = weights.get(key).expect("key should be in weights");
assert!((val - actual_val).abs() < 1e-6);
}
}
#[test]
fn test_err_ia() {
let mut cat1_metadata = Metadata::new();
cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
let mut cat2_metadata = Metadata::new();
cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
let scores = vec![
(4., &cat1_metadata),
(0., &cat2_metadata),
(2., &cat1_metadata),
(1., &cat2_metadata),
(2., &cat2_metadata),
];
let discretize_fn = |x: Option<&MetaType>| match x {
Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
None => std::u32::MAX,
_ => panic!("Should have some string data"),
};
let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
let weights = get_subtopic_weights(&subtopics);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
0f32
);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, None),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
assert_eq!(
get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
get_err_ia(&just_scores, &subtopics, &weights, Some(5))
);
}
#[test]
fn test_interpolate() {
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, None), 2.0);
assert_eq!(interpolate(&values, 25, None), 2.0);
assert_eq!(interpolate(&values, 50, None), 3.0);
assert_eq!(interpolate(&values, 100, None), 4.0);
}
{
let values = vec![2.0, 4.0, 100.0];
assert_eq!(interpolate(&values, 50, None), 4.0);
}
{
// Example from wikipedia
let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
assert_eq!(interpolate(&values, 5, None), 15.0);
assert_eq!(interpolate(&values, 30, None), 20.0);
assert_eq!(interpolate(&values, 40, None), 27.5);
assert_eq!(interpolate(&values, 95, None), 50.0);
}
{
let values = vec![2.0, 4.0];
assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
}
}
#[test]
fn test_get_percentiles() {
let mut values = vec![1000.0, 20.0, 100.0];
let quantiles = vec![50];
assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
}
} | let right = vals[pos.ceil() as usize];
let delta = pos.fract(); | random_line_split |
mod.rs | //! Metrics
//! ---
//! Contains a set of optimization metrics
//!
//! These are useful for different scorers
extern crate es_data;
extern crate float_ord;
extern crate hashbrown;
use self::es_data::dataset::types::{MetaType, Metadata};
use self::hashbrown::HashMap;
use self::float_ord::FloatOrd;
/// Computes DCG@K for a given relevance set
fn dcg(scores: &[f32], k: usize) -> f64 {
let mut rdcg = 0f64;
for i in 0..k {
let s = scores[i];
rdcg += ((2f64).powi(s as i32) - 1.) / (2. + i as f64).log2()
}
rdcg
}
/// Computes NDCG@K for a given relevance set
pub fn ndcg(scores: &mut [f32], k: Option<usize>) -> f64 {
let size = k.unwrap_or(scores.len()).min(scores.len());
let r_dcg = dcg(scores, size);
// Sort them in ascending order
scores.sort_by_key(|v| FloatOrd(-*v));
let idcg = dcg(scores, size);
if idcg > 0.0 {
r_dcg / idcg
} else {
0.0
}
}
#[inline]
/// Gets relevance for ERR
fn get_relevance(score: f32, score_max: f32) -> f32 {
(2f32.powf(score) - 1.) / 2f32.powf(score_max)
}
/// Computes ERR. Assumes scores are sorted
pub fn get_err(scores: &[f32], k_opt: Option<usize>) -> f32 {
let k = k_opt.unwrap_or(scores.len()).min(scores.len());
let score_max = scores
.iter()
.max_by_key(|x| FloatOrd(**x))
.expect("Must have a maximum score");
let mut err = 0.0;
let mut p = 1.0;
for rank in 1..=k {
let relevance = get_relevance(scores[rank - 1], *score_max);
err += p * relevance / (rank as f32);
p *= 1. - relevance;
}
err
}
/// Gets the weights for sub-topics for Discrete-ERRIA. Computes p(t | q)
pub fn get_subtopic_weights(subtopics: &[u32]) -> HashMap<u32, f32> {
let mut weights = HashMap::new();
let num_examples = subtopics.len();
if num_examples == 0 |
for topic in subtopics.iter() {
let counter = weights.entry(*topic).or_insert(0.);
*counter += 1.;
}
for (_, val) in weights.iter_mut() {
*val /= num_examples as f32;
}
weights
}
/// Gets the subtopics. Run this once
/// # Arguments
///
/// * data: Data to get subtopics from
/// * field_name: field containing the topic
/// * discretize_fn specifies the name of the bucket and how to handle missing data.
pub fn get_subtopics<F>(data: &[&Metadata], field_name: &String, discretize_fn: F) -> Vec<u32>
where
F: Fn(Option<&MetaType>) -> u32,
{
let mut topics = Vec::new();
for metadata in data.iter() {
let value = metadata.get(field_name);
topics.push(discretize_fn(value));
}
topics
}
/// Computes Discrete-ERRIA. Assumes the scores are sorted.
/// # Arguments
///
/// * scores: labels
/// * subtopics: subtopic for each doc
/// * subtopic_weights: weight for each topic
/// * k_opt: top-K docs to compute this over
pub fn get_err_ia(
scores: &[f32],
subtopics: &[u32],
subtopic_weights: &HashMap<u32, f32>,
k_opt: Option<usize>,
) -> f32 {
let mut err_ia: f32 = 0.0;
for (topic, prob_topic_given_query) in subtopic_weights.iter() {
// Set the score for any doc without this topic to 0.
// Can't just filter as we need the index
let topic_scores: Vec<f32> = scores
.iter()
.enumerate()
.map(|(i, &x)| if subtopics[i] == *topic { x } else { 0f32 })
.collect();
let err_at_k_for_topic = get_err(&topic_scores, k_opt);
err_ia += prob_topic_given_query * err_at_k_for_topic;
}
err_ia
}
/// Computes cumulative values for gini coefficient
pub fn compute_cumulative_values(data: &[f32]) -> Vec<f32> {
let mut cumulative = Vec::with_capacity(data.len() + 1);
let mut total = 0.;
for val in data {
cumulative.push(total);
total += val;
}
cumulative.push(total);
if total == 0. {
return cumulative;
}
for val in cumulative.iter_mut() {
*val /= total;
}
cumulative
}
/// Compute the gini coefficient for the provided income & population
pub fn get_gini_coefficient(income_and_population: &mut [(f32, f32)]) -> f32 {
// No inequality if there are no examples.
if income_and_population.is_empty() {
return 0.;
}
// Sort the incomes and population so the cumulative wealth is below the optimal line
income_and_population.sort_by(|a, b| {
let a_ratio = a.0 / a.1;
let b_ratio = b.0 / b.1;
a_ratio.partial_cmp(&b_ratio).expect("should unwrap float")
});
let income = income_and_population
.iter()
.map(|x| x.0)
.collect::<Vec<f32>>();
let population = income_and_population
.iter()
.map(|x| x.1)
.collect::<Vec<f32>>();
// Compute cumulative populations and wealth
let wealth_cumulative = compute_cumulative_values(&income);
let population_cumulative = compute_cumulative_values(&population);
let income_total = wealth_cumulative.last().expect("Must have an income value");
let population_total = population_cumulative
.last()
.expect("Must have a population value");
// If no income to spread or no population, there is no inequality
if income_total.abs() <= 1e-6 || population_total.abs() <= 1e-6 {
return 0.;
}
let mut gini = 0.;
for i in 1..wealth_cumulative.len() {
gini += (population_cumulative[i] - population_cumulative[i - 1])
* (wealth_cumulative[i] + wealth_cumulative[i - 1]);
}
gini
}
/// Find the percentile given a set of values. This requires some interpolation
fn interpolate(vals: &[f32], percentile: usize, interpolate_arg_opt: Option<f32>) -> f32 {
let interpolate_arg = interpolate_arg_opt.unwrap_or(0.5);
let v_len = vals.len() as f32;
let pos =
(v_len + 1. - 2. * interpolate_arg) * (percentile as f32) / 100. + interpolate_arg - 1.;
if (pos.ceil() as usize) == 0 {
vals[0]
} else if (pos.floor() as usize) == (vals.len() - 1) {
vals[vals.len() - 1]
} else {
let left = vals[pos.floor() as usize];
let right = vals[pos.ceil() as usize];
let delta = pos.fract();
left * (1. - delta) + right * delta
}
}
/// Compute a set of percentiles and average them
pub fn get_percentiles(
vals: &mut [f32],
percentiles: &[usize],
interpolate_arg_opt: Option<f32>,
) -> f32 {
// Can happen at test time
if vals.is_empty() {
std::f32::NAN
} else {
vals.sort_by_key(|x| FloatOrd(*x));
let s: f32 = percentiles
.iter()
.map(|p| interpolate(&vals, *p, interpolate_arg_opt))
.sum();
s / percentiles.len() as f32
}
}
/// Computes the mean of the top `k` entries of `data`.
///
/// # Arguments
///
/// * `data` - numbers to average (the original doc called this `scores`;
///   the parameter is named `data`).
/// * `k_opt` - number of top docs to include, clamped to `data.len()`.
///   If none is provided, uses all docs.
///
/// Note: yields NaN when the effective `k` is 0 (empty slice or `Some(0)`),
/// since the sum and count are both zero.
pub fn get_mean(data: &[f32], k_opt: Option<usize>) -> f32 {
    let k = k_opt.unwrap_or(data.len()).min(data.len());
    // Bind the sum by value; the original took a reference to the
    // temporary (`let total = &...sum()`), which only worked via deref.
    let total: f32 = data[..k].iter().sum();
    total / (k as f32)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Mean over all entries, over a top-k prefix, and with k larger than
    // the data (which get_mean clamps to the slice length).
    #[test]
    fn test_mean() {
        let data = [1., 2., 6.];
        assert_eq!(get_mean(&data, None), 3.);
        assert_eq!(get_mean(&data, Some(2)), 1.5);
        assert_eq!(get_mean(&data, Some(10)), 3.);
    }
    // NDCG against precomputed reference values; k = 0 must yield 0.
    #[test]
    fn test_ndcg() {
        let mut t1 = vec![4., 0., 2., 1., 2.];
        assert!((ndcg(&mut t1.clone(), None) - 0.96110010).abs() < 1e-6);
        assert!((ndcg(&mut t1, Some(2)) - 0.8879528).abs() < 1e-6);
        assert_eq!(ndcg(&mut t1, Some(0)), 0f64);
    }
    // ERR at every cutoff; None and an over-large k both behave like
    // k = scores.len().
    #[test]
    fn test_err() {
        let scores = vec![4., 0., 2., 1., 2.];
        assert_eq!(get_err(&scores, Some(0)), 0f32);
        assert!((get_err(&scores, Some(1)) - 0.9375).abs() < 1e-6);
        assert!((get_err(&scores, Some(2)) - 0.9375).abs() < 1e-6);
        assert!((get_err(&scores, Some(3)) - 0.94140625).abs() < 1e-6);
        assert!((get_err(&scores, Some(4)) - 0.9421997).abs() < 1e-6);
        assert!((get_err(&scores, Some(5)) - 0.94398493).abs() < 1e-6);
        assert_eq!(get_err(&scores, None), get_err(&scores, Some(scores.len())));
        assert_eq!(
            get_err(&scores, Some(10)),
            get_err(&scores, Some(scores.len()))
        );
    }
    // Gini coefficient on two (wealth, population) pair distributions with
    // known reference coefficients.
    #[test]
    fn test_gini() {
        {
            let mut data = vec![(0.4, 0.05), (0.6, 0.95)];
            assert!((get_gini_coefficient(&mut data) - 0.65).abs() < 1e-6);
        }
        {
            let mut data = vec![(0.2, 0.1), (0.8, 0.9)];
            assert!((get_gini_coefficient(&mut data) - 0.9).abs() < 1e-6);
        }
    }
    // Builds 30 metadata entries per loop-free structure: 10 distinct
    // subtopics "0".."9" (weight 1/30 each), 10 copies of subtopic "20"
    // (weight 1/3), and 10 entries with no taxonomy key, which the
    // discretize_fn maps to the u32::MAX sentinel (weight 1/3).
    #[test]
    fn test_get_subtopic_weights() {
        let mut str_data = Vec::new();
        let mut expected = HashMap::new();
        for i in 0..10 {
            {
                let mut metadata = Metadata::new();
                metadata.insert("taxonomy".to_string(), MetaType::Str(format!("{:?}", i)));
                str_data.push(metadata);
                expected.insert(i, 1. / 30.);
            }
            {
                let mut metadata = Metadata::new();
                metadata.insert(
                    "taxonomy".to_string(),
                    MetaType::Str(format!("2{:?}", i / 10)),
                );
                str_data.push(metadata);
                expected.insert(20 + i / 10, 1. / 3.);
            }
            {
                let metadata = Metadata::new();
                str_data.push(metadata);
                expected.insert(std::u32::MAX, 1. / 3.);
            }
        }
        // Missing taxonomy becomes the u32::MAX catch-all bucket.
        let discretize_fn = |x: Option<&MetaType>| match x {
            Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
            None => std::u32::MAX,
            _ => panic!("Should have some string data"),
        };
        let sub: Vec<_> = str_data.iter().collect();
        let subtopics = get_subtopics(&sub, &"taxonomy".to_string(), &discretize_fn);
        let weights = get_subtopic_weights(&subtopics);
        assert_eq!(subtopics.len(), sub.len());
        println!("Weights: {:?}", weights);
        println!("expected: {:?}", expected);
        assert_eq!(weights.len(), expected.len());
        // Every expected subtopic must be present with a matching weight.
        for (key, val) in expected.iter() {
            assert!(weights.contains_key(key));
            let actual_val = weights.get(key).expect("key should be in weights");
            assert!((val - actual_val).abs() < 1e-6);
        }
    }
    // Intent-aware ERR over two taxonomy categories, checked at each
    // cutoff; None and an over-large k match k = 5.
    #[test]
    fn test_err_ia() {
        let mut cat1_metadata = Metadata::new();
        cat1_metadata.insert("taxonomy".to_string(), MetaType::Str("1".to_string()));
        let mut cat2_metadata = Metadata::new();
        cat2_metadata.insert("taxonomy".to_string(), MetaType::Str("2".to_string()));
        let scores = vec![
            (4., &cat1_metadata),
            (0., &cat2_metadata),
            (2., &cat1_metadata),
            (1., &cat2_metadata),
            (2., &cat2_metadata),
        ];
        let discretize_fn = |x: Option<&MetaType>| match x {
            Some(MetaType::Str(val)) => val.parse::<u32>().expect("should be a number"),
            None => std::u32::MAX,
            _ => panic!("Should have some string data"),
        };
        let metadata: Vec<_> = scores.iter().map(|x| x.1).collect();
        let just_scores: Vec<_> = scores.iter().map(|x| x.0).collect();
        let subtopics = get_subtopics(&metadata, &"taxonomy".to_string(), &discretize_fn);
        let weights = get_subtopic_weights(&subtopics);
        assert_eq!(
            get_err_ia(&just_scores, &subtopics, &weights, Some(0)),
            0f32
        );
        assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(1)) - 0.375).abs() < 1e-6);
        assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(2)) - 0.375).abs() < 1e-6);
        assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(3)) - 0.3765625).abs() < 1e-6);
        assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(4)) - 0.4140625).abs() < 1e-6);
        assert!((get_err_ia(&just_scores, &subtopics, &weights, Some(5)) - 0.4815625).abs() < 1e-6);
        assert_eq!(
            get_err_ia(&just_scores, &subtopics, &weights, None),
            get_err_ia(&just_scores, &subtopics, &weights, Some(5))
        );
        assert_eq!(
            get_err_ia(&just_scores, &subtopics, &weights, Some(10)),
            get_err_ia(&just_scores, &subtopics, &weights, Some(5))
        );
    }
    // Percentile interpolation: clamping at both ends, an exact hit, the
    // Wikipedia worked example, and the C = 1.0 interpolation variant.
    #[test]
    fn test_interpolate() {
        {
            let values = vec![2.0, 4.0];
            assert_eq!(interpolate(&values, 0, None), 2.0);
            assert_eq!(interpolate(&values, 25, None), 2.0);
            assert_eq!(interpolate(&values, 50, None), 3.0);
            assert_eq!(interpolate(&values, 100, None), 4.0);
        }
        {
            let values = vec![2.0, 4.0, 100.0];
            assert_eq!(interpolate(&values, 50, None), 4.0);
        }
        {
            // Example from wikipedia
            let values = vec![15.0, 20.0, 35.0, 40.0, 50.0];
            assert_eq!(interpolate(&values, 5, None), 15.0);
            assert_eq!(interpolate(&values, 30, None), 20.0);
            assert_eq!(interpolate(&values, 40, None), 27.5);
            assert_eq!(interpolate(&values, 95, None), 50.0);
        }
        {
            let values = vec![2.0, 4.0];
            assert_eq!(interpolate(&values, 0, Some(1.0)), 2.0);
            assert_eq!(interpolate(&values, 10, Some(1.0)), 2.2);
            assert_eq!(interpolate(&values, 25, Some(1.0)), 2.5);
            assert_eq!(interpolate(&values, 75, Some(1.0)), 3.5);
            assert_eq!(interpolate(&values, 100, Some(1.0)), 4.0);
        }
    }
    // get_percentiles sorts its input before taking the median.
    #[test]
    fn test_get_percentiles() {
        let mut values = vec![1000.0, 20.0, 100.0];
        let quantiles = vec![50];
        assert_eq!(get_percentiles(&mut values, &quantiles, None), 100.0);
    }
}
| {
return weights;
} | conditional_block |
imager_prepare.py | # LOFAR IMAGING PIPELINE
# Prepare phase master
#
# 1. Create input files for individual nodes based on the input mapfile
# 2. Perform basic input parsing and input validation
# 3. Call the node scripts with correct input
# 4. validate performance
#
# Wouter Klijn
# 2012
# klijn@astron.nl
# ------------------------------------------------------------------------------
from __future__ import with_statement
import os
import sys
import copy
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.support.baserecipe import BaseRecipe
from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
from lofarpipe.support.remotecommand import ComputeJob
from lofarpipe.support.data_map import DataMap, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
"""
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image"
),
'subbands_per_image': ingredient.IntField(
'--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
|
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = idx_slice * \
(n_subband_groups * subbands_per_image) + \
(idx_sb_group * subbands_per_image)
line_idx_end = line_idx_start + subbands_per_image
#extend inputs with the files for the current time slice
inputs_for_image.extend(input_mapfile[line_idx_start: line_idx_end])
return DataMap(inputs_for_image)
def _validate_input_map(self, input_map, output_map, slices_per_image,
subbands_per_image):
"""
Return False if the inputs supplied are incorrect:
the number if inputs and output does not match.
Return True if correct.
The number of inputs is correct iff.
len(input_map) ==
len(output_map) * slices_per_image * subbands_per_image
"""
# The output_map contains a number of path/node pairs. The final data
# dataproduct of the prepare phase: The 'input' for each of these pairs
# is a number of raw measurement sets: The number of time slices times
# the number of subbands collected into each of these time slices.
# The total length of the input map should match this.
if len(input_map) != len(output_map) * \
(slices_per_image * subbands_per_image):
self.logger.error(
"Incorrect number of input ms for supplied parameters:\n\t"
"len(input_map) = {0}\n\t"
"len(output_map) * slices_per_image * subbands_per_image = "
"{1} * {2} * {3} = {4}".format(
len(input_map), len(output_map),
slices_per_image, subbands_per_image,
len(output_map) * slices_per_image * subbands_per_image
)
)
return False
return True
if __name__ == "__main__":
sys.exit(imager_prepare().main())
| return 1 | conditional_block |
imager_prepare.py | # LOFAR IMAGING PIPELINE
# Prepare phase master
#
# 1. Create input files for individual nodes based on the input mapfile
# 2. Perform basic input parsing and input validation
# 3. Call the node scripts with correct input
# 4. validate performance
#
# Wouter Klijn
# 2012
# klijn@astron.nl
# ------------------------------------------------------------------------------
from __future__ import with_statement
import os
import sys
import copy
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.support.baserecipe import BaseRecipe
from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
from lofarpipe.support.remotecommand import ComputeJob
from lofarpipe.support.data_map import DataMap, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
|
if __name__ == "__main__":
sys.exit(imager_prepare().main())
| """
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image"
),
'subbands_per_image': ingredient.IntField(
'--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
return 1
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = idx_slice * \
(n_subband_groups * subbands_per_image) + \
(idx_sb_group * subbands_per_image)
line_idx_end = line_idx_start + subbands_per_image
#extend inputs with the files for the current time slice
inputs_for_image.extend(input_mapfile[line_idx_start: line_idx_end])
return DataMap(inputs_for_image)
def _validate_input_map(self, input_map, output_map, slices_per_image,
subbands_per_image):
"""
Return False if the inputs supplied are incorrect:
the number if inputs and output does not match.
Return True if correct.
The number of inputs is correct iff.
len(input_map) ==
len(output_map) * slices_per_image * subbands_per_image
"""
# The output_map contains a number of path/node pairs. The final data
# dataproduct of the prepare phase: The 'input' for each of these pairs
# is a number of raw measurement sets: The number of time slices times
# the number of subbands collected into each of these time slices.
# The total length of the input map should match this.
if len(input_map) != len(output_map) * \
(slices_per_image * subbands_per_image):
self.logger.error(
"Incorrect number of input ms for supplied parameters:\n\t"
"len(input_map) = {0}\n\t"
"len(output_map) * slices_per_image * subbands_per_image = "
"{1} * {2} * {3} = {4}".format(
len(input_map), len(output_map),
slices_per_image, subbands_per_image,
len(output_map) * slices_per_image * subbands_per_image
)
)
return False
return True | identifier_body |
imager_prepare.py | # LOFAR IMAGING PIPELINE
# Prepare phase master
#
# 1. Create input files for individual nodes based on the input mapfile
# 2. Perform basic input parsing and input validation
# 3. Call the node scripts with correct input
# 4. validate performance
#
# Wouter Klijn
# 2012
# klijn@astron.nl
# ------------------------------------------------------------------------------
from __future__ import with_statement
import os
import sys
import copy
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.support.baserecipe import BaseRecipe
from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
from lofarpipe.support.remotecommand import ComputeJob
from lofarpipe.support.data_map import DataMap, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
"""
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image" | '--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
return 1
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = idx_slice * \
(n_subband_groups * subbands_per_image) + \
(idx_sb_group * subbands_per_image)
line_idx_end = line_idx_start + subbands_per_image
#extend inputs with the files for the current time slice
inputs_for_image.extend(input_mapfile[line_idx_start: line_idx_end])
return DataMap(inputs_for_image)
def _validate_input_map(self, input_map, output_map, slices_per_image,
subbands_per_image):
"""
Return False if the inputs supplied are incorrect:
the number if inputs and output does not match.
Return True if correct.
The number of inputs is correct iff.
len(input_map) ==
len(output_map) * slices_per_image * subbands_per_image
"""
# The output_map contains a number of path/node pairs. The final data
# dataproduct of the prepare phase: The 'input' for each of these pairs
# is a number of raw measurement sets: The number of time slices times
# the number of subbands collected into each of these time slices.
# The total length of the input map should match this.
if len(input_map) != len(output_map) * \
(slices_per_image * subbands_per_image):
self.logger.error(
"Incorrect number of input ms for supplied parameters:\n\t"
"len(input_map) = {0}\n\t"
"len(output_map) * slices_per_image * subbands_per_image = "
"{1} * {2} * {3} = {4}".format(
len(input_map), len(output_map),
slices_per_image, subbands_per_image,
len(output_map) * slices_per_image * subbands_per_image
)
)
return False
return True
if __name__ == "__main__":
sys.exit(imager_prepare().main()) | ),
'subbands_per_image': ingredient.IntField( | random_line_split |
imager_prepare.py | # LOFAR IMAGING PIPELINE
# Prepare phase master
#
# 1. Create input files for individual nodes based on the input mapfile
# 2. Perform basic input parsing and input validation
# 3. Call the node scripts with correct input
# 4. validate performance
#
# Wouter Klijn
# 2012
# klijn@astron.nl
# ------------------------------------------------------------------------------
from __future__ import with_statement
import os
import sys
import copy
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.support.baserecipe import BaseRecipe
from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
from lofarpipe.support.remotecommand import ComputeJob
from lofarpipe.support.data_map import DataMap, MultiDataMap
class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
"""
Prepare phase master:
1. Validate input
2. Create mapfiles with input for work to be perform on the individual nodes
based on the structured input mapfile. The input mapfile contains a list
of measurement sets.
Each node computes a single subband group but needs this for all
timeslices.
3. Call the node scripts with correct input
4. validate performance
Only output the measurement nodes that finished succesfull
**Command Line arguments:**
The only command line argument is the a to a mapfile containing "all"
the measurement sets needed for creating the sky images. First ordered on
timeslice then on subband group and finaly on index in the frequency
range.
**Arguments:**
"""
inputs = {
'ndppp_exec': ingredient.ExecField(
'--ndppp-exec',
help="The full path to the ndppp executable"
),
'parset': ingredient.FileField(
'-p', '--parset',
help="The full path to a prepare parset"
),
'working_directory': ingredient.StringField(
'-w', '--working-directory',
help="Working directory used by the nodes: local data"
),
'target_mapfile': ingredient.StringField(
'--target-mapfile',
help="Contains the node and path to target files, defines"
" the number of nodes the script will start on."
),
'slices_per_image': ingredient.IntField(
'--slices-per-image',
help="The number of (time) slices for each output image"
),
'subbands_per_image': ingredient.IntField(
'--subbands-per-image',
help="The number of subbands to be collected in each output image"
),
'asciistat_executable': ingredient.ExecField(
'--asciistat-executable',
help="full path to the ascii stat executable"
),
'statplot_executable': ingredient.ExecField(
'--statplot-executable',
help="The full path to the statplot executable"
),
'msselect_executable': ingredient.ExecField(
'--msselect-executable',
help="The full path to the msselect executable "
),
'rficonsole_executable': ingredient.ExecField(
'--rficonsole-executable',
help="The full path to the rficonsole executable "
),
'mapfile': ingredient.StringField(
'--mapfile',
help="Full path of mapfile; contains a list of the "
"successfully generated and concatenated sub-band groups"
),
'slices_mapfile': ingredient.StringField(
'--slices-mapfile',
help="Path to mapfile containing the produced subband groups"
),
'raw_ms_per_image_mapfile': ingredient.StringField(
'--raw-ms-per-image-mapfile',
help="Path to mapfile containing the raw ms for each produced"
"image"
),
'processed_ms_dir': ingredient.StringField(
'--processed-ms-dir',
help="Path to directory for processed measurment sets"
),
'add_beam_tables': ingredient.BoolField(
'--add_beam_tables',
default=False,
help="Developer option, adds beamtables to ms"
)
}
outputs = {
'mapfile': ingredient.FileField(
help="path to a mapfile Which contains a list of the"
"successfully generated and concatenated measurement set"
),
'slices_mapfile': ingredient.FileField(
help="Path to mapfile containing the produced subband groups"),
'raw_ms_per_image_mapfile': ingredient.FileField(
help="Path to mapfile containing the raw ms for each produced"
"image")
}
def go(self):
"""
Entry point for recipe: Called by the pipeline framework
"""
super(imager_prepare, self).go()
self.logger.info("Starting imager_prepare run")
# *********************************************************************
# input data
input_map = DataMap.load(self.inputs['args'][0])
output_map = DataMap.load(self.inputs['target_mapfile'])
slices_per_image = self.inputs['slices_per_image']
subbands_per_image = self.inputs['subbands_per_image']
# Validate input
if not self._validate_input_map(input_map, output_map, slices_per_image,
subbands_per_image):
return 1
# outputs
output_ms_mapfile_path = self.inputs['mapfile']
# *********************************************************************
# schedule the actual work
# TODO: Refactor this function into: load data, perform work,
# create output
node_command = " python %s" % (self.__file__.replace("master", "nodes"))
jobs = []
paths_to_image_mapfiles = []
n_subband_groups = len(output_map)
for idx_sb_group, item in enumerate(output_map):
#create the input files for this node
self.logger.debug("Creating input data subset for processing"
"on: {0}".format(item.host))
inputs_for_image_map = \
self._create_input_map_for_sbgroup(
slices_per_image, n_subband_groups,
subbands_per_image, idx_sb_group, input_map)
# Save the mapfile
job_directory = self.config.get(
"layout", "job_directory")
inputs_for_image_mapfile_path = os.path.join(
job_directory, "mapfiles",
"ms_per_image_{0}".format(idx_sb_group))
self._store_data_map(inputs_for_image_mapfile_path,
inputs_for_image_map, "inputmap for location")
#save the (input) ms, as a list of mapfiles
paths_to_image_mapfiles.append(
tuple([item.host, inputs_for_image_mapfile_path, False]))
arguments = [self.environment,
self.inputs['parset'],
self.inputs['working_directory'],
self.inputs['processed_ms_dir'],
self.inputs['ndppp_exec'],
item.file,
slices_per_image,
subbands_per_image,
inputs_for_image_mapfile_path,
self.inputs['asciistat_executable'],
self.inputs['statplot_executable'],
self.inputs['msselect_executable'],
self.inputs['rficonsole_executable'],
self.inputs['add_beam_tables']]
jobs.append(ComputeJob(item.host, node_command, arguments))
# Hand over the job(s) to the pipeline scheduler
self._schedule_jobs(jobs)
# *********************************************************************
# validate the output, cleanup, return output
if self.error.isSet(): #if one of the nodes failed
self.logger.warn("Failed prepare_imager run detected: Generating "
"new output_ms_mapfile_path without failed runs:"
" {0}".format(output_ms_mapfile_path))
concat_ms = copy.deepcopy(output_map)
slices = []
finished_runs = 0
#scan the return dict for completed key
for (item, job) in zip(concat_ms, jobs):
# only save the slices if the node has completed succesfull
if job.results["returncode"] == 0:
finished_runs += 1
slices.append(tuple([item.host,
job.results["time_slices"], False]))
else:
# Set the dataproduct to skipped!!
item.skip = True
slices.append(tuple([item.host, ["/Failed"], True]))
msg = "Failed run on {0}. NOT Created: {1} ".format(
item.host, item.file)
self.logger.warn(msg)
if finished_runs == 0:
self.logger.error("None of the started compute node finished:"
"The current recipe produced no output, aborting")
return 1
# Write the output mapfiles:
# concat.ms paths:
self._store_data_map(output_ms_mapfile_path, concat_ms,
"mapfile with concat.ms")
# timeslices
MultiDataMap(slices).save(self.inputs['slices_mapfile'])
self.logger.info(
"Wrote MultiMapfile with produces timeslice: {0}".format(
self.inputs['slices_mapfile']))
#map with actual input mss.
self._store_data_map(self.inputs["raw_ms_per_image_mapfile"],
DataMap(paths_to_image_mapfiles),
"mapfile containing (raw) input ms per image:")
# Set the return values
self.outputs['mapfile'] = output_ms_mapfile_path
self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
self.outputs['raw_ms_per_image_mapfile'] = \
self.inputs["raw_ms_per_image_mapfile"]
return 0
def _create_input_map_for_sbgroup(self, slices_per_image,
n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
"""
Creates an input mapfile:
This is a subset of the complete input_mapfile based on the subband
details suplied: The input_mapfile is structured: First all subbands for
a complete timeslice and the the next timeslice. The result value
contains all the information needed for a single subbandgroup to be
computed on a single compute node
"""
inputs_for_image = []
# collect the inputs: first step over the time slices
for idx_slice in range(slices_per_image):
# calculate the first line for current time slice and subband group
line_idx_start = idx_slice * \
(n_subband_groups * subbands_per_image) + \
(idx_sb_group * subbands_per_image)
line_idx_end = line_idx_start + subbands_per_image
#extend inputs with the files for the current time slice
inputs_for_image.extend(input_mapfile[line_idx_start: line_idx_end])
return DataMap(inputs_for_image)
def | (self, input_map, output_map, slices_per_image,
subbands_per_image):
"""
Return False if the inputs supplied are incorrect:
the number if inputs and output does not match.
Return True if correct.
The number of inputs is correct iff.
len(input_map) ==
len(output_map) * slices_per_image * subbands_per_image
"""
# The output_map contains a number of path/node pairs. The final data
# dataproduct of the prepare phase: The 'input' for each of these pairs
# is a number of raw measurement sets: The number of time slices times
# the number of subbands collected into each of these time slices.
# The total length of the input map should match this.
if len(input_map) != len(output_map) * \
(slices_per_image * subbands_per_image):
self.logger.error(
"Incorrect number of input ms for supplied parameters:\n\t"
"len(input_map) = {0}\n\t"
"len(output_map) * slices_per_image * subbands_per_image = "
"{1} * {2} * {3} = {4}".format(
len(input_map), len(output_map),
slices_per_image, subbands_per_image,
len(output_map) * slices_per_image * subbands_per_image
)
)
return False
return True
if __name__ == "__main__":
sys.exit(imager_prepare().main())
| _validate_input_map | identifier_name |
room.go | /*
房间
*/
package engine
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"time"
. "kelei.com/utils/common"
"kelei.com/utils/logger"
)
/*
游戏规则
默认版{
1. 出牌时间15秒
2. 自动出牌1次托管
}
录制版{
1. 出牌时间30秒
2. 自动出牌不托管
}
*/
const (
GameRule_Normal = iota //默认版
GameRule_Record //录制版
)
const (
Match_JD = iota //经典
Match_HYTW //好友同玩
Match_HXS //海选赛
)
const (
CARDMODE_RANDOM = iota //随机
CARDMODE_NOWASH //不洗牌
)
const (
GAMETYPE_REGULAR = iota //常规赛
GAMETYPE_DOUBLE //加倍赛
)
const (
HANDLETYPE_CALL = iota //叫地主
HANDLETYPE_RUSH //抢地主
)
const (
RoomType_Primary = iota //初级
RoomType_Intermediate //中级
RoomType_Advanced //高级
RoomType_Master //大师
RoomType_Tribute //进贡
)
const (
SetController_NewCycle = iota //新一轮
SetController_Press //压牌
SetController_Pass //要不了
SetController_NoChange //没有变化
SetController_Liuju //流局
)
const (
RoomStatus_Setout = iota //准备
RoomStatus_Deal //发牌(可明牌)
RoomStatus_Handle //叫地主、抢地主、加倍(可明牌)
RoomStatus_Liuju //流局
RoomStatus_Match //开赛
)
const (
MatchingStatus_Run = iota //进行中
MatchingStatus_Pause //暂停
MatchingStatus_Over //结束
)
const (
PlayWaitTime = 10 //要不起的等待时间
PlayWaitTime_Long = 20 //其它的等待时间
)
type Room struct {
id string //id
matchid int //比赛类型
roomtype int //房间类型
pcount int //人数
status int //房间状态
matchingStatus int //开赛后的状态
users []*User //玩家列表
userids []string //玩家UserID集合
idleusers map[string]*User //未落座玩家列表
idleuserids []string //未落座玩家UserID集合
cuser *User //牌权的玩家
cards []Card //当前牌
cardsuser *User //当前牌的玩家
playTime int //出牌的次数
playRound int //出牌的轮次
users_cards map[string]string //当前轮所有人的出牌信息
inning int //当前局数
innings int //总局数
inningRegular int //常规赛局数
setCtlMsg []string //设置牌权的内容,推送残局的时候用
surplusBKingCount int //剩余大王数量
surplusSKingCount int //剩余小王数量
surplusTwoCount int //剩余2数量
cardinality int //基数
baseScore int //底分
multiple int //倍数
liujuMultiple int //流局倍数
playWaitTime int //要不起等待时间
playWaitTime_Long int //其它等待时间
gameRule int //游戏规则
firstController *User //第一个出牌的人
judgmentUser *User //裁判
records []*string //所有的记录(回放用)
dealMode int //发牌模式
cardMode int //牌的模式(随机、不洗牌)
gameType int //游戏类型
baseCards []Card //底牌
landlord *User //地主
farmers []*User //农民
canHandleUser *User //当前可操作的玩家
canCallLandlordUser *User //可叫地主的玩家
landlordPlayCardCount int //地主出牌次数
farmerPlayCardCount int //农民出牌次数
councilTask *Task //本局任务
usersVideoIntegral []int //玩家积分列表
springStatus int //春天的状态(0无1春天2反春)
}
func (r *Room) GetRoomID() *string {
return &r.id
}
func (r *Room) SetRoomID(roomid string) {
r.id = roomid
}
//根据玩法规则配置房间
func (r *Room) configRoomByGameRule() {
r.playWaitTime = PlayWaitTime
r.playWaitTime_Long = PlayWaitTime_Long
r.setGameRule(r.GetGameRuleConfig())
if r.getGameRule() == GameRule_Record {
r.playWaitTime = 10
r.playWaitTime_Long = 20
}
}
//重置
func (r *Room) reset() {
r.userids = nil
r.setPlayTime(0)
r.setPlayRound(0)
r.setSurplusBKingCount(4)
r.setSurplusSKingCount(4)
r.setSurplusTwoCount(16)
r.setControllerUser(nil)
r.setCurrentCards([]Card{})
r.setCurrentCardsUser(nil)
r.setSetCtlMsg([]string{})
r.setBaseScore(0)
r.setMultiple(1)
r.setLandlord(nil)
r.setLandlordPlayCardCount(0)
r.setFarmerPlayCardCount(0)
for _, user := range r.getUsers() {
if user != nil {
user.resume()
}
}
r.users_cards = make(map[string]string, pcount)
}
//设置房间的基础信息
func (r *Room) setRoomBaseInfo() {
allRoomData := *r.getAllRoomData()
arrAllRoomData := strings.Split(allRoomData, "|")
for _, roomData := range arrAllRoomData {
arrRoomData_s := strings.Split(roomData, "$")
arrRoomData := StrArrToIntArr(arrRoomData_s)
roomType, _, multiple := arrRoomData[0], arrRoomData[1], arrRoomData[2]
if roomType == r.GetRoomType() {
r.setMultiple(multiple)
break
}
}
}
//是否赛前玩家操作中
func (r *Room) isHandling() bool {
if r.GetRoomStatus() == RoomStatus_Handle {
return true
}
return false
}
//是否正在比赛
func (r *Room) isMatching() bool {
if r.GetRoomStatus() == RoomStatus_Setout {
return false
}
return true
}
//获取游戏规则
func (r *Room) getGameRule() int {
return r.gameRule
}
//设置游戏规则
func (r *Room) setGameRule(gameRule int) {
r.gameRule = gameRule
}
//获取发牌模式
func (r *Room) getDealMode() int {
return r.dealMode
}
//设置发牌模式
func (r *Room) setDealMode(dealMode int) {
r.dealMode = dealMode
}
//获取牌的模式
func (r *Room) GetCardMode() int {
return r.cardMode
}
//设置牌的模式
func (r *Room) SetCardMode(cardMode int) {
r.cardMode = cardMode
}
//获取游戏模式
func (r *Room) getGameType() int {
return r.gameType
}
//设置游戏模式
func (r *Room) setGameType(gameType int) {
r.gameType = gameType
}
//获取底牌
func (r *Room) getBaseCards() []Card {
return r.baseCards
}
//设置底牌
func (r *Room) setBaseCards(baseCards []Card) {
r.baseCards = baseCards
}
//获取地主
func (r *Room) getLandlord() *User {
return r.landlord
}
//设置地主
func (r *Room) setLandlord(landlord *User) {
r.landlord = landlord
}
//获取农民
func (r *Room) getFarmers() []*User {
return r.farmers
}
//设置农民
func (r *Room) setFarmers(users []*User) {
r.farmers = users
}
//获取当前可操作的玩家
func (r *Room) getCanHandleUser() *User {
return r.canHandleUser
}
/*
设置当前可操作的玩家
push:Handle_Push,userid,操作类型,当前底分,赛制
des:操作类型(0叫地主 1抢地主)
赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
设置当前可操作的玩家并设置倒计时
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
canHandleUser.countDown_handle(time.Second * 10)
r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//累加地主出牌次数
func (r *Room) updteLandlordPlayCardCount() {
r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//累加农民出牌次数
func (r *Room) updteFarmerPlayCardCount() {
r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//重开
func (r *Room) reStart() {
r.resetUsers()
r.closeUserCountDown()
r.SetRoomStatus(RoomStatus_Setout)
r.reset()
}
//玩家转变成地主
func (r *Room) userTurnLandlord(user *User) {
logger.Debugf("%s 成为地主", *user.getUID())
user.setLandlord(true)
r.setLandlord(user)
farmers := []*User{}
for _, u := range r.getUsers() {
if u != user {
farmers = append(farmers, u)
}
}
r.setFarmers(farmers)
r.addCardsToLandlord()
r.showBaseCards(nil)
r.openDouble()
}
/*
亮底牌
push:BaseCards_Push,地主userid,cardid$cardid$cardid,底牌类型,底牌倍数,是否加入牌中
*/
func (r *Room) showBaseCards(user *User) {
if r.getLandlord() == nil {
return
}
// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
cards := | ltiple := r.getBaseCardsInfo()
userids := []string{}
addToCards := 0
if user == nil { //只执行一次(地主出现的时候)
//根据底牌加倍
if multiple > 1 {
r.setMultiple(r.getMultiple() * multiple)
r.pushMultiple()
}
userids = r.getUserIDs()
addToCards = 1
} else { //短线重连进来的
userids = []string{*user.getUserID()}
}
message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
if user == nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
}
landlord.setCards(tmpCards)
}
}
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var cards CardList = r.getBaseCards()
shunzi := []int{}
tonghua := map[int]bool{}
baozi := map[int]bool{}
wangzha := map[int]bool{}
for _, card := range cards {
if card.Priority < Priority_Two {
if len(shunzi) == 0 {
shunzi = append(shunzi, card.Priority)
} else {
if shunzi[len(shunzi)-1]+1 == card.Priority {
shunzi = append(shunzi, card.Priority)
}
}
}
tonghua[card.Suit] = true
baozi[card.Priority] = true
if card.Priority >= Priority_SKing {
wangzha[card.Priority] = true
}
}
isShunzi := len(shunzi) == 3
isTonghua := len(tonghua) == 1
isBaozi := len(baozi) == 1
isWangzha := len(wangzha) == 2
isTonghuaShun := isShunzi && isTonghua
if isTonghuaShun {
cardsType = 4
multiple = 4
} else if isWangzha && false {
cardsType = 3
multiple = 2
} else if isShunzi {
cardsType = 2
multiple = 2
} else if isTonghua {
cardsType = 1
multiple = 2
} else if isBaozi {
cardsType = 0
multiple = 2
}
return cardsType, multiple
}
//获取牌的ID列表
func (u *Room) getCardsID(cards []Card) *string {
buff := bytes.Buffer{}
for _, card := range cards {
buff.WriteString(fmt.Sprintf("%d$", card.ID))
}
cardsid := RemoveLastChar(buff)
return cardsid
}
//获取开赛后的状态
func (r *Room) getMatchingStatus() int {
return r.matchingStatus
}
//设置开赛后的状态
func (r *Room) setMatchingStatus(matchingStatus int) {
r.matchingStatus = matchingStatus
}
//获取裁判
func (r *Room) getJudgmentUser() *User {
return r.judgmentUser
}
//设置裁判
func (r *Room) setJudgmentUser(judgmentUser *User) {
r.judgmentUser = judgmentUser
}
//获取房间基数
func (r *Room) getCardinality() int {
return r.cardinality
}
//设置房间基数
func (r *Room) setCardinality(cardinality int) {
r.cardinality = cardinality
}
//获取房间底分
func (r *Room) getBaseScore() int {
return r.baseScore
}
//设置房间底分
func (r *Room) setBaseScore(baseScore int) {
r.baseScore = baseScore
}
/*
推送倍率
push:Multiple_Push,倍数
*/
func (r *Room) pushMultiple() {
multiple := strconv.Itoa(r.getRealityMultiple())
pushMessageToUsers("Multiple_Push", []string{multiple}, r.getUserIDs())
r.pushJudgment("Multiple_Push", multiple)
}
//获取房间倍数
func (r *Room) getMultiple() int {
return r.multiple
}
//设置房间倍数
func (r *Room) setMultiple(multiple int) {
r.multiple = multiple
}
//两倍房间倍数并推送
func (r *Room) doubleMultiple() {
r.setMultiple(r.getMultiple() * 2)
r.pushMultiple()
}
//三倍房间倍数并推送
func (r *Room) tripleMultiple() {
r.setMultiple(r.getMultiple() * 3)
r.pushMultiple()
}
//获取流局倍数
func (r *Room) getLiujuMultiple() int {
return r.liujuMultiple
}
//设置流局倍数
func (r *Room) setLiujuMultiple(liujuMultiple int) {
r.liujuMultiple = liujuMultiple
}
//获取房间真实倍数
func (r *Room) getRealityMultiple() int {
return r.getMultiple() * r.getLiujuMultiple()
}
//更新出牌的轮次
func (r *Room) updatePlayRound() int {
r.playRound += 1
return r.playRound
}
//获取出牌的轮次
func (r *Room) getPlayRound() int {
return r.playRound
}
//设置出牌的轮次
func (r *Room) setPlayRound(playRound int) {
r.playRound = playRound
}
//更新出牌的次数
func (r *Room) updatePlayTime() int {
r.playTime += 1
return r.playTime
}
//获取出牌的次数
func (r *Room) getPlayTime() int {
return r.playTime
}
//获取出牌的次数
func (r *Room) setPlayTime(playTime int) {
r.playTime = playTime
}
//获取剩余大王的数量
func (r *Room) getSurplusBKingCount() int {
return r.surplusBKingCount
}
//设置剩余大王的数量
func (r *Room) setSurplusBKingCount(v int) {
r.surplusBKingCount = v
}
//更新剩余大王的数量
func (r *Room) updateSurplusBKingCount() {
r.surplusBKingCount = r.surplusBKingCount - 1
}
//获取剩余小王的数量
func (r *Room) getSurplusSKingCount() int {
return r.surplusSKingCount
}
//设置剩余小王的数量
func (r *Room) setSurplusSKingCount(v int) {
r.surplusSKingCount = v
}
//更新剩余小王的数量
func (r *Room) updateSurplusSKingCount() {
r.surplusSKingCount = r.surplusSKingCount - 1
}
//获取剩余2的数量
func (r *Room) getSurplusTwoCount() int {
return r.surplusTwoCount
}
//设置剩余2的数量
func (r *Room) setSurplusTwoCount(v int) {
r.surplusTwoCount = v
}
//更新剩余2的数量
func (r *Room) updateSurplusTwoCount() {
r.surplusTwoCount = r.surplusTwoCount - 1
}
//获取设置牌权的命令
func (r *Room) getSetCtlMsg() []string {
return r.setCtlMsg
}
//设置牌权的内容,推送残局时候用
func (r *Room) setSetCtlMsg(setCtlMsg []string) {
r.setCtlMsg = setCtlMsg
}
//获取初始牌数量是否完整
func (r *Room) initCardCountIsIntegrity() bool {
return cardCount == perCapitaCardCount
}
//获取房间人数
func (r *Room) GetPCount() int {
return r.pcount
}
//更新房间人数
func (r *Room) updatePCount(v int) {
r.pcount = r.pcount + v
}
//获取房间观战人数
func (r *Room) GetIdlePCount() int {
return len(r.idleusers)
}
//根据index获取玩家
func (r *Room) getUserByIndex(index int) *User {
return r.users[index]
}
//获取房间入座人数
func (r *Room) getUserCount() int {
count := 0
for _, user := range r.users {
if user != nil {
count += 1
}
}
return count
}
//获取准备中的玩家数量
func (r *Room) getSetoutCount() int {
count := 0
for _, user := range r.users {
if user != nil {
if user.getStatus() == UserStatus_Setout {
count += 1
}
}
}
return count
}
/*
获取玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
r.userids = nil
}
}
if r.userids == nil {
r.userids = []string{}
for _, user := range r.users {
if user != nil {
r.userids = append(r.userids, *user.userid)
}
}
}
return r.userids
}
/*
获取未落座玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getIdleUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
r.idleuserids = nil
}
}
if r.idleuserids == nil {
r.idleuserids = []string{}
for _, user := range r.idleusers {
if user != nil {
r.idleuserids = append(r.idleuserids, *user.getUserID())
}
}
}
return r.idleuserids
}
/*
获取(UserID+IdleUserID)字符串集合
in:是否刷新
*/
func (r *Room) getAllUserIDs() []string {
userids := r.getUserIDs(true)
idleuserids := r.getIdleUserIDs(true)
userids = InsertStringSlice(userids, idleuserids, len(userids))
return userids
}
//获取比赛类型
func (r *Room) GetMatchID() int {
return r.matchid
}
//设置比赛类型
func (r *Room) setMatchID(matchID int) {
r.matchid = matchID
}
//获取总轮次
func (r *Room) getInnings() int {
return r.innings
}
//设置当前轮次
func (r *Room) setInnings(innings int) {
r.innings = innings
}
//获取当前轮次
func (r *Room) getInning() int {
return r.inning
}
//设置当前轮次
func (r *Room) setInning(inning int) {
r.inning = inning
}
//获取常规赛局数
func (r *Room) getInningRegular() int {
return r.inningRegular
}
//设置常规赛局数
func (r *Room) setInningRegular(inningRegular int) {
r.inningRegular = inningRegular
}
//获取房间类型
func (r *Room) GetRoomType() int {
return r.roomtype
}
//设置房间类型
func (r *Room) setRoomType(roomType int) {
r.roomtype = roomType
}
//获取牌权玩家
func (r *Room) getControllerUser() *User {
return r.cuser
}
//设置牌权玩家
func (r *Room) setControllerUser(user *User) {
r.cuser = user
}
//获取当前牌
func (r *Room) getCurrentCards() []Card {
return r.cards
}
//设置当前牌
func (r *Room) setCurrentCards(cards []Card) {
r.cards = cards
}
//获取当前牌的玩家
func (r *Room) getCurrentCardsUser() *User {
return r.cardsuser
}
//设置当前牌的玩家
func (r *Room) setCurrentCardsUser(user *User) {
r.cardsuser = user
}
//获取房间状态
func (r *Room) GetRoomStatus() int {
return r.status
}
//设置房间状态
func (r *Room) SetRoomStatus(status int) {
r.status = status
}
//获取落座的所有玩家
func (r *Room) getUsers() []*User {
return r.users
}
//获取未落座的所有玩家
func (r *Room) getIdleUsers() map[string]*User {
return r.idleusers
}
/*
把房间中所有玩家在负载均衡服务器上的信息都删除
重置玩家
*/
func (r *Room) deleteUsersInfo() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.deleteUserInfo()
}
}
}
/*
重置房间中所有的玩家
*/
func (r *Room) resetUsers() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.reset()
}
}
}
//关闭房间
func (r *Room) close() {
RoomManage.removeRoom(r)
}
//给裁判提送信息
func (r *Room) pushJudgment(funcName string, message string) {
if judgmentUser := r.getJudgmentUser(); judgmentUser != nil {
judgmentUser.push(funcName, &message)
}
}
//设置所有人托管状态
func (r *Room) SetAllUsersTrusteeshipStatus(status bool) {
for _, user := range r.getUsers() {
if user != nil {
user.trusteeship = status
}
}
}
/*
所有选手端是否在线
*/
func (r *Room) AllUsersOnlinePush() {
for _, user := range r.getUsers() {
if user != nil {
status := 0
if user.getOnline() {
status = 1
}
r.pushJudgment("Online_Push", fmt.Sprintf("%s|%d", *user.getUserID(), status))
}
}
}
| r.getBaseCards()
cardsType, mu | identifier_body |
room.go | /*
房间
*/
package engine
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"time"
. "kelei.com/utils/common"
"kelei.com/utils/logger"
)
/*
游戏规则
默认版{
1. 出牌时间15秒
2. 自动出牌1次托管
}
录制版{
1. 出牌时间30秒
2. 自动出牌不托管
}
*/
const (
GameRule_Normal = iota //默认版
GameRule_Record //录制版
)
const (
Match_JD = iota //经典
Match_HYTW //好友同玩
Match_HXS //海选赛
)
const (
CARDMODE_RANDOM = iota //随机
CARDMODE_NOWASH //不洗牌
)
const (
GAMETYPE_REGULAR = iota //常规赛
GAMETYPE_DOUBLE //加倍赛
)
const (
HANDLETYPE_CALL = iota //叫地主
HANDLETYPE_RUSH //抢地主
)
const (
RoomType_Primary = iota //初级
RoomType_Intermediate //中级
RoomType_Advanced //高级
RoomType_Master //大师
RoomType_Tribute //进贡
)
const (
SetController_NewCycle = iota //新一轮
SetController_Press //压牌
SetController_Pass //要不了
SetController_NoChange //没有变化
SetController_Liuju //流局
)
const (
RoomStatus_Setout = iota //准备
RoomStatus_Deal //发牌(可明牌)
RoomStatus_Handle //叫地主、抢地主、加倍(可明牌)
RoomStatus_Liuju //流局
RoomStatus_Match //开赛
)
const (
MatchingStatus_Run = iota //进行中
MatchingStatus_Pause //暂停
MatchingStatus_Over //结束
)
const (
PlayWaitTime = 10 //要不起的等待时间
PlayWaitTime_Long = 20 //其它的等待时间
)
type Room struct {
id string //id
matchid int //比赛类型
roomtype int //房间类型
pcount int //人数
status int //房间状态
matchingStatus int //开赛后的状态
users []*User //玩家列表
userids []string //玩家UserID集合
idleusers map[string]*User //未落座玩家列表
idleuserids []string //未落座玩家UserID集合
cuser *User //牌权的玩家
cards []Card //当前牌
cardsuser *User //当前牌的玩家
playTime int //出牌的次数
playRound int //出牌的轮次
users_cards map[string]string //当前轮所有人的出牌信息
inning int //当前局数
innings int //总局数
inningRegular int //常规赛局数
setCtlMsg []string //设置牌权的内容,推送残局的时候用
surplusBKingCount int //剩余大王数量
surplusSKingCount int //剩余小王数量
surplusTwoCount int //剩余2数量
cardinality int //基数
baseScore int //底分
multiple int //倍数
liujuMultiple int //流局倍数
playWaitTime int //要不起等待时间
playWaitTime_Long int //其它等待时间
gameRule int //游戏规则
firstController *User //第一个出牌的人
judgmentUser *User //裁判
records []*string //所有的记录(回放用)
dealMode int //发牌模式
cardMode int //牌的模式(随机、不洗牌)
gameType int //游戏类型
baseCards []Card //底牌
landlord *User //地主
farmers []*User //农民
canHandleUser *User //当前可操作的玩家
canCallLandlordUser *User //可叫地主的玩家
landlordPlayCardCount int //地主出牌次数
farmerPlayCardCount int //农民出牌次数
councilTask *Task //本局任务
usersVideoIntegral []int //玩家积分列表
springStatus int //春天的状态(0无1春天2反春)
}
func (r *Room) GetRoomID() *string {
return &r.id
}
func (r *Room) SetRoomID(roomid string) {
r.id = roomid
}
//根据玩法规则配置房间
func (r *Room) configRoomByGameRule() {
r.playWaitTime = PlayWaitTime
r.playWaitTime_Long = PlayWaitTime_Long
r.setGameRule(r.GetGameRuleConfig())
if r.getGameRule() == GameRule_Record {
r.playWaitTime = 10
r.playWaitTime_Long = 20
}
}
//重置
func (r *Room) reset() {
r.userids = nil
r.setPlayTime(0)
r.setPlayRound(0)
r.setSurplusBKingCount(4)
r.setSurplusSKingCount(4)
r.setSurplusTwoCount(16)
r.setControllerUser(nil)
r.setCurrentCards([]Card{})
r.setCurrentCardsUser(nil)
r.setSetCtlMsg([]string{})
r.setBaseScore(0)
r.setMultiple(1)
r.setLandlord(nil)
r.setLandlordPlayCardCount(0)
r.setFarmerPlayCardCount(0)
for _, user := range r.getUsers() {
if user != nil {
user.resume()
}
}
r.users_cards = make(map[string]string, pcount)
}
//设置房间的基础信息
func (r *Room) setRoomBaseInfo() {
allRoomData := *r.getAllRoomData()
arrAllRoomData := strings.Split(allRoomData, "|")
for _, roomData := range arrAllRoomData {
arrRoomData_s := strings.Split(roomData, "$")
arrRoomData := StrArrToIntArr(arrRoomData_s)
roomType, _, multiple := arrRoomData[0], arrRoomData[1], arrRoomData[2]
if roomType == r.GetRoomType() {
r.setMultiple(multiple)
break
}
}
}
//是否赛前玩家操作中
func (r *Room) isHandling() bool {
if r.GetRoomStatus() == RoomStatus_Handle {
return true
}
return false
}
//是否正在比赛
func (r *Room) isMatching() bool {
if r.GetRoomStatus() == RoomStatus_Setout {
return false
}
return true
}
//获取游戏规则
func (r *Room) getGameRule() int {
return r.gameRule
}
//设置游戏规则
func (r *Room) setGameRule(gameRule int) {
r.gameRule = gameRule
}
//获取发牌模式
func (r *Room) getDealMode() int {
return r.dealMode
}
//设置发牌模式
func (r *Room) setDealMode(dealMode int) {
r.dealMode = dealMode
}
//获取牌的模式
func (r *Room) GetCardMode() int {
return r.cardMode
}
//设置牌的模式
func (r *Room) SetCardMode(cardMode int) {
r.cardMode = cardMode
}
//获取游戏模式
func (r *Room) getGameType() int {
return r.gameType
}
//设置游戏模式
func (r *Room) setGameType(gameType int) {
r.gameType = gameType
}
//获取底牌
func (r *Room) getBaseCards() []Card {
return r.baseCards
}
//设置底牌
func (r *Room) setBaseCards(baseCards []Card) {
r.baseCards = baseCards
}
//获取地主
func (r *Room) getLandlord() *User {
return r.landlord
}
//设置地主
func (r *Room) setLandlord(landlord *User) {
r.landlord = landlord
}
//获取农民
func (r *Room) getFarmers() []*User {
return r.farmers
}
//设置农民
func (r *Room) setFarmers(users []*User) {
r.farmers = users
}
//获取当前可操作的玩家
func (r *Room) getCanHandleUser() *User {
return r.canHandleUser
}
/*
设置当前可操作的玩家
push:Handle_Push,userid,操作类型,当前底分,赛制
des:操作类型(0叫地主 1抢地主)
赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
设置当前可操作的玩家并设置倒计时
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
canHandleUser.countDown_handle(time.Second * 10)
r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//累加地主出牌次数
func (r *Room) updteLandlordPlayCardCount() {
r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//累加农民出牌次数
func (r *Room) updteFarmerPlayCardCount() {
r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//重开
func (r *Room) reStart() {
r.resetUsers()
r.closeUserCountDown()
r.SetRoomStatus(RoomStatus_Setout)
r.reset()
}
//玩家转变成地主
func (r *Room) userTurnLandlord(user *User) {
logger.Debugf("%s 成为地主", *user.getUID())
user.setLandlord(true)
r.setLandlord(user)
farmers := []*User{}
for _, u := range r.getUsers() {
if u != user {
farmers = append(farmers, u)
}
}
r.setFarmers(farmers)
r.addCardsToLandlord()
r.showBaseCards(nil)
r.openDouble()
}
/*
亮底牌
push:BaseCards_Push,地主userid,cardid$cardid$cardid,底牌类型,底牌倍数,是否加入牌中
*/
func (r *Room) showBaseCards(user *User) {
if r.getLandlord() == nil {
return
}
// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
cards := r.getBaseCards()
cardsType, multiple := r.getBaseCardsInfo()
userids := []string{}
addToCards := 0
if user == nil { //只执行一次(地主出现的时候)
//根据底牌加倍
if multiple > 1 {
r.setMultiple(r.getMultiple() * multiple)
r.pushMultiple()
}
userids = r.getUserIDs()
addToCards = 1
} else { //短线重连进来的
userids = []string{*user.getUserID()}
}
message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
if user == nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
}
landlord.setCards(tmpCards)
}
}
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var cards CardList = r.getBaseCards()
shunzi := []int{}
tonghua := map[int]bool{}
baozi := map[int]bool{}
wangzha := map[int]bool{}
for _, card := range cards {
if card.Priority < Priority_Two {
if len(shunzi) == 0 {
shunzi = append(shunzi, card.Priority)
} else {
if shunzi[len(shunzi)-1]+1 == card.Priority {
shunzi = append(shunzi, card.Priority)
}
}
}
tonghua[card.Suit] = true
baozi[card.Priority] = true
if card.Priority >= Priority_SKing {
wangzha[card.Priority] = true
}
}
isShunzi := len(shunzi) == 3
isTonghua := len(tonghua) == 1
isBaozi := len(baozi) == 1
isWangzha := len(wangzha) == 2
isTonghuaShun := isShunzi && isTonghua
if isTonghuaShun {
cardsType = 4
multiple = 4
} else if isWangzha && false {
cardsType = 3
multiple = 2
} else if isShunzi {
cardsType = 2
multiple = 2
} else if isTonghua {
cardsType = 1
multiple = 2
} else if isBaozi {
cardsType = 0
multiple = 2
}
return cardsType, multiple
}
//获取牌的ID列表
func (u *Room) getCardsID(cards []Card) *string {
buff := bytes.Buffer{}
for _, card := range cards {
buff.WriteString(fmt.Sprintf("%d$", card.ID))
}
cardsid := RemoveLastChar(buff)
return cardsid
}
//获取开赛后的状态
func (r *Room) getMatchingStatus() int {
return r.matchingStatus
}
//设置开赛后的状态
func (r *Room) setMatchingStatus(matchingStatus int) {
r.matchingStatus = matchingStatus
}
//获取裁判
func (r *Room) getJudgmentUser() *User {
return r.judgmentUser
}
//设置裁判
func (r *Room) setJudgmentUser(judgmentUser *User) {
r.judgmentUser = judgmentUser
}
//获取房间基数
func (r *Room) getCardinality() int {
return r.cardinality
}
//设置房间基数
func (r *Room) setCardinality(cardinality int) {
r.cardinality = cardinality
}
//获取房间底分
func (r *Room) getBaseScore() int {
return r.baseScore
}
//设置房间底分
func (r *Room) setBaseScore(baseScore int) {
r.baseScore = baseScore
}
/*
推送倍率
push:Multiple_Push,倍数
*/
func (r *Room) pushMultiple() {
multiple := strconv.Itoa(r.getRealityMultiple())
pushMessageToUsers("Multiple_Push", []string{multiple}, r.getUserIDs())
r.pushJudgment("Multiple_Push", multiple)
}
//获取房间倍数
func (r *Room) getMultiple() int {
return r.multiple
}
//设置房间倍数
func (r *Room) setMultiple(multiple int) {
r.multiple = multiple
}
//两倍房间倍数并推送
func (r *Room) doubleMultiple() {
r.setMultiple(r.getMultiple() * 2)
r.pushMultiple()
}
//三倍房间倍数并推送
func (r *Room) tripleMultiple() {
r.setMultiple(r.getMultiple() * 3)
r.pushMultiple()
}
//获取流局倍数
func (r *Room) getLiujuMultiple() int {
return r.liujuMultiple
}
//设置流局倍数
func (r *Room) setLiujuMultiple(liujuMultiple int) {
r.liujuMultiple = liujuMultiple
}
//获取房间真实倍数
func (r *Room) getRealityMultiple() int {
return r.getMultiple() * r.getLiujuMultiple()
}
//更新出牌的轮次
func (r *Room) updatePlayRound() int {
r.playRound += 1
return r.playRound
}
//获取出牌的轮次
func (r *Room) getPlayRound() int {
return r.playRound
}
//设置出牌的轮次
func (r *Room) setPlayRound(playRound int) {
r.playRound = playRound
}
//更新出牌的次数
func (r *Room) updatePlayTime() int {
r.playTime += 1
return r.playTime
}
//获取出牌的次数
func (r *Room) getPlayTime() int {
return r.playTime
}
//获取出牌的次数
func (r *Room) setPlayTime(playTime int) {
r.playTime = playTime
}
//获取剩余大王的数量
func (r *Room) getSurplusBKingCount() int {
return r.surplusBKingCount
}
//设置剩余大王的数量
func (r *Room) setSurplusBKingCount(v int) {
r.surplusBKingCount = v
}
//更新剩余大王的数量
func (r *Room) updateSurplusBKingCount() {
r.surplusBKingCount = r.surplusBKingCount - 1
}
//获取剩余小王的数量
func (r *Room) getSurplusSKingCount() int {
return r.surplusSKingCount
}
//设置剩余小王的数量
func (r *Room) setSurplusSKingCount(v int) {
r.surplusSKingCount = v
}
//更新剩余小王的数量
func (r *Room) updateSurplusSKingCount() {
r.surplusSKingCount = r.surplusSKingCount - 1
}
//获取剩余2的数量
func (r *Room) getSurplusTwoCount() int {
return r.surplusTwoCount
}
//设置剩余2的数量
func (r *Room) setSurplusTwoCount(v int) {
r.surplusTwoCount = v
}
//更新剩余2的数量
func (r *Room) updateSurplusTwoCount() {
r.surplusTwoCount = r.surplusTwoCount - 1
}
//获取设置牌权的命令
func (r *Room) getSetCtlMsg() []string {
return r.setCtlMsg
}
//设置牌权的内容,推送残局时候用
func (r *Room) setSetCtlMsg(setCtlMsg []string) {
r.setCtlMsg = setCtlMsg
}
//获取初始牌数量是否完整
func (r *Room) initCardCountIsIntegrity() bool {
return cardCount == perCapitaCardCount
}
//获取房间人数
func (r *Room) GetPCount() int {
return r.pcount
}
//更新房间人数
func (r *Room) updatePCount(v int) {
r.pcount = r.pcount + v
}
//获取房间观战人数
func (r *Room) GetIdlePCount() int {
return len(r.idleusers)
}
//根据index获取玩家
func (r *Room) getUserByIndex(index int) *User {
return r.users[index]
}
//获取房间入座人数
func (r *Room) getUserCount() int {
count := 0
for _, user := range r.users {
if user != nil {
count += 1
}
}
return count
}
//获取准备中的玩家数量
func (r *Room) getSetoutCount() int {
count := 0
for _, user := range r.users {
if user != nil {
if user.getStatus() == UserStatus_Setout {
count += 1
}
}
}
return count
}
/*
获取玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
| }
}
if r.userids == nil {
r.userids = []string{}
for _, user := range r.users {
if user != nil {
r.userids = append(r.userids, *user.userid)
}
}
}
return r.userids
}
/*
获取未落座玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getIdleUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
r.idleuserids = nil
}
}
if r.idleuserids == nil {
r.idleuserids = []string{}
for _, user := range r.idleusers {
if user != nil {
r.idleuserids = append(r.idleuserids, *user.getUserID())
}
}
}
return r.idleuserids
}
/*
获取(UserID+IdleUserID)字符串集合
in:是否刷新
*/
func (r *Room) getAllUserIDs() []string {
userids := r.getUserIDs(true)
idleuserids := r.getIdleUserIDs(true)
userids = InsertStringSlice(userids, idleuserids, len(userids))
return userids
}
//获取比赛类型
func (r *Room) GetMatchID() int {
return r.matchid
}
//设置比赛类型
func (r *Room) setMatchID(matchID int) {
r.matchid = matchID
}
//获取总轮次
func (r *Room) getInnings() int {
return r.innings
}
//设置当前轮次
func (r *Room) setInnings(innings int) {
r.innings = innings
}
//获取当前轮次
func (r *Room) getInning() int {
return r.inning
}
//设置当前轮次
func (r *Room) setInning(inning int) {
r.inning = inning
}
//获取常规赛局数
func (r *Room) getInningRegular() int {
return r.inningRegular
}
//设置常规赛局数
func (r *Room) setInningRegular(inningRegular int) {
r.inningRegular = inningRegular
}
//获取房间类型
func (r *Room) GetRoomType() int {
return r.roomtype
}
//设置房间类型
func (r *Room) setRoomType(roomType int) {
r.roomtype = roomType
}
//获取牌权玩家
func (r *Room) getControllerUser() *User {
return r.cuser
}
//设置牌权玩家
func (r *Room) setControllerUser(user *User) {
r.cuser = user
}
//获取当前牌
func (r *Room) getCurrentCards() []Card {
return r.cards
}
//设置当前牌
func (r *Room) setCurrentCards(cards []Card) {
r.cards = cards
}
//获取当前牌的玩家
func (r *Room) getCurrentCardsUser() *User {
return r.cardsuser
}
//设置当前牌的玩家
func (r *Room) setCurrentCardsUser(user *User) {
r.cardsuser = user
}
//获取房间状态
func (r *Room) GetRoomStatus() int {
return r.status
}
//设置房间状态
func (r *Room) SetRoomStatus(status int) {
r.status = status
}
//获取落座的所有玩家
func (r *Room) getUsers() []*User {
return r.users
}
//获取未落座的所有玩家
func (r *Room) getIdleUsers() map[string]*User {
return r.idleusers
}
/*
把房间中所有玩家在负载均衡服务器上的信息都删除
重置玩家
*/
func (r *Room) deleteUsersInfo() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.deleteUserInfo()
}
}
}
/*
重置房间中所有的玩家
*/
func (r *Room) resetUsers() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.reset()
}
}
}
//关闭房间
func (r *Room) close() {
RoomManage.removeRoom(r)
}
//给裁判提送信息
func (r *Room) pushJudgment(funcName string, message string) {
if judgmentUser := r.getJudgmentUser(); judgmentUser != nil {
judgmentUser.push(funcName, &message)
}
}
//设置所有人托管状态
func (r *Room) SetAllUsersTrusteeshipStatus(status bool) {
for _, user := range r.getUsers() {
if user != nil {
user.trusteeship = status
}
}
}
/*
所有选手端是否在线
*/
func (r *Room) AllUsersOnlinePush() {
for _, user := range r.getUsers() {
if user != nil {
status := 0
if user.getOnline() {
status = 1
}
r.pushJudgment("Online_Push", fmt.Sprintf("%s|%d", *user.getUserID(), status))
}
}
}
| r.userids = nil
| identifier_name |
room.go | /*
房间
*/
package engine
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"time"
. "kelei.com/utils/common"
"kelei.com/utils/logger"
)
/*
游戏规则
默认版{
1. 出牌时间15秒
2. 自动出牌1次托管
}
录制版{
1. 出牌时间30秒
2. 自动出牌不托管
}
*/
const (
GameRule_Normal = iota //默认版
GameRule_Record //录制版
)
const (
Match_JD = iota //经典
Match_HYTW //好友同玩
Match_HXS //海选赛
)
const (
CARDMODE_RANDOM = iota //随机
CARDMODE_NOWASH //不洗牌
)
const (
GAMETYPE_REGULAR = iota //常规赛
GAMETYPE_DOUBLE //加倍赛
)
const (
HANDLETYPE_CALL = iota //叫地主
HANDLETYPE_RUSH //抢地主
)
const (
RoomType_Primary = iota //初级
RoomType_Intermediate //中级
RoomType_Advanced //高级
RoomType_Master //大师
RoomType_Tribute //进贡
)
const (
SetController_NewCycle = iota //新一轮
SetController_Press //压牌
SetController_Pass //要不了
SetController_NoChange //没有变化
SetController_Liuju //流局
)
const (
RoomStatus_Setout = iota //准备
RoomStatus_Deal //发牌(可明牌)
RoomStatus_Handle //叫地主、抢地主、加倍(可明牌)
RoomStatus_Liuju //流局
RoomStatus_Match //开赛
)
const (
MatchingStatus_Run = iota //进行中
MatchingStatus_Pause //暂停
MatchingStatus_Over //结束
)
const (
PlayWaitTime = 10 //要不起的等待时间
PlayWaitTime_Long = 20 //其它的等待时间
)
type Room struct {
id string //id
matchid int //比赛类型
roomtype int //房间类型
pcount int //人数
status int //房间状态
matchingStatus int //开赛后的状态
users []*User //玩家列表
userids []string //玩家UserID集合
idleusers map[string]*User //未落座玩家列表
idleuserids []string //未落座玩家UserID集合
cuser *User //牌权的玩家
cards []Card //当前牌
cardsuser *User //当前牌的玩家
playTime int //出牌的次数
playRound int //出牌的轮次
users_cards map[string]string //当前轮所有人的出牌信息
inning int //当前局数
innings int //总局数
inningRegular int //常规赛局数
setCtlMsg []string //设置牌权的内容,推送残局的时候用
surplusBKingCount int //剩余大王数量
surplusSKingCount int //剩余小王数量
surplusTwoCount int //剩余2数量
cardinality int //基数
baseScore int //底分
multiple int //倍数
liujuMultiple int //流局倍数
playWaitTime int //要不起等待时间
playWaitTime_Long int //其它等待时间
gameRule int //游戏规则
firstController *User //第一个出牌的人
judgmentUser *User //裁判
records []*string //所有的记录(回放用)
dealMode int //发牌模式
cardMode int //牌的模式(随机、不洗牌)
gameType int //游戏类型
baseCards []Card //底牌
landlord *User //地主
farmers []*User //农民
canHandleUser *User //当前可操作的玩家
canCallLandlordUser *User //可叫地主的玩家
landlordPlayCardCount int //地主出牌次数
farmerPlayCardCount int //农民出牌次数
councilTask *Task //本局任务
usersVideoIntegral []int //玩家积分列表
springStatus int //春天的状态(0无1春天2反春)
}
func (r *Room) GetRoomID() *string {
return &r.id
}
func (r *Room) SetRoomID(roomid string) {
r.id = roomid
}
//根据玩法规则配置房间
func (r *Room) configRoomByGameRule() {
r.playWaitTime = PlayWaitTime
r.playWaitTime_Long = PlayWaitTime_Long
r.setGameRule(r.GetGameRuleConfig())
if r.getGameRule() == GameRule_Record {
r.playWaitTime = 10
r.playWaitTime_Long = 20
}
}
//重置
func (r *Room) reset() {
r.userids = nil
r.setPlayTime(0)
r.setPlayRound(0)
r.setSurplusBKingCount(4)
r.setSurplusSKingCount(4)
r.setSurplusTwoCount(16)
r.setControllerUser(nil)
r.setCurrentCards([]Card{})
r.setCurrentCardsUser(nil)
r.setSetCtlMsg([]string{})
r.setBaseScore(0)
r.setMultiple(1)
r.setLandlord(nil)
r.setLandlordPlayCardCount(0)
r.setFarmerPlayCardCount(0)
for _, user := range r.getUsers() {
if user != nil {
user.resume()
}
}
r.users_cards = make(map[string]string, pcount)
}
//设置房间的基础信息
func (r *Room) setRoomBaseInfo() {
allRoomData := *r.getAllRoomData()
arrAllRoomData := strings.Split(allRoomData, "|")
for _, roomData := range arrAllRoomData {
arrRoomData_s := strings.Split(roomData, "$")
arrRoomData := StrArrToIntArr(arrRoomData_s)
roomType, _, multiple := arrRoomData[0], arrRoomData[1], arrRoomData[2]
if roomType == r.GetRoomType() {
r.setMultiple(multiple)
break
}
}
}
//是否赛前玩家操作中
func (r *Room) isHandling() bool {
if r.GetRoomStatus() == RoomStatus_Handle {
return true
}
return false
}
//是否正在比赛
func (r *Room) isMatching() bool {
if r.GetRoomStatus() == RoomStatus_Setout {
return false
}
return true
}
//获取游戏规则
func (r *Room) getGameRule() int {
return r.gameRule
}
//设置游戏规则
func (r *Room) setGameRule(gameRule int) {
r.gameRule = gameRule
}
//获取发牌模式
func (r *Room) getDealMode() int {
return r.dealMode
}
//设置发牌模式
func (r *Room) setDealMode(dealMode int) {
r.dealMode = dealMode
}
//获取牌的模式
func (r *Room) GetCardMode() int {
return r.cardMode
}
//设置牌的模式
func (r *Room) SetCardMode(cardMode int) {
r.cardMode = cardMode
}
//获取游戏模式
func (r *Room) getGameType() int {
return r.gameType
}
//设置游戏模式
func (r *Room) setGameType(gameType int) {
r.gameType = gameType
}
//获取底牌
func (r *Room) getBaseCards() []Card {
return r.baseCards
}
//设置底牌
func (r *Room) setBaseCards(baseCards []Card) {
r.baseCards = baseCards
}
//获取地主
func (r *Room) getLandlord() *User {
return r.landlord
}
//设置地主
func (r *Room) setLandlord(landlord *User) {
r.landlord = landlord
}
//获取农民
func (r *Room) getFarmers() []*User {
return r.farmers
}
//设置农民
func (r *Room) setFarmers(users []*User) {
r.farmers = users
}
//获取当前可操作的玩家
func (r *Room) getCanHandleUser() *User {
return r.canHandleUser
}
/*
设置当前可操作的玩家
push:Handle_Push,userid,操作类型,当前底分,赛制
des:操作类型(0叫地主 1抢地主)
赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
设置当前可操作的玩家并设置倒计时
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
canHandleUser.countDown_handle(time.Second * 10)
r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//累加地主出牌次数
func (r *Room) updteLandlordPlayCardCount() {
r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//累加农民出牌次数
func (r *Room) updteFarmerPlayCardCount() {
r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//重开
func (r *Room) reStart() {
r.resetUsers()
r.closeUserCountDown()
r.SetRoomStatus(RoomStatus_Setout)
r.reset()
}
//玩家转变成地主
func (r *Room) userTurnLandlord(user *User) {
logger.Debugf("%s 成为地主", *user.getUID())
user.setLandlord(true)
r.setLandlord(user)
farmers := []*User{}
for _, u := range r.getUsers() {
if u != user {
farmers = append(farmers, u)
}
}
r.setFarmers(farmers)
r.addCardsToLandlord()
r.showBaseCards(nil)
r.openDouble()
}
/*
亮底牌
push:BaseCards_Push,地主userid,cardid$cardid$cardid,底牌类型,底牌倍数,是否加入牌中
*/
func (r *Room) showBaseCards(user *User) {
if r.getLandlord() == nil {
return
}
// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
cards := r.getBaseCards()
cardsType, multiple := r.getBaseCardsInfo()
userids := []string{}
addToCards := 0
if user == nil { //只执行一次(地主出现的时候)
//根据底牌加倍
if multiple > 1 {
r.setMultiple(r.getMultiple() * multiple)
r.pushMultiple()
}
userids = r.getUserIDs()
addToCards = 1
} else { //短线重连进来的
userids = []string{*user.getUserID()}
}
message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
if user == nil {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
r.pushJudgment("BaseCards_Push", message)
} else {
pushMessageToUsers("BaseCards_Push", []string{message}, userids)
}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
}
landlord.setCards(tmpCards)
}
}
/*
获取牌的类型(-1不是特殊底牌 0豹子 1同花 2顺子 3王炸 4同花顺)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
cardsType = -1
multiple = 1
var cards CardList = r.getBaseCards()
shunzi := []int{}
tonghua := map[int]bool{}
baozi := map[int]bool{}
wangzha := map[int]bool{}
for _, card := range cards {
if card.Priority < Priority_Two {
if len(shunzi) == 0 {
shunzi = append(shunzi, card.Priority)
} else {
if shunzi[len(shunzi)-1]+1 == card.Priority {
shunzi = append(shunzi, card.Priority)
}
}
}
tonghua[card.Suit] = true
baozi[card.Priority] = true
if card.Priority >= Priority_SKing {
wangzha[card.Priority] = true
}
}
isShunzi := len(shunzi) == 3
isTonghua := len(tonghua) == 1
isBaozi := len(baozi) == 1
isWangzha := len(wangzha) == 2
isTonghuaShun := isShunzi && isTonghua
if isTonghuaShun {
cardsType = 4
multiple = 4
} else if isWangzha && false {
cardsType = 3
multiple = 2
} else if isShunzi {
cardsType = 2
multiple = 2
} else if isTonghua {
cardsType = 1
multiple = 2
} else if isBaozi {
cardsType = 0
multiple = 2
}
return cardsType, multiple
}
//获取牌的ID列表
func (u *Room) getCardsID(cards []Card) *string {
buff := bytes.Buffer{}
for _, card := range cards {
buff.WriteString(fmt.Sprintf("%d$", card.ID))
}
cardsid := RemoveLastChar(buff)
return cardsid
}
//获取开赛后的状态
func (r *Room) getMatchingStatus() int {
return r.matchingStatus
}
//设置开赛后的状态
func (r *Room) setMatchingStatus(matchingStatus int) {
r.matchingStatus = matchingStatus
}
//获取裁判
func (r *Room) getJudgmentUser() *User {
return r.judgmentUser
}
//设置裁判
func (r *Room) setJudgmentUser(judgmentUser *User) {
r.judgmentUser = judgmentUser
}
//获取房间基数
func (r *Room) getCardinality() int {
return r.cardinality
}
//设置房间基数
func (r *Room) setCardinality(cardinality int) {
r.cardinality = cardinality
}
//获取房间底分
func (r *Room) getBaseScore() int {
return r.baseScore
}
//设置房间底分
func (r *Room) setBaseScore(baseScore int) {
r.baseScore = baseScore
}
/*
推送倍率
push:Multiple_Push,倍数
*/
func (r *Room) pushMultiple() {
multiple := strconv.Itoa(r.getRealityMultiple())
pus | ing{multiple}, r.getUserIDs())
r.pushJudgment("Multiple_Push", multiple)
}
//获取房间倍数
func (r *Room) getMultiple() int {
return r.multiple
}
//设置房间倍数
func (r *Room) setMultiple(multiple int) {
r.multiple = multiple
}
//两倍房间倍数并推送
func (r *Room) doubleMultiple() {
r.setMultiple(r.getMultiple() * 2)
r.pushMultiple()
}
//三倍房间倍数并推送
func (r *Room) tripleMultiple() {
r.setMultiple(r.getMultiple() * 3)
r.pushMultiple()
}
//获取流局倍数
func (r *Room) getLiujuMultiple() int {
return r.liujuMultiple
}
//设置流局倍数
func (r *Room) setLiujuMultiple(liujuMultiple int) {
r.liujuMultiple = liujuMultiple
}
//获取房间真实倍数
func (r *Room) getRealityMultiple() int {
return r.getMultiple() * r.getLiujuMultiple()
}
//更新出牌的轮次
func (r *Room) updatePlayRound() int {
r.playRound += 1
return r.playRound
}
//获取出牌的轮次
func (r *Room) getPlayRound() int {
return r.playRound
}
//设置出牌的轮次
func (r *Room) setPlayRound(playRound int) {
r.playRound = playRound
}
//更新出牌的次数
func (r *Room) updatePlayTime() int {
r.playTime += 1
return r.playTime
}
//获取出牌的次数
func (r *Room) getPlayTime() int {
return r.playTime
}
//获取出牌的次数
func (r *Room) setPlayTime(playTime int) {
r.playTime = playTime
}
//获取剩余大王的数量
func (r *Room) getSurplusBKingCount() int {
return r.surplusBKingCount
}
//设置剩余大王的数量
func (r *Room) setSurplusBKingCount(v int) {
r.surplusBKingCount = v
}
//更新剩余大王的数量
func (r *Room) updateSurplusBKingCount() {
r.surplusBKingCount = r.surplusBKingCount - 1
}
//获取剩余小王的数量
func (r *Room) getSurplusSKingCount() int {
return r.surplusSKingCount
}
//设置剩余小王的数量
func (r *Room) setSurplusSKingCount(v int) {
r.surplusSKingCount = v
}
//更新剩余小王的数量
func (r *Room) updateSurplusSKingCount() {
r.surplusSKingCount = r.surplusSKingCount - 1
}
//获取剩余2的数量
func (r *Room) getSurplusTwoCount() int {
return r.surplusTwoCount
}
//设置剩余2的数量
func (r *Room) setSurplusTwoCount(v int) {
r.surplusTwoCount = v
}
//更新剩余2的数量
func (r *Room) updateSurplusTwoCount() {
r.surplusTwoCount = r.surplusTwoCount - 1
}
//获取设置牌权的命令
func (r *Room) getSetCtlMsg() []string {
return r.setCtlMsg
}
//设置牌权的内容,推送残局时候用
func (r *Room) setSetCtlMsg(setCtlMsg []string) {
r.setCtlMsg = setCtlMsg
}
//获取初始牌数量是否完整
func (r *Room) initCardCountIsIntegrity() bool {
return cardCount == perCapitaCardCount
}
//获取房间人数
func (r *Room) GetPCount() int {
return r.pcount
}
//更新房间人数
func (r *Room) updatePCount(v int) {
r.pcount = r.pcount + v
}
//获取房间观战人数
func (r *Room) GetIdlePCount() int {
return len(r.idleusers)
}
//根据index获取玩家
func (r *Room) getUserByIndex(index int) *User {
return r.users[index]
}
//获取房间入座人数
func (r *Room) getUserCount() int {
count := 0
for _, user := range r.users {
if user != nil {
count += 1
}
}
return count
}
//获取准备中的玩家数量
func (r *Room) getSetoutCount() int {
count := 0
for _, user := range r.users {
if user != nil {
if user.getStatus() == UserStatus_Setout {
count += 1
}
}
}
return count
}
/*
获取玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
r.userids = nil
}
}
if r.userids == nil {
r.userids = []string{}
for _, user := range r.users {
if user != nil {
r.userids = append(r.userids, *user.userid)
}
}
}
return r.userids
}
/*
获取未落座玩家UserID字符串集合
in:是否刷新
*/
func (r *Room) getIdleUserIDs(args ...bool) []string {
if len(args) > 0 {
if args[0] {
r.idleuserids = nil
}
}
if r.idleuserids == nil {
r.idleuserids = []string{}
for _, user := range r.idleusers {
if user != nil {
r.idleuserids = append(r.idleuserids, *user.getUserID())
}
}
}
return r.idleuserids
}
/*
获取(UserID+IdleUserID)字符串集合
in:是否刷新
*/
func (r *Room) getAllUserIDs() []string {
userids := r.getUserIDs(true)
idleuserids := r.getIdleUserIDs(true)
userids = InsertStringSlice(userids, idleuserids, len(userids))
return userids
}
//获取比赛类型
func (r *Room) GetMatchID() int {
return r.matchid
}
//设置比赛类型
func (r *Room) setMatchID(matchID int) {
r.matchid = matchID
}
//获取总轮次
func (r *Room) getInnings() int {
return r.innings
}
//设置当前轮次
func (r *Room) setInnings(innings int) {
r.innings = innings
}
//获取当前轮次
func (r *Room) getInning() int {
return r.inning
}
//设置当前轮次
func (r *Room) setInning(inning int) {
r.inning = inning
}
//获取常规赛局数
func (r *Room) getInningRegular() int {
return r.inningRegular
}
//设置常规赛局数
func (r *Room) setInningRegular(inningRegular int) {
r.inningRegular = inningRegular
}
//获取房间类型
func (r *Room) GetRoomType() int {
return r.roomtype
}
//设置房间类型
func (r *Room) setRoomType(roomType int) {
r.roomtype = roomType
}
//获取牌权玩家
func (r *Room) getControllerUser() *User {
return r.cuser
}
//设置牌权玩家
func (r *Room) setControllerUser(user *User) {
r.cuser = user
}
//获取当前牌
func (r *Room) getCurrentCards() []Card {
return r.cards
}
//设置当前牌
func (r *Room) setCurrentCards(cards []Card) {
r.cards = cards
}
//获取当前牌的玩家
func (r *Room) getCurrentCardsUser() *User {
return r.cardsuser
}
//设置当前牌的玩家
func (r *Room) setCurrentCardsUser(user *User) {
r.cardsuser = user
}
//获取房间状态
func (r *Room) GetRoomStatus() int {
return r.status
}
//设置房间状态
func (r *Room) SetRoomStatus(status int) {
r.status = status
}
//获取落座的所有玩家
func (r *Room) getUsers() []*User {
return r.users
}
//获取未落座的所有玩家
func (r *Room) getIdleUsers() map[string]*User {
return r.idleusers
}
/*
把房间中所有玩家在负载均衡服务器上的信息都删除
重置玩家
*/
func (r *Room) deleteUsersInfo() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.deleteUserInfo()
}
}
}
/*
重置房间中所有的玩家
*/
func (r *Room) resetUsers() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.reset()
}
}
}
//关闭房间
func (r *Room) close() {
RoomManage.removeRoom(r)
}
//给裁判提送信息
func (r *Room) pushJudgment(funcName string, message string) {
if judgmentUser := r.getJudgmentUser(); judgmentUser != nil {
judgmentUser.push(funcName, &message)
}
}
//设置所有人托管状态
func (r *Room) SetAllUsersTrusteeshipStatus(status bool) {
for _, user := range r.getUsers() {
if user != nil {
user.trusteeship = status
}
}
}
/*
AllUsersOnlinePush reports the online status of every seated player to the judge.
For each non-nil seat it pushes "Online_Push" with "<userid>|<1 if online, 0 otherwise>".
*/
func (r *Room) AllUsersOnlinePush() {
	for _, u := range r.getUsers() {
		if u == nil {
			continue
		}
		online := 0
		if u.getOnline() {
			online = 1
		}
		r.pushJudgment("Online_Push", fmt.Sprintf("%s|%d", *u.getUserID(), online))
	}
}
| hMessageToUsers("Multiple_Push", []str | conditional_block |
room.go | /*
房间
*/
package engine
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"time"
. "kelei.com/utils/common"
"kelei.com/utils/logger"
)
/*
游戏规则
默认版{
1. 出牌时间15秒
2. 自动出牌1次托管
}
录制版{
1. 出牌时间30秒
2. 自动出牌不托管
}
*/
const (
GameRule_Normal = iota //默认版
GameRule_Record //录制版
)
const (
Match_JD = iota //经典
Match_HYTW //好友同玩
Match_HXS //海选赛
)
const (
CARDMODE_RANDOM = iota //随机
CARDMODE_NOWASH //不洗牌
)
const (
GAMETYPE_REGULAR = iota //常规赛
GAMETYPE_DOUBLE //加倍赛
)
const (
HANDLETYPE_CALL = iota //叫地主
HANDLETYPE_RUSH //抢地主
)
const (
RoomType_Primary = iota //初级
RoomType_Intermediate //中级
RoomType_Advanced //高级
RoomType_Master //大师
RoomType_Tribute //进贡
)
const (
SetController_NewCycle = iota //新一轮
SetController_Press //压牌
SetController_Pass //要不了
SetController_NoChange //没有变化
SetController_Liuju //流局
)
const (
RoomStatus_Setout = iota //准备
RoomStatus_Deal //发牌(可明牌)
RoomStatus_Handle //叫地主、抢地主、加倍(可明牌)
RoomStatus_Liuju //流局
RoomStatus_Match //开赛
)
const (
MatchingStatus_Run = iota //进行中
MatchingStatus_Pause //暂停
MatchingStatus_Over //结束
)
const (
PlayWaitTime = 10 //要不起的等待时间
PlayWaitTime_Long = 20 //其它的等待时间
)
type Room struct {
id string //id
matchid int //比赛类型
roomtype int //房间类型
pcount int //人数
status int //房间状态
matchingStatus int //开赛后的状态
users []*User //玩家列表
userids []string //玩家UserID集合
idleusers map[string]*User //未落座玩家列表
idleuserids []string //未落座玩家UserID集合
cuser *User //牌权的玩家
cards []Card //当前牌
cardsuser *User //当前牌的玩家
playTime int //出牌的次数
playRound int //出牌的轮次
users_cards map[string]string //当前轮所有人的出牌信息
inning int //当前局数
innings int //总局数
inningRegular int //常规赛局数
setCtlMsg []string //设置牌权的内容,推送残局的时候用
surplusBKingCount int //剩余大王数量
surplusSKingCount int //剩余小王数量
surplusTwoCount int //剩余2数量
cardinality int //基数
baseScore int //底分
multiple int //倍数
liujuMultiple int //流局倍数
playWaitTime int //要不起等待时间
playWaitTime_Long int //其它等待时间
gameRule int //游戏规则
firstController *User //第一个出牌的人
judgmentUser *User //裁判
records []*string //所有的记录(回放用)
dealMode int //发牌模式
cardMode int //牌的模式(随机、不洗牌)
gameType int //游戏类型
baseCards []Card //底牌
landlord *User //地主
farmers []*User //农民
canHandleUser *User //当前可操作的玩家
canCallLandlordUser *User //可叫地主的玩家
landlordPlayCardCount int //地主出牌次数
farmerPlayCardCount int //农民出牌次数
councilTask *Task //本局任务
usersVideoIntegral []int //玩家积分列表
springStatus int //春天的状态(0无1春天2反春)
}
func (r *Room) GetRoomID() *string {
return &r.id
}
func (r *Room) SetRoomID(roomid string) {
r.id = roomid
}
//configRoomByGameRule configures the room's wait timers according to the configured game rule.
func (r *Room) configRoomByGameRule() {
	r.playWaitTime = PlayWaitTime           // default wait when the player cannot beat the cards
	r.playWaitTime_Long = PlayWaitTime_Long // default wait for all other actions
	r.setGameRule(r.GetGameRuleConfig())
	if r.getGameRule() == GameRule_Record {
		// NOTE(review): these overrides equal the defaults (10/20), so the branch is a no-op.
		// The file-header comment says record mode should use a 30s play time — confirm intended values.
		r.playWaitTime = 10
		r.playWaitTime_Long = 20
	}
}
//重置
func (r *Room) reset() {
r.userids = nil
r.setPlayTime(0)
r.setPlayRound(0)
r.setSurplusBKingCount(4)
r.setSurplusSKingCount(4)
r.setSurplusTwoCount(16)
r.setControllerUser(nil)
r.setCurrentCards([]Card{})
r.setCurrentCardsUser(nil)
r.setSetCtlMsg([]string{})
r.setBaseScore(0)
r.setMultiple(1)
r.setLandlord(nil)
r.setLandlordPlayCardCount(0)
r.setFarmerPlayCardCount(0)
for _, user := range r.getUsers() {
if user != nil {
user.resume()
}
}
r.users_cards = make(map[string]string, pcount)
}
//设置房间的基础信息
func (r *Room) setRoomBaseInfo() {
allRoomData := *r.getAllRoomData()
arrAllRoomData := strings.Split(allRoomData, "|")
for _, roomData := range arrAllRoomData {
arrRoomData_s := strings.Split(roomData, "$")
arrRoomData := StrArrToIntArr(arrRoomData_s)
roomType, _, multiple := arrRoomData[0], arrRoomData[1], arrRoomData[2]
if roomType == r.GetRoomType() {
r.setMultiple(multiple)
break
}
}
}
//isHandling reports whether the room is in the pre-game action phase
//(call/rob landlord, doubling).
func (r *Room) isHandling() bool {
	return r.GetRoomStatus() == RoomStatus_Handle
}
//isMatching reports whether a match is in progress, i.e. the room has left
//the "ready" (Setout) state.
func (r *Room) isMatching() bool {
	return r.GetRoomStatus() != RoomStatus_Setout
}
//获取游戏规则
func (r *Room) getGameRule() int {
return r.gameRule
}
//设置游戏规则
func (r *Room) setGameRule(gameRule int) {
r.gameRule = gameRule
}
//获取发牌模式
func (r *Room) getDealMode() int {
return r.dealMode
}
//设置发牌模式
func (r *Room) setDealMode(dealMode int) {
r.dealMode = dealMode
}
//获取牌的模式
func (r *Room) GetCardMode() int {
return r.cardMode
}
//设置牌的模式
func (r *Room) SetCardMode(cardMode int) {
r.cardMode = cardMode
}
//获取游戏模式
func (r *Room) getGameType() int {
return r.gameType
}
//设置游戏模式
func (r *Room) setGameType(gameType int) {
r.gameType = gameType
}
//获取底牌
func (r *Room) getBaseCards() []Card {
return r.baseCards
}
//设置底牌
func (r *Room) setBaseCards(baseCards []Card) {
r.baseCards = baseCards
}
//获取地主
func (r *Room) getLandlord() *User {
return r.landlord
}
//设置地主
func (r *Room) setLandlord(landlord *User) {
r.landlord = landlord
}
//获取农民
func (r *Room) getFarmers() []*User {
return r.farmers
}
//设置农民
func (r *Room) setFarmers(users []*User) {
r.farmers = users
}
//获取当前可操作的玩家
func (r *Room) getCanHandleUser() *User {
return r.canHandleUser
}
/*
设置当前可操作的玩家
push:Handle_Push,userid,操作类型,当前底分,赛制
des:操作类型(0叫地主 1抢地主)
赛制(0常规赛 1加倍赛)
*/
func (r *Room) setCanHandleUser(canHandleUser *User, handleType int) {
r.canHandleUser = canHandleUser
message := fmt.Sprintf("%s,%d,%d,%d", *canHandleUser.getUserID(), handleType, r.getBaseScore(), r.getGameType())
pushMessageToUsers("Handle_Push", []string{message}, r.getUserIDs())
r.pushJudgment("Handle_Push", message)
}
/*
setCanHandleUserAndSetCountDown sets the player who may act next and starts their action countdown.
NOTE(review): the countdown is hard-coded to 10 seconds rather than using r.playWaitTime — confirm intended.
*/
func (r *Room) setCanHandleUserAndSetCountDown(canHandleUser *User, handleType int) {
	canHandleUser.countDown_handle(time.Second * 10)
	r.setCanHandleUser(canHandleUser, handleType)
}
//获取可以叫地主的玩家
func (r *Room) getCanCallLandlordUser() *User {
return r.canCallLandlordUser
}
//设置可以叫地主的玩家
func (r *Room) setCanCallLandlordUser(canCallLandlordUser *User) {
r.canCallLandlordUser = canCallLandlordUser
}
//获取地主出牌次数
func (r *Room) getLandlordPlayCardCount() int {
return r.landlordPlayCardCount
}
//设置地主出牌次数
func (r *Room) setLandlordPlayCardCount(count int) {
r.landlordPlayCardCount = count
}
//updteLandlordPlayCardCount increments the landlord's play count by one.
//TODO: the name has a typo ("updte" -> "update"); kept as-is for caller compatibility.
func (r *Room) updteLandlordPlayCardCount() {
	r.landlordPlayCardCount += 1
}
//获取农民出牌次数
func (r *Room) getFarmerPlayCardCount() int {
return r.farmerPlayCardCount
}
//设置农民出牌次数
func (r *Room) setFarmerPlayCardCount(count int) {
r.farmerPlayCardCount = count
}
//updteFarmerPlayCardCount increments the farmers' play count by one.
//TODO: the name has a typo ("updte" -> "update"); kept as-is for caller compatibility.
func (r *Room) updteFarmerPlayCardCount() {
	r.farmerPlayCardCount += 1
}
//获取本局任务
func (r *Room) getCouncilTask() *Task {
return r.councilTask
}
//设置本局任务
func (r *Room) setCouncilTask(councilTask *Task) {
r.councilTask = councilTask
}
//获取所有玩家的积分
func (r *Room) getUsersVideoIntegral() []int {
return r.usersVideoIntegral
}
//获取春天的状态
func (r *Room) getSpringStatus() int {
return r.springStatus
}
//设置春天的状态
func (r *Room) setSpringStatus(springStatus int) {
r.springStatus = springStatus
}
//根据userid获取玩家积分
func (r *Room) getUserVideoIntegral(user *User) int {
userIndex := user.getIndex()
return r.getUsersVideoIntegral()[userIndex]
}
//根据userid设置玩家积分
func (r *Room) setUserVideoIntegral(user *User, videoIntegral int) {
userIndex := user.getIndex()
r.getUsersVideoIntegral()[userIndex] = videoIntegral
}
//reStart restarts the room: resets the users, stops their countdowns, flips the
//room back to the "ready" (Setout) state, then clears the per-inning room state.
//Order matters: player state is cleared before the status change and the room reset.
func (r *Room) reStart() {
	r.resetUsers()
	r.closeUserCountDown()
	r.SetRoomStatus(RoomStatus_Setout)
	r.reset()
}
//userTurnLandlord promotes the given player to landlord, marks everyone else as
//farmers, merges the kitty into the landlord's hand, reveals it and opens doubling.
func (r *Room) userTurnLandlord(user *User) {
	logger.Debugf("%s 成为地主", *user.getUID())
	user.setLandlord(true)
	r.setLandlord(user)
	farmers := []*User{}
	for _, u := range r.getUsers() {
		if u != user {
			farmers = append(farmers, u) // NOTE(review): nil seats would be appended too — presumably all seats are filled here; confirm
		}
	}
	r.setFarmers(farmers)
	r.addCardsToLandlord() // merge the kitty cards into the landlord's hand
	r.showBaseCards(nil)   // nil => broadcast the kitty to all players
	r.openDouble()
}
/*
showBaseCards reveals the kitty (bottom) cards.
push: BaseCards_Push,<landlord userid>,<cardid$cardid$cardid>,<kitty type>,<kitty multiple>,<added-to-hand flag>
Called with user == nil exactly once (when the landlord is decided) to broadcast
to everyone; called with a specific user for reconnect catch-up.
*/
func (r *Room) showBaseCards(user *User) {
	// No landlord yet — nothing to reveal.
	if r.getLandlord() == nil {
		return
	}
	// r.setBaseCards([]Card{Card{Suit: 1, Priority: 1}, Card{Suit: 1, Priority: 2}, Card{Suit: 1, Priority: 3}})
	cards := r.getBaseCards()
	cardsType, multiple := r.getBaseCardsInfo()
	userids := []string{}
	addToCards := 0
	if user == nil { //broadcast path: runs only once, when the landlord appears
		//apply the kitty multiplier to the room multiple
		if multiple > 1 {
			r.setMultiple(r.getMultiple() * multiple)
			r.pushMultiple()
		}
		userids = r.getUserIDs()
		addToCards = 1
	} else { //reconnect path: only the returning player gets the push
		userids = []string{*user.getUserID()}
	}
	message := fmt.Sprintf("%s,%s,%d,%d,%d", *r.getLandlord().getUserID(), *r.getCardsID(cards), cardsType, multiple, addToCards)
	if user == nil {
		pushMessageToUsers("BaseCards_Push", []string{message}, userids)
		r.pushJudgment("BaseCards_Push", message) // judge is only notified on the broadcast
	} else {
		pushMessageToUsers("BaseCards_Push", []string{message}, userids)
	}
}
//将底牌放入地主牌面中
func (r *Room) addCardsToLandlord() {
cards := r.getBaseCards()
landlord := r.getLandlord()
if landlord != nil {
var tmpCards CardList
tmpCards = landlord.getCards()
tmpCards = append(tmpCards, cards...)
sort.Sort(tmpCards)
for i := 0; i < len(tmpCards); i++ {
tmpCards[i].Index = i
} |
/*
getBaseCardsInfo classifies the kitty cards.
Returns (cardsType, multiple):
  cardsType: -1 not special, 0 triple, 1 flush, 2 straight, 3 joker bomb (disabled), 4 straight flush
  multiple:  score multiplier granted by the combination (1 when not special)
*/
func (r *Room) getBaseCardsInfo() (cardsType int, multiple int) {
	cardsType = -1
	multiple = 1
	var cards CardList = r.getBaseCards()
	shunzi := []int{}         // run of consecutive priorities (straight candidate)
	tonghua := map[int]bool{} // set of distinct suits seen
	baozi := map[int]bool{}   // set of distinct priorities seen
	wangzha := map[int]bool{} // set of joker priorities seen
	for _, card := range cards {
		if card.Priority < Priority_Two { // only cards below 2 can extend a straight
			if len(shunzi) == 0 {
				shunzi = append(shunzi, card.Priority)
			} else {
				if shunzi[len(shunzi)-1]+1 == card.Priority {
					shunzi = append(shunzi, card.Priority)
				}
			}
		}
		tonghua[card.Suit] = true
		baozi[card.Priority] = true
		if card.Priority >= Priority_SKing {
			wangzha[card.Priority] = true
		}
	}
	// assumes the kitty holds exactly 3 cards, iterated in ascending priority — TODO confirm
	isShunzi := len(shunzi) == 3
	isTonghua := len(tonghua) == 1
	isBaozi := len(baozi) == 1
	isWangzha := len(wangzha) == 2
	isTonghuaShun := isShunzi && isTonghua
	if isTonghuaShun {
		cardsType = 4
		multiple = 4
	} else if isWangzha && false { // joker-bomb branch deliberately disabled via "&& false"
		cardsType = 3
		multiple = 2
	} else if isShunzi {
		cardsType = 2
		multiple = 2
	} else if isTonghua {
		cardsType = 1
		multiple = 2
	} else if isBaozi {
		cardsType = 0
		multiple = 2
	}
	return cardsType, multiple
}
//getCardsID returns a pointer to the "$"-separated list of card IDs for the
//given cards (e.g. "3$17$42"), with the trailing separator removed.
//Fix: receiver renamed u -> r for consistency with every other *Room method in this file.
func (r *Room) getCardsID(cards []Card) *string {
	buff := bytes.Buffer{}
	for _, card := range cards {
		buff.WriteString(fmt.Sprintf("%d$", card.ID))
	}
	cardsid := RemoveLastChar(buff)
	return cardsid
}
//获取开赛后的状态
func (r *Room) getMatchingStatus() int {
return r.matchingStatus
}
//设置开赛后的状态
func (r *Room) setMatchingStatus(matchingStatus int) {
r.matchingStatus = matchingStatus
}
//获取裁判
func (r *Room) getJudgmentUser() *User {
return r.judgmentUser
}
//设置裁判
func (r *Room) setJudgmentUser(judgmentUser *User) {
r.judgmentUser = judgmentUser
}
//获取房间基数
func (r *Room) getCardinality() int {
return r.cardinality
}
//设置房间基数
func (r *Room) setCardinality(cardinality int) {
r.cardinality = cardinality
}
//获取房间底分
func (r *Room) getBaseScore() int {
return r.baseScore
}
//设置房间底分
func (r *Room) setBaseScore(baseScore int) {
r.baseScore = baseScore
}
/*
推送倍率
push:Multiple_Push,倍数
*/
func (r *Room) pushMultiple() {
multiple := strconv.Itoa(r.getRealityMultiple())
pushMessageToUsers("Multiple_Push", []string{multiple}, r.getUserIDs())
r.pushJudgment("Multiple_Push", multiple)
}
//获取房间倍数
func (r *Room) getMultiple() int {
return r.multiple
}
//设置房间倍数
func (r *Room) setMultiple(multiple int) {
r.multiple = multiple
}
//两倍房间倍数并推送
func (r *Room) doubleMultiple() {
r.setMultiple(r.getMultiple() * 2)
r.pushMultiple()
}
//三倍房间倍数并推送
func (r *Room) tripleMultiple() {
r.setMultiple(r.getMultiple() * 3)
r.pushMultiple()
}
//获取流局倍数
func (r *Room) getLiujuMultiple() int {
return r.liujuMultiple
}
//设置流局倍数
func (r *Room) setLiujuMultiple(liujuMultiple int) {
r.liujuMultiple = liujuMultiple
}
//获取房间真实倍数
func (r *Room) getRealityMultiple() int {
return r.getMultiple() * r.getLiujuMultiple()
}
//更新出牌的轮次
func (r *Room) updatePlayRound() int {
r.playRound += 1
return r.playRound
}
//获取出牌的轮次
func (r *Room) getPlayRound() int {
return r.playRound
}
//设置出牌的轮次
func (r *Room) setPlayRound(playRound int) {
r.playRound = playRound
}
//更新出牌的次数
func (r *Room) updatePlayTime() int {
r.playTime += 1
return r.playTime
}
//获取出牌的次数
func (r *Room) getPlayTime() int {
return r.playTime
}
//获取出牌的次数
func (r *Room) setPlayTime(playTime int) {
r.playTime = playTime
}
//获取剩余大王的数量
func (r *Room) getSurplusBKingCount() int {
return r.surplusBKingCount
}
//设置剩余大王的数量
func (r *Room) setSurplusBKingCount(v int) {
r.surplusBKingCount = v
}
//更新剩余大王的数量
func (r *Room) updateSurplusBKingCount() {
r.surplusBKingCount = r.surplusBKingCount - 1
}
//获取剩余小王的数量
func (r *Room) getSurplusSKingCount() int {
return r.surplusSKingCount
}
//设置剩余小王的数量
func (r *Room) setSurplusSKingCount(v int) {
r.surplusSKingCount = v
}
//更新剩余小王的数量
func (r *Room) updateSurplusSKingCount() {
r.surplusSKingCount = r.surplusSKingCount - 1
}
//获取剩余2的数量
func (r *Room) getSurplusTwoCount() int {
return r.surplusTwoCount
}
//设置剩余2的数量
func (r *Room) setSurplusTwoCount(v int) {
r.surplusTwoCount = v
}
//更新剩余2的数量
func (r *Room) updateSurplusTwoCount() {
r.surplusTwoCount = r.surplusTwoCount - 1
}
//获取设置牌权的命令
func (r *Room) getSetCtlMsg() []string {
return r.setCtlMsg
}
//设置牌权的内容,推送残局时候用
func (r *Room) setSetCtlMsg(setCtlMsg []string) {
r.setCtlMsg = setCtlMsg
}
//获取初始牌数量是否完整
func (r *Room) initCardCountIsIntegrity() bool {
return cardCount == perCapitaCardCount
}
//获取房间人数
func (r *Room) GetPCount() int {
return r.pcount
}
//更新房间人数
func (r *Room) updatePCount(v int) {
r.pcount = r.pcount + v
}
//获取房间观战人数
func (r *Room) GetIdlePCount() int {
return len(r.idleusers)
}
//根据index获取玩家
func (r *Room) getUserByIndex(index int) *User {
return r.users[index]
}
//getUserCount returns how many seats in the room are actually occupied.
func (r *Room) getUserCount() int {
	n := 0
	for _, u := range r.users {
		if u != nil {
			n++
		}
	}
	return n
}
//getSetoutCount returns the number of seated players currently in the "ready" state.
func (r *Room) getSetoutCount() int {
	n := 0
	for _, u := range r.users {
		if u != nil && u.getStatus() == UserStatus_Setout {
			n++
		}
	}
	return n
}
/*
getUserIDs returns the cached UserID list of seated players.
An optional first boolean argument set to true forces a cache refresh.
*/
func (r *Room) getUserIDs(args ...bool) []string {
	if len(args) > 0 && args[0] {
		r.userids = nil // invalidate the cache on request
	}
	if r.userids != nil {
		return r.userids
	}
	ids := []string{}
	for _, u := range r.users {
		if u == nil {
			continue
		}
		ids = append(ids, *u.userid)
	}
	r.userids = ids
	return r.userids
}
/*
getIdleUserIDs returns the cached UserID list of spectating (unseated) players.
An optional first boolean argument set to true forces a cache refresh.
*/
func (r *Room) getIdleUserIDs(args ...bool) []string {
	if len(args) > 0 && args[0] {
		r.idleuserids = nil // invalidate the cache on request
	}
	if r.idleuserids != nil {
		return r.idleuserids
	}
	ids := []string{}
	for _, u := range r.idleusers {
		if u == nil {
			continue
		}
		ids = append(ids, *u.getUserID())
	}
	r.idleuserids = ids
	return r.idleuserids
}
/*
getAllUserIDs returns the combined (seated + spectator) UserID list,
refreshing both caches before concatenating them.
*/
func (r *Room) getAllUserIDs() []string {
	seated := r.getUserIDs(true)
	idle := r.getIdleUserIDs(true)
	return InsertStringSlice(seated, idle, len(seated))
}
//获取比赛类型
func (r *Room) GetMatchID() int {
return r.matchid
}
//设置比赛类型
func (r *Room) setMatchID(matchID int) {
r.matchid = matchID
}
//获取总轮次
func (r *Room) getInnings() int {
return r.innings
}
//设置当前轮次
func (r *Room) setInnings(innings int) {
r.innings = innings
}
//获取当前轮次
func (r *Room) getInning() int {
return r.inning
}
//设置当前轮次
func (r *Room) setInning(inning int) {
r.inning = inning
}
//获取常规赛局数
func (r *Room) getInningRegular() int {
return r.inningRegular
}
//设置常规赛局数
func (r *Room) setInningRegular(inningRegular int) {
r.inningRegular = inningRegular
}
//获取房间类型
func (r *Room) GetRoomType() int {
return r.roomtype
}
//设置房间类型
func (r *Room) setRoomType(roomType int) {
r.roomtype = roomType
}
//获取牌权玩家
func (r *Room) getControllerUser() *User {
return r.cuser
}
//设置牌权玩家
func (r *Room) setControllerUser(user *User) {
r.cuser = user
}
//获取当前牌
func (r *Room) getCurrentCards() []Card {
return r.cards
}
//设置当前牌
func (r *Room) setCurrentCards(cards []Card) {
r.cards = cards
}
//获取当前牌的玩家
func (r *Room) getCurrentCardsUser() *User {
return r.cardsuser
}
//设置当前牌的玩家
func (r *Room) setCurrentCardsUser(user *User) {
r.cardsuser = user
}
//获取房间状态
func (r *Room) GetRoomStatus() int {
return r.status
}
//设置房间状态
func (r *Room) SetRoomStatus(status int) {
r.status = status
}
//获取落座的所有玩家
func (r *Room) getUsers() []*User {
return r.users
}
//获取未落座的所有玩家
func (r *Room) getIdleUsers() map[string]*User {
return r.idleusers
}
/*
把房间中所有玩家在负载均衡服务器上的信息都删除
重置玩家
*/
func (r *Room) deleteUsersInfo() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.deleteUserInfo()
}
}
}
/*
重置房间中所有的玩家
*/
func (r *Room) resetUsers() {
users := r.getUsers()
for _, user := range users {
if user != nil {
user.reset()
}
}
}
//关闭房间
func (r *Room) close() {
RoomManage.removeRoom(r)
}
//pushJudgment forwards a message to the judge, if one is attached to the room.
func (r *Room) pushJudgment(funcName string, message string) {
	judge := r.getJudgmentUser()
	if judge == nil {
		return
	}
	judge.push(funcName, &message)
}
//SetAllUsersTrusteeshipStatus sets the auto-play (trusteeship) flag on every seated player.
func (r *Room) SetAllUsersTrusteeshipStatus(status bool) {
	for _, u := range r.getUsers() {
		if u == nil {
			continue
		}
		u.trusteeship = status
	}
}
/*
所有选手端是否在线
*/
func (r *Room) AllUsersOnlinePush() {
for _, user := range r.getUsers() {
if user != nil {
status := 0
if user.getOnline() {
status = 1
}
r.pushJudgment("Online_Push", fmt.Sprintf("%s|%d", *user.getUserID(), status))
}
}
} | landlord.setCards(tmpCards)
}
} | random_line_split |
workload_placement_nodelabel.go | /*
* Copyright 2022 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tests
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
nrtv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
schedutils "github.com/openshift-kni/numaresources-operator/test/e2e/sched/utils"
serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
"github.com/openshift-kni/numaresources-operator/test/utils/nodes"
"github.com/openshift-kni/numaresources-operator/test/utils/nrosched"
"github.com/openshift-kni/numaresources-operator/test/utils/objects"
e2ewait "github.com/openshift-kni/numaresources-operator/test/utils/objects/wait"
e2epadder "github.com/openshift-kni/numaresources-operator/test/utils/padder"
)
type getNodeAffinityFunc func(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity
var _ = Describe("[serial][disruptive][scheduler] numaresources workload placement considering node selector", func() {
var fxt *e2efixture.Fixture
var padder *e2epadder.Padder
var nrtList nrtv1alpha1.NodeResourceTopologyList
var nrts []nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
Expect(serialconfig.Config).ToNot(BeNil())
Expect(serialconfig.Config.Ready()).To(BeTrue(), "NUMA fixture initialization failed")
var err error
fxt, err = e2efixture.Setup("e2e-test-workload-placement-nodesel")
Expect(err).ToNot(HaveOccurred(), "unable to setup test fixture")
padder, err = e2epadder.New(fxt.Client, fxt.Namespace.Name)
Expect(err).ToNot(HaveOccurred())
err = fxt.Client.List(context.TODO(), &nrtList)
Expect(err).ToNot(HaveOccurred())
// we're ok with any TM policy as long as the updater can handle it,
// we use this as proxy for "there is valid NRT data for at least X nodes
policies := []nrtv1alpha1.TopologyManagerPolicy{
nrtv1alpha1.SingleNUMANodeContainerLevel,
nrtv1alpha1.SingleNUMANodePodLevel,
}
nrts = e2enrt.FilterByPolicies(nrtList.Items, policies)
if len(nrts) < 2 {
Skip(fmt.Sprintf("not enough nodes with valid policy - found %d", len(nrts)))
}
// Note that this test, being part of "serial", expects NO OTHER POD being scheduled
// in between, so we consider this information current and valid when the It()s run.
})
AfterEach(func() {
err := padder.Clean()
Expect(err).NotTo(HaveOccurred())
err = e2efixture.Teardown(fxt)
Expect(err).NotTo(HaveOccurred())
})
// note we hardcode the values we need here and when we pad node.
// This is ugly, but automatically computing the values is not straightforward
// and will we want to start lean and mean.
Context("with two labeled nodes with two NUMA zones", func() {
labelName := "size"
labelValueMedium := "medium"
labelValueLarge := "large"
var targetNodeName, alternativeNodeName string
var requiredRes corev1.ResourceList
var nrtCandidates []nrtv1alpha1.NodeResourceTopology
var targetNodeNRTInitial *nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
requiredNUMAZones := 2
By(fmt.Sprintf("filtering available nodes with at least %d NUMA zones", requiredNUMAZones))
nrtCandidates = e2enrt.FilterZoneCountEqual(nrts, requiredNUMAZones)
neededNodes := 2
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with %d NUMA Zones: found %d, needed %d", requiredNUMAZones, len(nrtCandidates), neededNodes))
}
// TODO: this should be >= 5x baseload
requiredRes = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("16Gi"),
}
// WARNING: This should be calculated as 3/4 of requiredRes
paddingRes := corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("12"),
corev1.ResourceMemory: resource.MustParse("12Gi"),
}
By("filtering available nodes with allocatable resources on at least one NUMA zone that can match request")
nrtCandidates = e2enrt.FilterAnyZoneMatchingResources(nrtCandidates, requiredRes)
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with NUMA zones each of them can match requests: found %d, needed: %d, request: %v", len(nrtCandidates), neededNodes, requiredRes))
}
nrtCandidateNames := e2enrt.AccumulateNames(nrtCandidates)
var ok bool
targetNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select a target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting target node we expect the pod will be scheduled into: %q", targetNodeName))
alternativeNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select an alternative target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting alternative node candidate for the scheduling: %q", alternativeNodeName))
// we need to also pad one of the labeled nodes.
nrtToPadNames := append(nrtCandidateNames.List(), alternativeNodeName)
By(fmt.Sprintf("Padding all other candidate nodes: %v", nrtToPadNames))
var paddingPods []*corev1.Pod
for nIdx, nodeName := range nrtToPadNames {
nrtInfo, err := e2enrt.FindFromList(nrtCandidates, nodeName)
Expect(err).NotTo(HaveOccurred(), "missing NRT info for %q", nodeName)
baseload, err := nodes.GetLoad(fxt.K8sClient, nodeName)
Expect(err).NotTo(HaveOccurred(), "cannot get the base load for %q", nodeName)
for zIdx, zone := range nrtInfo.Zones {
zoneRes := paddingRes.DeepCopy() // to be extra safe
if zIdx == 0 { // any zone is fine
baseload.Apply(zoneRes)
}
podName := fmt.Sprintf("padding%d-%d", nIdx, zIdx)
padPod, err := makePaddingPod(fxt.Namespace.Name, podName, zone, zoneRes)
Expect(err).NotTo(HaveOccurred(), "unable to create padding pod %q on zone %q", podName, zone.Name)
padPod, err = pinPodTo(padPod, nodeName, zone.Name)
Expect(err).NotTo(HaveOccurred(), "unable to pin pod %q to zone %q", podName, zone.Name)
err = fxt.Client.Create(context.TODO(), padPod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q on zone %q", podName, zone.Name)
paddingPods = append(paddingPods, padPod)
}
}
By("Waiting for padding pods to be ready")
failedPodIds := e2ewait.ForPaddingPodsRunning(fxt, paddingPods)
Expect(failedPodIds).To(BeEmpty(), "some padding pods have failed to run")
var err error
targetNodeNRTInitial, err = e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
})
It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", func() {
By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium))
unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
defer func() {
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
}()
unlabelAlternative, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
defer func() {
err := unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}()
By("Scheduling the testing pod")
pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod")
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria") | err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
//check that it didn't stop running for some time
By(fmt.Sprintf("ensuring the deployment %q keep being ready", deployment.Name))
Eventually(func() bool {
updatedDp := &appsv1.Deployment{}
err := fxt.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDp)
Expect(err).ToNot(HaveOccurred())
return e2ewait.IsDeploymentComplete(deployment, &updatedDp.Status)
}, time.Second*30, time.Second*5).Should(BeTrue(), "deployment %q became unready", deployment.Name)
},
Entry("[test_id:47597] should be able to schedule pod with affinity property requiredDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution),
Entry("[test_id:49843] should be able to schedule pod with affinity property prefferdDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution),
)
})
})
})
func createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{*nodeSelTerm},
},
},
}
return aff
}
func createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
prefTerm := &corev1.PreferredSchedulingTerm{
Weight: 1,
Preference: *nodeSelTerm,
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{*prefTerm},
},
}
return aff
} | nodesUnlabeled = true | random_line_split |
workload_placement_nodelabel.go | /*
* Copyright 2022 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tests
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
nrtv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
schedutils "github.com/openshift-kni/numaresources-operator/test/e2e/sched/utils"
serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
"github.com/openshift-kni/numaresources-operator/test/utils/nodes"
"github.com/openshift-kni/numaresources-operator/test/utils/nrosched"
"github.com/openshift-kni/numaresources-operator/test/utils/objects"
e2ewait "github.com/openshift-kni/numaresources-operator/test/utils/objects/wait"
e2epadder "github.com/openshift-kni/numaresources-operator/test/utils/padder"
)
type getNodeAffinityFunc func(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity
var _ = Describe("[serial][disruptive][scheduler] numaresources workload placement considering node selector", func() {
var fxt *e2efixture.Fixture
var padder *e2epadder.Padder
var nrtList nrtv1alpha1.NodeResourceTopologyList
var nrts []nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
Expect(serialconfig.Config).ToNot(BeNil())
Expect(serialconfig.Config.Ready()).To(BeTrue(), "NUMA fixture initialization failed")
var err error
fxt, err = e2efixture.Setup("e2e-test-workload-placement-nodesel")
Expect(err).ToNot(HaveOccurred(), "unable to setup test fixture")
padder, err = e2epadder.New(fxt.Client, fxt.Namespace.Name)
Expect(err).ToNot(HaveOccurred())
err = fxt.Client.List(context.TODO(), &nrtList)
Expect(err).ToNot(HaveOccurred())
// we're ok with any TM policy as long as the updater can handle it,
// we use this as proxy for "there is valid NRT data for at least X nodes
policies := []nrtv1alpha1.TopologyManagerPolicy{
nrtv1alpha1.SingleNUMANodeContainerLevel,
nrtv1alpha1.SingleNUMANodePodLevel,
}
nrts = e2enrt.FilterByPolicies(nrtList.Items, policies)
if len(nrts) < 2 {
Skip(fmt.Sprintf("not enough nodes with valid policy - found %d", len(nrts)))
}
// Note that this test, being part of "serial", expects NO OTHER POD being scheduled
// in between, so we consider this information current and valid when the It()s run.
})
AfterEach(func() {
err := padder.Clean()
Expect(err).NotTo(HaveOccurred())
err = e2efixture.Teardown(fxt)
Expect(err).NotTo(HaveOccurred())
})
// note we hardcode the values we need here and when we pad node.
// This is ugly, but automatically computing the values is not straightforward
// and will we want to start lean and mean.
Context("with two labeled nodes with two NUMA zones", func() {
labelName := "size"
labelValueMedium := "medium"
labelValueLarge := "large"
var targetNodeName, alternativeNodeName string
var requiredRes corev1.ResourceList
var nrtCandidates []nrtv1alpha1.NodeResourceTopology
var targetNodeNRTInitial *nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
requiredNUMAZones := 2
By(fmt.Sprintf("filtering available nodes with at least %d NUMA zones", requiredNUMAZones))
nrtCandidates = e2enrt.FilterZoneCountEqual(nrts, requiredNUMAZones)
neededNodes := 2
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with %d NUMA Zones: found %d, needed %d", requiredNUMAZones, len(nrtCandidates), neededNodes))
}
// TODO: this should be >= 5x baseload
requiredRes = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("16Gi"),
}
// WARNING: This should be calculated as 3/4 of requiredRes
paddingRes := corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("12"),
corev1.ResourceMemory: resource.MustParse("12Gi"),
}
By("filtering available nodes with allocatable resources on at least one NUMA zone that can match request")
nrtCandidates = e2enrt.FilterAnyZoneMatchingResources(nrtCandidates, requiredRes)
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with NUMA zones each of them can match requests: found %d, needed: %d, request: %v", len(nrtCandidates), neededNodes, requiredRes))
}
nrtCandidateNames := e2enrt.AccumulateNames(nrtCandidates)
var ok bool
targetNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select a target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting target node we expect the pod will be scheduled into: %q", targetNodeName))
alternativeNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select an alternative target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting alternative node candidate for the scheduling: %q", alternativeNodeName))
// we need to also pad one of the labeled nodes.
nrtToPadNames := append(nrtCandidateNames.List(), alternativeNodeName)
By(fmt.Sprintf("Padding all other candidate nodes: %v", nrtToPadNames))
var paddingPods []*corev1.Pod
for nIdx, nodeName := range nrtToPadNames {
nrtInfo, err := e2enrt.FindFromList(nrtCandidates, nodeName)
Expect(err).NotTo(HaveOccurred(), "missing NRT info for %q", nodeName)
baseload, err := nodes.GetLoad(fxt.K8sClient, nodeName)
Expect(err).NotTo(HaveOccurred(), "cannot get the base load for %q", nodeName)
for zIdx, zone := range nrtInfo.Zones {
zoneRes := paddingRes.DeepCopy() // to be extra safe
if zIdx == 0 { // any zone is fine
baseload.Apply(zoneRes)
}
podName := fmt.Sprintf("padding%d-%d", nIdx, zIdx)
padPod, err := makePaddingPod(fxt.Namespace.Name, podName, zone, zoneRes)
Expect(err).NotTo(HaveOccurred(), "unable to create padding pod %q on zone %q", podName, zone.Name)
padPod, err = pinPodTo(padPod, nodeName, zone.Name)
Expect(err).NotTo(HaveOccurred(), "unable to pin pod %q to zone %q", podName, zone.Name)
err = fxt.Client.Create(context.TODO(), padPod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q on zone %q", podName, zone.Name)
paddingPods = append(paddingPods, padPod)
}
}
By("Waiting for padding pods to be ready")
failedPodIds := e2ewait.ForPaddingPodsRunning(fxt, paddingPods)
Expect(failedPodIds).To(BeEmpty(), "some padding pods have failed to run")
var err error
targetNodeNRTInitial, err = e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
})
It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", func() {
By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium))
unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
defer func() {
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
}()
unlabelAlternative, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
defer func() {
err := unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}()
By("Scheduling the testing pod")
pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod")
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria")
nodesUnlabeled = true
err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
//check that it didn't stop running for some time
By(fmt.Sprintf("ensuring the deployment %q keep being ready", deployment.Name))
Eventually(func() bool {
updatedDp := &appsv1.Deployment{}
err := fxt.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDp)
Expect(err).ToNot(HaveOccurred())
return e2ewait.IsDeploymentComplete(deployment, &updatedDp.Status)
}, time.Second*30, time.Second*5).Should(BeTrue(), "deployment %q became unready", deployment.Name)
},
Entry("[test_id:47597] should be able to schedule pod with affinity property requiredDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution),
Entry("[test_id:49843] should be able to schedule pod with affinity property prefferdDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution),
)
})
})
})
func createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{*nodeSelTerm},
},
},
}
return aff
}
func | (labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
prefTerm := &corev1.PreferredSchedulingTerm{
Weight: 1,
Preference: *nodeSelTerm,
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{*prefTerm},
},
}
return aff
}
| createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution | identifier_name |
workload_placement_nodelabel.go | /*
* Copyright 2022 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tests
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
nrtv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
schedutils "github.com/openshift-kni/numaresources-operator/test/e2e/sched/utils"
serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
"github.com/openshift-kni/numaresources-operator/test/utils/nodes"
"github.com/openshift-kni/numaresources-operator/test/utils/nrosched"
"github.com/openshift-kni/numaresources-operator/test/utils/objects"
e2ewait "github.com/openshift-kni/numaresources-operator/test/utils/objects/wait"
e2epadder "github.com/openshift-kni/numaresources-operator/test/utils/padder"
)
type getNodeAffinityFunc func(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity
var _ = Describe("[serial][disruptive][scheduler] numaresources workload placement considering node selector", func() {
var fxt *e2efixture.Fixture
var padder *e2epadder.Padder
var nrtList nrtv1alpha1.NodeResourceTopologyList
var nrts []nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
Expect(serialconfig.Config).ToNot(BeNil())
Expect(serialconfig.Config.Ready()).To(BeTrue(), "NUMA fixture initialization failed")
var err error
fxt, err = e2efixture.Setup("e2e-test-workload-placement-nodesel")
Expect(err).ToNot(HaveOccurred(), "unable to setup test fixture")
padder, err = e2epadder.New(fxt.Client, fxt.Namespace.Name)
Expect(err).ToNot(HaveOccurred())
err = fxt.Client.List(context.TODO(), &nrtList)
Expect(err).ToNot(HaveOccurred())
// we're ok with any TM policy as long as the updater can handle it,
// we use this as proxy for "there is valid NRT data for at least X nodes
policies := []nrtv1alpha1.TopologyManagerPolicy{
nrtv1alpha1.SingleNUMANodeContainerLevel,
nrtv1alpha1.SingleNUMANodePodLevel,
}
nrts = e2enrt.FilterByPolicies(nrtList.Items, policies)
if len(nrts) < 2 {
Skip(fmt.Sprintf("not enough nodes with valid policy - found %d", len(nrts)))
}
// Note that this test, being part of "serial", expects NO OTHER POD being scheduled
// in between, so we consider this information current and valid when the It()s run.
})
AfterEach(func() {
err := padder.Clean()
Expect(err).NotTo(HaveOccurred())
err = e2efixture.Teardown(fxt)
Expect(err).NotTo(HaveOccurred())
})
// note we hardcode the values we need here and when we pad node.
// This is ugly, but automatically computing the values is not straightforward
// and will we want to start lean and mean.
Context("with two labeled nodes with two NUMA zones", func() {
labelName := "size"
labelValueMedium := "medium"
labelValueLarge := "large"
var targetNodeName, alternativeNodeName string
var requiredRes corev1.ResourceList
var nrtCandidates []nrtv1alpha1.NodeResourceTopology
var targetNodeNRTInitial *nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
requiredNUMAZones := 2
By(fmt.Sprintf("filtering available nodes with at least %d NUMA zones", requiredNUMAZones))
nrtCandidates = e2enrt.FilterZoneCountEqual(nrts, requiredNUMAZones)
neededNodes := 2
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with %d NUMA Zones: found %d, needed %d", requiredNUMAZones, len(nrtCandidates), neededNodes))
}
// TODO: this should be >= 5x baseload
requiredRes = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("16Gi"),
}
// WARNING: This should be calculated as 3/4 of requiredRes
paddingRes := corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("12"),
corev1.ResourceMemory: resource.MustParse("12Gi"),
}
By("filtering available nodes with allocatable resources on at least one NUMA zone that can match request")
nrtCandidates = e2enrt.FilterAnyZoneMatchingResources(nrtCandidates, requiredRes)
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with NUMA zones each of them can match requests: found %d, needed: %d, request: %v", len(nrtCandidates), neededNodes, requiredRes))
}
nrtCandidateNames := e2enrt.AccumulateNames(nrtCandidates)
var ok bool
targetNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select a target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting target node we expect the pod will be scheduled into: %q", targetNodeName))
alternativeNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select an alternative target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting alternative node candidate for the scheduling: %q", alternativeNodeName))
// we need to also pad one of the labeled nodes.
nrtToPadNames := append(nrtCandidateNames.List(), alternativeNodeName)
By(fmt.Sprintf("Padding all other candidate nodes: %v", nrtToPadNames))
var paddingPods []*corev1.Pod
for nIdx, nodeName := range nrtToPadNames {
nrtInfo, err := e2enrt.FindFromList(nrtCandidates, nodeName)
Expect(err).NotTo(HaveOccurred(), "missing NRT info for %q", nodeName)
baseload, err := nodes.GetLoad(fxt.K8sClient, nodeName)
Expect(err).NotTo(HaveOccurred(), "cannot get the base load for %q", nodeName)
for zIdx, zone := range nrtInfo.Zones {
zoneRes := paddingRes.DeepCopy() // to be extra safe
if zIdx == 0 { // any zone is fine
baseload.Apply(zoneRes)
}
podName := fmt.Sprintf("padding%d-%d", nIdx, zIdx)
padPod, err := makePaddingPod(fxt.Namespace.Name, podName, zone, zoneRes)
Expect(err).NotTo(HaveOccurred(), "unable to create padding pod %q on zone %q", podName, zone.Name)
padPod, err = pinPodTo(padPod, nodeName, zone.Name)
Expect(err).NotTo(HaveOccurred(), "unable to pin pod %q to zone %q", podName, zone.Name)
err = fxt.Client.Create(context.TODO(), padPod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q on zone %q", podName, zone.Name)
paddingPods = append(paddingPods, padPod)
}
}
By("Waiting for padding pods to be ready")
failedPodIds := e2ewait.ForPaddingPodsRunning(fxt, paddingPods)
Expect(failedPodIds).To(BeEmpty(), "some padding pods have failed to run")
var err error
targetNodeNRTInitial, err = e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
})
It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", func() {
By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium))
unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
defer func() {
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
}()
unlabelAlternative, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
defer func() {
err := unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}()
By("Scheduling the testing pod")
pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod")
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria")
nodesUnlabeled = true
err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
//check that it didn't stop running for some time
By(fmt.Sprintf("ensuring the deployment %q keep being ready", deployment.Name))
Eventually(func() bool {
updatedDp := &appsv1.Deployment{}
err := fxt.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDp)
Expect(err).ToNot(HaveOccurred())
return e2ewait.IsDeploymentComplete(deployment, &updatedDp.Status)
}, time.Second*30, time.Second*5).Should(BeTrue(), "deployment %q became unready", deployment.Name)
},
Entry("[test_id:47597] should be able to schedule pod with affinity property requiredDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution),
Entry("[test_id:49843] should be able to schedule pod with affinity property prefferdDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution),
)
})
})
})
func createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity |
func createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
prefTerm := &corev1.PreferredSchedulingTerm{
Weight: 1,
Preference: *nodeSelTerm,
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{*prefTerm},
},
}
return aff
}
| {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{*nodeSelTerm},
},
},
}
return aff
} | identifier_body |
workload_placement_nodelabel.go | /*
* Copyright 2022 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tests
import (
"context"
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
nrtv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
e2ereslist "github.com/openshift-kni/numaresources-operator/internal/resourcelist"
schedutils "github.com/openshift-kni/numaresources-operator/test/e2e/sched/utils"
serialconfig "github.com/openshift-kni/numaresources-operator/test/e2e/serial/config"
e2efixture "github.com/openshift-kni/numaresources-operator/test/utils/fixture"
e2enrt "github.com/openshift-kni/numaresources-operator/test/utils/noderesourcetopologies"
"github.com/openshift-kni/numaresources-operator/test/utils/nodes"
"github.com/openshift-kni/numaresources-operator/test/utils/nrosched"
"github.com/openshift-kni/numaresources-operator/test/utils/objects"
e2ewait "github.com/openshift-kni/numaresources-operator/test/utils/objects/wait"
e2epadder "github.com/openshift-kni/numaresources-operator/test/utils/padder"
)
type getNodeAffinityFunc func(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity
var _ = Describe("[serial][disruptive][scheduler] numaresources workload placement considering node selector", func() {
var fxt *e2efixture.Fixture
var padder *e2epadder.Padder
var nrtList nrtv1alpha1.NodeResourceTopologyList
var nrts []nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
Expect(serialconfig.Config).ToNot(BeNil())
Expect(serialconfig.Config.Ready()).To(BeTrue(), "NUMA fixture initialization failed")
var err error
fxt, err = e2efixture.Setup("e2e-test-workload-placement-nodesel")
Expect(err).ToNot(HaveOccurred(), "unable to setup test fixture")
padder, err = e2epadder.New(fxt.Client, fxt.Namespace.Name)
Expect(err).ToNot(HaveOccurred())
err = fxt.Client.List(context.TODO(), &nrtList)
Expect(err).ToNot(HaveOccurred())
// we're ok with any TM policy as long as the updater can handle it,
// we use this as proxy for "there is valid NRT data for at least X nodes
policies := []nrtv1alpha1.TopologyManagerPolicy{
nrtv1alpha1.SingleNUMANodeContainerLevel,
nrtv1alpha1.SingleNUMANodePodLevel,
}
nrts = e2enrt.FilterByPolicies(nrtList.Items, policies)
if len(nrts) < 2 {
Skip(fmt.Sprintf("not enough nodes with valid policy - found %d", len(nrts)))
}
// Note that this test, being part of "serial", expects NO OTHER POD being scheduled
// in between, so we consider this information current and valid when the It()s run.
})
AfterEach(func() {
err := padder.Clean()
Expect(err).NotTo(HaveOccurred())
err = e2efixture.Teardown(fxt)
Expect(err).NotTo(HaveOccurred())
})
// note we hardcode the values we need here and when we pad node.
// This is ugly, but automatically computing the values is not straightforward
// and will we want to start lean and mean.
Context("with two labeled nodes with two NUMA zones", func() {
labelName := "size"
labelValueMedium := "medium"
labelValueLarge := "large"
var targetNodeName, alternativeNodeName string
var requiredRes corev1.ResourceList
var nrtCandidates []nrtv1alpha1.NodeResourceTopology
var targetNodeNRTInitial *nrtv1alpha1.NodeResourceTopology
BeforeEach(func() {
requiredNUMAZones := 2
By(fmt.Sprintf("filtering available nodes with at least %d NUMA zones", requiredNUMAZones))
nrtCandidates = e2enrt.FilterZoneCountEqual(nrts, requiredNUMAZones)
neededNodes := 2
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with %d NUMA Zones: found %d, needed %d", requiredNUMAZones, len(nrtCandidates), neededNodes))
}
// TODO: this should be >= 5x baseload
requiredRes = corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("16"),
corev1.ResourceMemory: resource.MustParse("16Gi"),
}
// WARNING: This should be calculated as 3/4 of requiredRes
paddingRes := corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("12"),
corev1.ResourceMemory: resource.MustParse("12Gi"),
}
By("filtering available nodes with allocatable resources on at least one NUMA zone that can match request")
nrtCandidates = e2enrt.FilterAnyZoneMatchingResources(nrtCandidates, requiredRes)
if len(nrtCandidates) < neededNodes {
Skip(fmt.Sprintf("not enough nodes with NUMA zones each of them can match requests: found %d, needed: %d, request: %v", len(nrtCandidates), neededNodes, requiredRes))
}
nrtCandidateNames := e2enrt.AccumulateNames(nrtCandidates)
var ok bool
targetNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select a target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting target node we expect the pod will be scheduled into: %q", targetNodeName))
alternativeNodeName, ok = nrtCandidateNames.PopAny()
Expect(ok).To(BeTrue(), "cannot select an alternative target node among %#v", nrtCandidateNames.List())
By(fmt.Sprintf("selecting alternative node candidate for the scheduling: %q", alternativeNodeName))
// we need to also pad one of the labeled nodes.
nrtToPadNames := append(nrtCandidateNames.List(), alternativeNodeName)
By(fmt.Sprintf("Padding all other candidate nodes: %v", nrtToPadNames))
var paddingPods []*corev1.Pod
for nIdx, nodeName := range nrtToPadNames {
nrtInfo, err := e2enrt.FindFromList(nrtCandidates, nodeName)
Expect(err).NotTo(HaveOccurred(), "missing NRT info for %q", nodeName)
baseload, err := nodes.GetLoad(fxt.K8sClient, nodeName)
Expect(err).NotTo(HaveOccurred(), "cannot get the base load for %q", nodeName)
for zIdx, zone := range nrtInfo.Zones {
zoneRes := paddingRes.DeepCopy() // to be extra safe
if zIdx == 0 { // any zone is fine
baseload.Apply(zoneRes)
}
podName := fmt.Sprintf("padding%d-%d", nIdx, zIdx)
padPod, err := makePaddingPod(fxt.Namespace.Name, podName, zone, zoneRes)
Expect(err).NotTo(HaveOccurred(), "unable to create padding pod %q on zone %q", podName, zone.Name)
padPod, err = pinPodTo(padPod, nodeName, zone.Name)
Expect(err).NotTo(HaveOccurred(), "unable to pin pod %q to zone %q", podName, zone.Name)
err = fxt.Client.Create(context.TODO(), padPod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q on zone %q", podName, zone.Name)
paddingPods = append(paddingPods, padPod)
}
}
By("Waiting for padding pods to be ready")
failedPodIds := e2ewait.ForPaddingPodsRunning(fxt, paddingPods)
Expect(failedPodIds).To(BeEmpty(), "some padding pods have failed to run")
var err error
targetNodeNRTInitial, err = e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
})
It("[test_id:47598][tier2] should place the pod in the node with available resources in one NUMA zone and fulfilling node selector", func() {
By(fmt.Sprintf("Labeling nodes %q and %q with label %q:%q", targetNodeName, alternativeNodeName, labelName, labelValueMedium))
unlabelTarget, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
defer func() {
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
}()
unlabelAlternative, err := labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
defer func() {
err := unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}()
By("Scheduling the testing pod")
pod := objects.NewTestPodPause(fxt.Namespace.Name, "testpod")
pod.Spec.SchedulerName = serialconfig.Config.SchedulerName
pod.Spec.Containers[0].Resources.Limits = requiredRes
pod.Spec.NodeSelector = map[string]string{
labelName: labelValueMedium,
}
err = fxt.Client.Create(context.TODO(), pod)
Expect(err).NotTo(HaveOccurred(), "unable to create pod %q", pod.Name)
By("waiting for pod to be running")
updatedPod, err := e2ewait.ForPodPhase(fxt.Client, pod.Namespace, pod.Name, corev1.PodRunning, 1*time.Minute)
if err != nil {
_ = objects.LogEventsForPod(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name)
}
Expect(err).NotTo(HaveOccurred())
By("checking the pod has been scheduled in the proper node")
Expect(updatedPod.Spec.NodeName).To(Equal(targetNodeName))
By(fmt.Sprintf("checking the pod was scheduled with the topology aware scheduler %q", serialconfig.Config.SchedulerName))
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", updatedPod.Namespace, updatedPod.Name, serialconfig.Config.SchedulerName)
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
})
Context("label two nodes with different label values but both matching the node affinity of the deployment pod of the test", func() {
var unlabelTarget, unlabelAlternative func() error
nodesUnlabeled := false
BeforeEach(func() {
By(fmt.Sprintf("Labeling target node %q with label %q:%q and the alternative node %q with label %q:%q", targetNodeName, labelName, labelValueLarge, alternativeNodeName, labelName, labelValueMedium))
var err error
unlabelTarget, err = labelNodeWithValue(fxt.Client, labelName, labelValueLarge, targetNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", targetNodeName)
unlabelAlternative, err = labelNodeWithValue(fxt.Client, labelName, labelValueMedium, alternativeNodeName)
Expect(err).NotTo(HaveOccurred(), "unable to label node %q", alternativeNodeName)
})
AfterEach(func() {
if !nodesUnlabeled {
/*if we are here this means one of these:
1. the test failed before getting to the step where it removes the labels
2. the test failed to remove the labels during the test's check so try again here
Note that unlabeling an already unlabeled node will not result in an error,
so this condition is only to avoid extra minor operations
*/
err := unlabelTarget()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil {
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
}
}
})
DescribeTable("[tier2] a guaranteed deployment pod with nodeAffinity should be scheduled on one NUMA zone on a matching labeled node with enough resources",
func(getNodeAffFunc getNodeAffinityFunc) {
affinity := getNodeAffFunc(labelName, []string{labelValueLarge, labelValueMedium}, corev1.NodeSelectorOpIn)
By(fmt.Sprintf("create a deployment with one guaranteed pod with node affinity property: %+v ", affinity.NodeAffinity))
deploymentName := "test-dp"
var replicas int32 = 1
podLabels := map[string]string{
"test": "test-dp",
}
deployment := objects.NewTestDeployment(replicas, podLabels, nil, fxt.Namespace.Name, deploymentName, objects.PauseImage, []string{objects.PauseCommand}, []string{})
deployment.Spec.Template.Spec.SchedulerName = serialconfig.Config.SchedulerName
deployment.Spec.Template.Spec.Containers[0].Resources.Limits = requiredRes
deployment.Spec.Template.Spec.Affinity = affinity
klog.Infof("create the test deployment with requests %s", e2ereslist.ToString(requiredRes))
err := fxt.Client.Create(context.TODO(), deployment)
Expect(err).NotTo(HaveOccurred(), "unable to create deployment %q", deployment.Name)
By("waiting for deployment to be up & running")
dpRunningTimeout := 1 * time.Minute
dpRunningPollInterval := 10 * time.Second
err = e2ewait.ForDeploymentComplete(fxt.Client, deployment, dpRunningPollInterval, dpRunningTimeout)
Expect(err).NotTo(HaveOccurred(), "Deployment %q not up & running after %v", deployment.Name, dpRunningTimeout)
By(fmt.Sprintf("checking deployment pods have been scheduled with the topology aware scheduler %q and in the proper node %q", serialconfig.Config.SchedulerName, targetNodeName))
pods, err := schedutils.ListPodsByDeployment(fxt.Client, *deployment)
Expect(err).NotTo(HaveOccurred(), "Unable to get pods from Deployment %q: %v", deployment.Name, err)
for _, pod := range pods {
Expect(pod.Spec.NodeName).To(Equal(targetNodeName), "pod %s/%s is scheduled on node %q but expected to be on the target node %q", pod.Namespace, pod.Name, targetNodeName)
schedOK, err := nrosched.CheckPODWasScheduledWith(fxt.K8sClient, pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
Expect(err).ToNot(HaveOccurred())
Expect(schedOK).To(BeTrue(), "pod %s/%s not scheduled with expected scheduler %s", pod.Namespace, pod.Name, serialconfig.Config.SchedulerName)
}
By("Verifing the NRT statistics are updated")
targetNodeNRTCurrent, err := e2enrt.FindFromList(nrtCandidates, targetNodeName)
Expect(err).NotTo(HaveOccurred())
Expect(e2enrt.CheckEqualAvailableResources(*targetNodeNRTInitial, *targetNodeNRTCurrent)).To(BeTrue(), "target node %q initial resources and current resources are different", targetNodeName)
By("unlabel nodes during execution and check that the test's pod was not evicted due to shaked matching criteria")
nodesUnlabeled = true
err = unlabelTarget()
//if at least on of the unlabling failed, set nodesUnlabeled to false to try again in afterEach
if err != nil {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", targetNodeName, err)
}
err = unlabelAlternative()
if err != nil |
//check that it didn't stop running for some time
By(fmt.Sprintf("ensuring the deployment %q keep being ready", deployment.Name))
Eventually(func() bool {
updatedDp := &appsv1.Deployment{}
err := fxt.Client.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDp)
Expect(err).ToNot(HaveOccurred())
return e2ewait.IsDeploymentComplete(deployment, &updatedDp.Status)
}, time.Second*30, time.Second*5).Should(BeTrue(), "deployment %q became unready", deployment.Name)
},
Entry("[test_id:47597] should be able to schedule pod with affinity property requiredDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution),
Entry("[test_id:49843] should be able to schedule pod with affinity property prefferdDuringSchedulingIgnoredDuringExecution on the available node with feasible numa zone", createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution),
)
})
})
})
func createNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{*nodeSelTerm},
},
},
}
return aff
}
func createNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(labelName string, labelValue []string, selectOperator corev1.NodeSelectorOperator) *corev1.Affinity {
nodeSelReq := &corev1.NodeSelectorRequirement{
Key: labelName,
Operator: selectOperator,
Values: labelValue,
}
nodeSelTerm := &corev1.NodeSelectorTerm{
MatchExpressions: []corev1.NodeSelectorRequirement{*nodeSelReq},
MatchFields: []corev1.NodeSelectorRequirement{},
}
prefTerm := &corev1.PreferredSchedulingTerm{
Weight: 1,
Preference: *nodeSelTerm,
}
aff := &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{*prefTerm},
},
}
return aff
}
| {
nodesUnlabeled = false
klog.Errorf("Error while trying to unlabel node %q. %v", alternativeNodeName, err)
} | conditional_block |
pix2pix_GAN.py | from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import BatchNormalization
from keras.layers import Conv2DTranspose
from keras.layers import Dropout
from keras.utils.vis_utils import plot_model
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import os
from pandas import DataFrame
import pandas as pd
from PIL import Image
from random import randint
import copy
import shutil
import glob
# define the discriminator model
def define_discriminator(image_shape, learning_rate_discriminator = 0.0002):
# weight initialization
init = RandomNormal(stddev=0.02)
# source image input
in_src_image = Input(shape=image_shape)
# target image input
in_target_image = Input(shape=image_shape)
# concatenate images channel-wise
merged = Concatenate()([in_src_image, in_target_image])
# C64
d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(merged)
d = LeakyReLU(alpha=0.2)(d)
# C128
d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C256
d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C512
d = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# second last output layer
d = Conv2D(512, (4,4), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# patch output
d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
patch_out = Activation('sigmoid')(d)
# define model
model = Model([in_src_image, in_target_image], patch_out)
# compile model
opt = Adam(lr=learning_rate_discriminator, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, loss_weights=[0.5])
return model
# define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add downsampling layer
g = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# conditionally add batch normalization
if batchnorm:
g = BatchNormalization()(g, training=True)
# leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# add batch normalization
g = BatchNormalization()(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# define the standalone generator model
def | (image_shape=(128,128,4)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model: C64-C128-C256-C512-C512-C512-C512-C512
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
# e7 = define_encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
b = Activation('relu')(b)
# decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
# d1 = decoder_block(b, e7, 512)
d2 = decoder_block(b, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False)
d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
# make weights in the discriminator not trainable
d_model.trainable = False
# define the source image
in_src = Input(shape=image_shape)
# connect the source image to the generator input. The input to the generator are
# images with only obstacles
gen_out = g_model(in_src)
# connect the source input and generator output to the discriminator input
dis_out = d_model([in_src, gen_out])
# src image as input, generated image and classification output
model = Model(in_src, [dis_out, gen_out])
# compile model
opt = Adam(lr=learning_rate_generator, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# unpack dataset
image_obsta, image_paths_n_obsta = dataset
# choose random instances
indices = list(range(0,image_obsta.shape[0]))
random.shuffle(indices)
ix = indices[0:n_samples]
# retrieve selected images
X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
# generate fake instance
X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_paths.pdf")
fig2 = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig2.add_gridspec(1,2)
ax[0] = fig2.add_subplot(gs[0, 0])
ax[1] = fig2.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_obst.pdf")
discriminator_loss_real.append(d_loss1)
discriminator_loss_fake.append(d_loss2)
generator_loss.append(g_loss)
discriminator_loss.append(d_loss1+d_loss2)
print(i)
# save the plots for loss etc
x = np.linspace(0, n_steps, n_steps)
plt.figure()
plt.plot(x, discriminator_loss, color = 'blue')
plt.ylabel('Discriminator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator','generator'))
plt.savefig(save_path+'/loss_discriminator.pdf')
plt.figure()
plt.plot(x, generator_loss, color = 'orange')
plt.ylabel('Generator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator loss for fake images','discriminator loss for real images'))
plt.savefig(save_path+'/loss_generator.pdf')
writer = pd.ExcelWriter(save_path+'/loss.xlsx', engine='xlsxwriter')
df1 = DataFrame({'Generator Loss': generator_loss, 'Discriminator Loss': discriminator_loss, 'Discriminator Loss for Real Images': discriminator_loss_real, 'Discriminator Loss for Fake Images': discriminator_loss_fake})
df1.to_excel(writer, sheet_name='sheet1', index=False)
writer.save()
# Saving the Gnerator Model and weights since that is the only one necessary
model_json = g_model.to_json()
with open(save_path+'/Generator_model_tex.json', "w") as json_file:
json_file.write(model_json)
g_model.save_weights(save_path+'/Generator_model_weights_tex.h5')
def load_model_and_check(load_path, test_data):
json_file = open(load_path+'/Generator_model_tex.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
print('Model loaded')
loaded_model.load_weights(load_path+'/Generator_model_weights_tex.h5')
for i in range(test_data.shape[0]):
rand_im = test_data[i]
rand_im = rand_im[np.newaxis,:,:,:]
generated_image = loaded_model.predict(rand_im)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(rand_im,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Test Image as Input', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(load_path +'/Test_Image_Level4_'+ str(i)+'.pdf')
def load_images(folder, im_size = (128,128), col = 1):
# load color images after resizing them !
im_list = []
for filename in os.listdir(folder):
p = os.path.join(folder, filename)
if p == folder + '/.DS_Store':
continue
# img = mpimg.imread(p)
if(col == 1):
img = Image.open(p).convert('L')
else:
img = Image.open(p)
im_resize = img.resize(im_size, Image.ANTIALIAS)
im_list.append(np.ravel(im_resize)) # flattened the images, we need to reshape them before printing
image_list = np.array(im_list)
return image_list
if __name__ == '__main__':
# define image shape
image_shape = (128,128,4)
image_size = (128,128)
col = 4 # set to 4 for color images and 1 for black and white images
image_tp = 'circuit'
#-------------------------------
ver = 13
lr_discriminator = 0.0001
lr_generator = 0.001
num_epochs = 5
num_batch = 1 # ensure that the batch size dives the number of samples entirely
# base_path = '/home/s3494950/thesis'
base_path = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code'
# load_path = base_path+'/Data/circuitImages/usefulCircuits/withObstacles_withoutNoise'
load_path = base_path+'/Data/circuitImages/usefulCircuits/smallerset_obstacles' # 56 items
# load_path = base_path+'/Data/biggerDataset'
save_path = base_path + '/Results/Trained_final_GANs/pix2pix/circuit_' + str(ver)
#-------------------------------
# images = load_images(load_path, image_size, col)
# images = np.reshape(images, (images.shape[0], image_size[0], image_size[1], col))
# d_model = define_discriminator(image_shape, learning_rate_discriminator=lr_discriminator)
# g_model = define_generator(image_shape)
# gan_model = define_gan(g_model, d_model, image_shape, learning_rate_generator=lr_generator)
# # load image data. [image_obsta, image_paths_n_obsta]
# im = copy.deepcopy(images)
# dataset = [remove_paths(im),images]
# print("removed paths")
# # train model
# train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs = num_epochs, n_batch=num_batch)
p = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code/Data/circuitImages/usefulCircuits/test_obstacles'
testing_data = load_images(p, image_size, col)
testing_data = np.reshape(testing_data, (testing_data.shape[0], image_size[0], image_size[1], col))
load_model_and_check(save_path, testing_data)
| define_generator | identifier_name |
pix2pix_GAN.py | from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import BatchNormalization
from keras.layers import Conv2DTranspose
from keras.layers import Dropout
from keras.utils.vis_utils import plot_model
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import os
from pandas import DataFrame
import pandas as pd
from PIL import Image
from random import randint
import copy
import shutil
import glob
# define the discriminator model
def define_discriminator(image_shape, learning_rate_discriminator = 0.0002):
# weight initialization
init = RandomNormal(stddev=0.02)
# source image input
in_src_image = Input(shape=image_shape)
# target image input
in_target_image = Input(shape=image_shape)
# concatenate images channel-wise
merged = Concatenate()([in_src_image, in_target_image])
# C64
d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(merged)
d = LeakyReLU(alpha=0.2)(d)
# C128
d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C256
d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C512
d = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# second last output layer
d = Conv2D(512, (4,4), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# patch output
d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
patch_out = Activation('sigmoid')(d)
# define model
model = Model([in_src_image, in_target_image], patch_out)
# compile model
opt = Adam(lr=learning_rate_discriminator, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, loss_weights=[0.5])
return model
# define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add downsampling layer
g = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# conditionally add batch normalization
if batchnorm:
g = BatchNormalization()(g, training=True)
# leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# add batch normalization
g = BatchNormalization()(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# define the standalone generator model
def define_generator(image_shape=(128,128,4)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model: C64-C128-C256-C512-C512-C512-C512-C512
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
# e7 = define_encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
b = Activation('relu')(b) | d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
# make weights in the discriminator not trainable
d_model.trainable = False
# define the source image
in_src = Input(shape=image_shape)
# connect the source image to the generator input. The input to the generator are
# images with only obstacles
gen_out = g_model(in_src)
# connect the source input and generator output to the discriminator input
dis_out = d_model([in_src, gen_out])
# src image as input, generated image and classification output
model = Model(in_src, [dis_out, gen_out])
# compile model
opt = Adam(lr=learning_rate_generator, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# unpack dataset
image_obsta, image_paths_n_obsta = dataset
# choose random instances
indices = list(range(0,image_obsta.shape[0]))
random.shuffle(indices)
ix = indices[0:n_samples]
# retrieve selected images
X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
# generate fake instance
X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_paths.pdf")
fig2 = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig2.add_gridspec(1,2)
ax[0] = fig2.add_subplot(gs[0, 0])
ax[1] = fig2.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_obst.pdf")
discriminator_loss_real.append(d_loss1)
discriminator_loss_fake.append(d_loss2)
generator_loss.append(g_loss)
discriminator_loss.append(d_loss1+d_loss2)
print(i)
# save the plots for loss etc
x = np.linspace(0, n_steps, n_steps)
plt.figure()
plt.plot(x, discriminator_loss, color = 'blue')
plt.ylabel('Discriminator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator','generator'))
plt.savefig(save_path+'/loss_discriminator.pdf')
plt.figure()
plt.plot(x, generator_loss, color = 'orange')
plt.ylabel('Generator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator loss for fake images','discriminator loss for real images'))
plt.savefig(save_path+'/loss_generator.pdf')
writer = pd.ExcelWriter(save_path+'/loss.xlsx', engine='xlsxwriter')
df1 = DataFrame({'Generator Loss': generator_loss, 'Discriminator Loss': discriminator_loss, 'Discriminator Loss for Real Images': discriminator_loss_real, 'Discriminator Loss for Fake Images': discriminator_loss_fake})
df1.to_excel(writer, sheet_name='sheet1', index=False)
writer.save()
# Saving the Gnerator Model and weights since that is the only one necessary
model_json = g_model.to_json()
with open(save_path+'/Generator_model_tex.json', "w") as json_file:
json_file.write(model_json)
g_model.save_weights(save_path+'/Generator_model_weights_tex.h5')
def load_model_and_check(load_path, test_data):
json_file = open(load_path+'/Generator_model_tex.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
print('Model loaded')
loaded_model.load_weights(load_path+'/Generator_model_weights_tex.h5')
for i in range(test_data.shape[0]):
rand_im = test_data[i]
rand_im = rand_im[np.newaxis,:,:,:]
generated_image = loaded_model.predict(rand_im)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(rand_im,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Test Image as Input', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(load_path +'/Test_Image_Level4_'+ str(i)+'.pdf')
def load_images(folder, im_size = (128,128), col = 1):
# load color images after resizing them !
im_list = []
for filename in os.listdir(folder):
p = os.path.join(folder, filename)
if p == folder + '/.DS_Store':
continue
# img = mpimg.imread(p)
if(col == 1):
img = Image.open(p).convert('L')
else:
img = Image.open(p)
im_resize = img.resize(im_size, Image.ANTIALIAS)
im_list.append(np.ravel(im_resize)) # flattened the images, we need to reshape them before printing
image_list = np.array(im_list)
return image_list
if __name__ == '__main__':
# define image shape
image_shape = (128,128,4)
image_size = (128,128)
col = 4 # set to 4 for color images and 1 for black and white images
image_tp = 'circuit'
#-------------------------------
ver = 13
lr_discriminator = 0.0001
lr_generator = 0.001
num_epochs = 5
num_batch = 1 # ensure that the batch size dives the number of samples entirely
# base_path = '/home/s3494950/thesis'
base_path = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code'
# load_path = base_path+'/Data/circuitImages/usefulCircuits/withObstacles_withoutNoise'
load_path = base_path+'/Data/circuitImages/usefulCircuits/smallerset_obstacles' # 56 items
# load_path = base_path+'/Data/biggerDataset'
save_path = base_path + '/Results/Trained_final_GANs/pix2pix/circuit_' + str(ver)
#-------------------------------
# images = load_images(load_path, image_size, col)
# images = np.reshape(images, (images.shape[0], image_size[0], image_size[1], col))
# d_model = define_discriminator(image_shape, learning_rate_discriminator=lr_discriminator)
# g_model = define_generator(image_shape)
# gan_model = define_gan(g_model, d_model, image_shape, learning_rate_generator=lr_generator)
# # load image data. [image_obsta, image_paths_n_obsta]
# im = copy.deepcopy(images)
# dataset = [remove_paths(im),images]
# print("removed paths")
# # train model
# train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs = num_epochs, n_batch=num_batch)
p = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code/Data/circuitImages/usefulCircuits/test_obstacles'
testing_data = load_images(p, image_size, col)
testing_data = np.reshape(testing_data, (testing_data.shape[0], image_size[0], image_size[1], col))
load_model_and_check(save_path, testing_data) | # decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
# d1 = decoder_block(b, e7, 512)
d2 = decoder_block(b, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False) | random_line_split |
pix2pix_GAN.py | from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import BatchNormalization
from keras.layers import Conv2DTranspose
from keras.layers import Dropout
from keras.utils.vis_utils import plot_model
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import os
from pandas import DataFrame
import pandas as pd
from PIL import Image
from random import randint
import copy
import shutil
import glob
# define the discriminator model
def define_discriminator(image_shape, learning_rate_discriminator = 0.0002):
# weight initialization
init = RandomNormal(stddev=0.02)
# source image input
in_src_image = Input(shape=image_shape)
# target image input
in_target_image = Input(shape=image_shape)
# concatenate images channel-wise
merged = Concatenate()([in_src_image, in_target_image])
# C64
d = Conv2D(64, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(merged)
d = LeakyReLU(alpha=0.2)(d)
# C128
d = Conv2D(128, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C256
d = Conv2D(256, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# C512
d = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# second last output layer
d = Conv2D(512, (4,4), padding='same', kernel_initializer=init)(d)
d = BatchNormalization()(d)
d = LeakyReLU(alpha=0.2)(d)
# patch output
d = Conv2D(1, (4,4), padding='same', kernel_initializer=init)(d)
patch_out = Activation('sigmoid')(d)
# define model
model = Model([in_src_image, in_target_image], patch_out)
# compile model
opt = Adam(lr=learning_rate_discriminator, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, loss_weights=[0.5])
return model
# define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add downsampling layer
g = Conv2D(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# conditionally add batch normalization
if batchnorm:
g = BatchNormalization()(g, training=True)
# leaky relu activation
g = LeakyReLU(alpha=0.2)(g)
return g
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
# weight initialization
init = RandomNormal(stddev=0.02)
# add upsampling layer
g = Conv2DTranspose(n_filters, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(layer_in)
# add batch normalization
g = BatchNormalization()(g, training=True)
# conditionally add dropout
if dropout:
g = Dropout(0.5)(g, training=True)
# merge with skip connection
g = Concatenate()([g, skip_in])
# relu activation
g = Activation('relu')(g)
return g
# define the standalone generator model
def define_generator(image_shape=(128,128,4)):
# weight initialization
init = RandomNormal(stddev=0.02)
# image input
in_image = Input(shape=image_shape)
# encoder model: C64-C128-C256-C512-C512-C512-C512-C512
e1 = define_encoder_block(in_image, 64, batchnorm=False)
e2 = define_encoder_block(e1, 128)
e3 = define_encoder_block(e2, 256)
e4 = define_encoder_block(e3, 512)
e5 = define_encoder_block(e4, 512)
e6 = define_encoder_block(e5, 512)
# e7 = define_encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
b = Activation('relu')(b)
# decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
# d1 = decoder_block(b, e7, 512)
d2 = decoder_block(b, e6, 512)
d3 = decoder_block(d2, e5, 512)
d4 = decoder_block(d3, e4, 512, dropout=False)
d5 = decoder_block(d4, e3, 256, dropout=False)
d6 = decoder_block(d5, e2, 128, dropout=False)
d7 = decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
model = Model(in_image, out_image)
return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
# make weights in the discriminator not trainable
d_model.trainable = False
# define the source image
in_src = Input(shape=image_shape)
# connect the source image to the generator input. The input to the generator are
# images with only obstacles
gen_out = g_model(in_src)
# connect the source input and generator output to the discriminator input
dis_out = d_model([in_src, gen_out])
# src image as input, generated image and classification output
model = Model(in_src, [dis_out, gen_out])
# compile model
opt = Adam(lr=learning_rate_generator, beta_1=0.5)
model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1,100])
return model
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
# unpack dataset
image_obsta, image_paths_n_obsta = dataset
# choose random instances
indices = list(range(0,image_obsta.shape[0]))
random.shuffle(indices)
ix = indices[0:n_samples]
# retrieve selected images
X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape, 1))
return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
# generate fake instance
X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
size = imgs.shape[0]
for i in range(size):
im = imgs[i]
for j in range(im_size):
for k in range(im_size):
pixel = im[j][k]
# remove the white paths
if((abs((pixel[0]-pixel[1])/2)<10 and abs((pixel[1]-pixel[2])/2)<10) or (pixel[0]>=100 and pixel[1]>=100 and pixel[2]>=100)):
im[j][k] = [0,0,0,255] # convert white pixels to black
# remove the blue paths
elif((pixel[2]>=80 and pixel[0]<pixel[2]-20 and pixel[1]<pixel[2]-20 and pixel[0]<80 and pixel[1]<80) or (pixel[0]<40 and pixel[1]<40 and pixel[2]<80)):
im[j][k] = [0,0,0,255]
return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
# calculate the number of batches per training epoch
trainA, trainB = dataset
bat_per_epo = int(len(trainA) / n_batch)
# calculate the number of training iterations
n_steps = bat_per_epo * n_epochs
# manually enumerate epochs
generator_loss = []
discriminator_loss = []
discriminator_loss_real = []
discriminator_loss_fake = []
for i in range(n_steps):
# select a batch of real samples
[real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
# update discriminator for real samples
d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
# update discriminator for generated samples
d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
# update the generator
g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
# store the images that the generator generates after each epoch
if(i % bat_per_epo == 0):
[real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
generated_image = g_model.predict(real_image_obsta_sample)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_paths.pdf")
fig2 = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig2.add_gridspec(1,2)
ax[0] = fig2.add_subplot(gs[0, 0])
ax[1] = fig2.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(real_image_obsta_sample,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Original Image', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_obst.pdf")
discriminator_loss_real.append(d_loss1)
discriminator_loss_fake.append(d_loss2)
generator_loss.append(g_loss)
discriminator_loss.append(d_loss1+d_loss2)
print(i)
# save the plots for loss etc
x = np.linspace(0, n_steps, n_steps)
plt.figure()
plt.plot(x, discriminator_loss, color = 'blue')
plt.ylabel('Discriminator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator','generator'))
plt.savefig(save_path+'/loss_discriminator.pdf')
plt.figure()
plt.plot(x, generator_loss, color = 'orange')
plt.ylabel('Generator Loss')
plt.xlabel('Number of iterations')
# plt.show()
# plt.legend('upper right')
# plt.gca().legend(('discriminator loss for fake images','discriminator loss for real images'))
plt.savefig(save_path+'/loss_generator.pdf')
writer = pd.ExcelWriter(save_path+'/loss.xlsx', engine='xlsxwriter')
df1 = DataFrame({'Generator Loss': generator_loss, 'Discriminator Loss': discriminator_loss, 'Discriminator Loss for Real Images': discriminator_loss_real, 'Discriminator Loss for Fake Images': discriminator_loss_fake})
df1.to_excel(writer, sheet_name='sheet1', index=False)
writer.save()
# Saving the Gnerator Model and weights since that is the only one necessary
model_json = g_model.to_json()
with open(save_path+'/Generator_model_tex.json', "w") as json_file:
json_file.write(model_json)
g_model.save_weights(save_path+'/Generator_model_weights_tex.h5')
def load_model_and_check(load_path, test_data):
json_file = open(load_path+'/Generator_model_tex.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
print('Model loaded')
loaded_model.load_weights(load_path+'/Generator_model_weights_tex.h5')
for i in range(test_data.shape[0]):
rand_im = test_data[i]
rand_im = rand_im[np.newaxis,:,:,:]
generated_image = loaded_model.predict(rand_im)
mpl.use('pdf')
title_fontsize = 'small'
fig = plt.figure(dpi=300, tight_layout=True)
ax = np.zeros(2, dtype=object)
gs = fig.add_gridspec(1,2)
ax[0] = fig.add_subplot(gs[0, 0])
ax[1] = fig.add_subplot(gs[0, 1])
ax[0].imshow(np.reshape(rand_im,(128, 128, 4)).astype('uint8'))
ax[0].set_title('Test Image as Input', fontsize = title_fontsize)
ax[0].set_xlabel('(a)')
ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
ax[1].set_xlabel('(b)')
for a in ax:
a.set_xticks([])
a.set_yticks([])
plt.savefig(load_path +'/Test_Image_Level4_'+ str(i)+'.pdf')
def load_images(folder, im_size = (128,128), col = 1):
# load color images after resizing them !
im_list = []
for filename in os.listdir(folder):
p = os.path.join(folder, filename)
if p == folder + '/.DS_Store':
continue
# img = mpimg.imread(p)
if(col == 1):
|
else:
img = Image.open(p)
im_resize = img.resize(im_size, Image.ANTIALIAS)
im_list.append(np.ravel(im_resize)) # flattened the images, we need to reshape them before printing
image_list = np.array(im_list)
return image_list
if __name__ == '__main__':
# define image shape
image_shape = (128,128,4)
image_size = (128,128)
col = 4 # set to 4 for color images and 1 for black and white images
image_tp = 'circuit'
#-------------------------------
ver = 13
lr_discriminator = 0.0001
lr_generator = 0.001
num_epochs = 5
num_batch = 1 # ensure that the batch size dives the number of samples entirely
# base_path = '/home/s3494950/thesis'
base_path = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code'
# load_path = base_path+'/Data/circuitImages/usefulCircuits/withObstacles_withoutNoise'
load_path = base_path+'/Data/circuitImages/usefulCircuits/smallerset_obstacles' # 56 items
# load_path = base_path+'/Data/biggerDataset'
save_path = base_path + '/Results/Trained_final_GANs/pix2pix/circuit_' + str(ver)
#-------------------------------
# images = load_images(load_path, image_size, col)
# images = np.reshape(images, (images.shape[0], image_size[0], image_size[1], col))
# d_model = define_discriminator(image_shape, learning_rate_discriminator=lr_discriminator)
# g_model = define_generator(image_shape)
# gan_model = define_gan(g_model, d_model, image_shape, learning_rate_generator=lr_generator)
# # load image data. [image_obsta, image_paths_n_obsta]
# im = copy.deepcopy(images)
# dataset = [remove_paths(im),images]
# print("removed paths")
# # train model
# train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs = num_epochs, n_batch=num_batch)
p = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code/Data/circuitImages/usefulCircuits/test_obstacles'
testing_data = load_images(p, image_size, col)
testing_data = np.reshape(testing_data, (testing_data.shape[0], image_size[0], image_size[1], col))
load_model_and_check(save_path, testing_data)
| img = Image.open(p).convert('L') | conditional_block |
pix2pix_GAN.py | from keras.optimizers import Adam
from keras.initializers import RandomNormal
from keras.models import Model
from keras.models import Input
from keras.models import Sequential, model_from_json
from keras.layers import Conv2D
from keras.layers import LeakyReLU
from keras.layers import Activation
from keras.layers import Concatenate
from keras.layers import BatchNormalization
from keras.layers import Conv2DTranspose
from keras.layers import Dropout
from keras.utils.vis_utils import plot_model
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib as mpl
import os
from pandas import DataFrame
import pandas as pd
from PIL import Image
from random import randint
import copy
import shutil
import glob
# define the discriminator model
def define_discriminator(image_shape, learning_rate_discriminator = 0.0002):
    """Build and compile the PatchGAN discriminator.

    Takes a (source, target) image pair, concatenates them channel-wise and
    classifies each receptive-field patch as real/fake through a sigmoid map.
    Compiled with binary cross-entropy, halved via loss_weights=[0.5] so the
    discriminator updates more slowly than the generator.
    """
    weight_init = RandomNormal(stddev=0.02)
    source_input = Input(shape=image_shape)
    target_input = Input(shape=image_shape)
    features = Concatenate()([source_input, target_input])

    def conv_block(tensor, filters, strided=True, normalize=True):
        # 4x4 conv -> optional batch norm -> LeakyReLU(0.2)
        stride = (2, 2) if strided else (1, 1)
        tensor = Conv2D(filters, (4, 4), strides=stride, padding='same',
                        kernel_initializer=weight_init)(tensor)
        if normalize:
            tensor = BatchNormalization()(tensor)
        return LeakyReLU(alpha=0.2)(tensor)

    # C64 (no batch norm) - C128 - C256 - C512, all stride 2
    features = conv_block(features, 64, normalize=False)
    features = conv_block(features, 128)
    features = conv_block(features, 256)
    features = conv_block(features, 512)
    # second-last layer keeps spatial size (stride 1)
    features = conv_block(features, 512, strided=False)
    # per-patch real/fake probability map
    features = Conv2D(1, (4, 4), padding='same', kernel_initializer=weight_init)(features)
    patch_out = Activation('sigmoid')(features)
    model = Model([source_input, target_input], patch_out)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=learning_rate_discriminator, beta_1=0.5),
                  loss_weights=[0.5])
    return model
# define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
    """Downsampling U-Net block: 4x4 conv (stride 2) -> optional BatchNorm -> LeakyReLU.

    BatchNorm runs with training=True so it uses batch statistics even at
    inference time, as in the original pix2pix recipe.
    """
    weight_init = RandomNormal(stddev=0.02)
    out = Conv2D(n_filters, (4, 4), strides=(2, 2), padding='same',
                 kernel_initializer=weight_init)(layer_in)
    if batchnorm:
        out = BatchNormalization()(out, training=True)
    return LeakyReLU(alpha=0.2)(out)
# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
    """Upsampling U-Net block with a skip connection.

    4x4 transposed conv (stride 2) -> BatchNorm -> optional Dropout(0.5) ->
    concatenate with the matching encoder output -> ReLU. Dropout runs with
    training=True (active at inference) per the pix2pix paper.
    """
    weight_init = RandomNormal(stddev=0.02)
    out = Conv2DTranspose(n_filters, (4, 4), strides=(2, 2), padding='same',
                          kernel_initializer=weight_init)(layer_in)
    out = BatchNormalization()(out, training=True)
    if dropout:
        out = Dropout(0.5)(out, training=True)
    out = Concatenate()([out, skip_in])
    return Activation('relu')(out)
# define the standalone generator model
def define_generator(image_shape=(128,128,4)):
    """U-Net generator: 6 encoder blocks + bottleneck + 6 decoder blocks.

    Each decoder block d_k is concatenated with the matching encoder output
    (skip connections: d2<-e6, d3<-e5, ..., d7<-e1). Output has 4 channels
    (RGBA) squashed by tanh.
    NOTE(review): tanh output lies in [-1, 1] while the training images appear
    to be raw uint8 values (no normalization visible) -- confirm intended.
    """
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=image_shape)
    # encoder model: C64-C128-C256-C512-C512-C512-C512-C512
    e1 = define_encoder_block(in_image, 64, batchnorm=False)
    e2 = define_encoder_block(e1, 128)
    e3 = define_encoder_block(e2, 256)
    e4 = define_encoder_block(e3, 512)
    e5 = define_encoder_block(e4, 512)
    e6 = define_encoder_block(e5, 512)
    # e7 = define_encoder_block(e6, 512)  # 7th level disabled for 128x128 inputs
    # bottleneck, no batch norm and relu
    b = Conv2D(512, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(e6)
    b = Activation('relu')(b)
    # decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
    # d1 = decoder_block(b, e7, 512)
    d2 = decoder_block(b, e6, 512)
    d3 = decoder_block(d2, e5, 512)
    d4 = decoder_block(d3, e4, 512, dropout=False)
    d5 = decoder_block(d4, e3, 256, dropout=False)
    d6 = decoder_block(d5, e2, 128, dropout=False)
    d7 = decoder_block(d6, e1, 64, dropout=False)
    # output: upsample back to full resolution, 4 channels, tanh activation
    g = Conv2DTranspose(4, (4,4), strides=(2,2), padding='same', kernel_initializer=init)(d7)
    out_image = Activation('tanh')(g)
    # define model
    model = Model(in_image, out_image)
    return model
# define the combined generator and discriminator model, for updating the generator
def define_gan(g_model, d_model, image_shape, learning_rate_generator = 0.0002):
    """Composite model used to train the generator.

    The discriminator is frozen; the source (obstacle-only) image flows through
    the generator, and both the discriminator's verdict and the generated image
    are outputs. Loss = adversarial BCE + 100 * L1 (mae) against the target.
    """
    # freeze discriminator weights while the generator trains
    d_model.trainable = False
    source_image = Input(shape=image_shape)
    generated = g_model(source_image)
    validity = d_model([source_image, generated])
    combined = Model(source_image, [validity, generated])
    combined.compile(
        loss=['binary_crossentropy', 'mae'],
        optimizer=Adam(lr=learning_rate_generator, beta_1=0.5),
        loss_weights=[1, 100],
    )
    return combined
# select a batch of random samples, returns images and target
def generate_real_samples(dataset, n_samples, patch_shape):
    """Draw a random batch of real (source, target) image pairs.

    dataset: (image_obsta, image_paths_n_obsta) -- two aligned arrays.
    Returns [X1, X2] (paired source/target rows) and an all-ones label tensor
    of shape (n_samples, patch_shape, patch_shape, 1) marking them "real".
    """
    image_obsta, image_paths_n_obsta = dataset
    # sample indices without replacement; replaces shuffling the entire index
    # list (O(n) work per batch) just to take the first n_samples entries
    ix = random.sample(range(image_obsta.shape[0]), n_samples)
    X1, X2 = image_obsta[ix], image_paths_n_obsta[ix]
    # generate 'real' class labels (1)
    y = np.ones((n_samples, patch_shape, patch_shape, 1))
    return [X1, X2], y
# generate a batch of images, returns images and targets
def generate_fake_samples(g_model, samples, patch_shape):
    """Run the generator on a batch of source images.

    Returns the generated images and an all-zeros label tensor of shape
    (len(X), patch_shape, patch_shape, 1) marking them "fake".
    (Body restored: it was garbled/missing in this copy.)
    """
    # generate fake instance
    X = g_model.predict(samples)
    # create 'fake' class labels (0)
    y = np.zeros((len(X), patch_shape, patch_shape, 1))
    return X, y
# # extracts path images. Its given a batch of color images with obstacles and paths
# # implement a thresholding method to extract only the path i.e. remove the obstacles
# def extract_path_image(imgs, im_size = 128):
# size = imgs.shape[0]
# print("size is : ", size)
# for i in range(size):
# im = imgs[i]
# for j in range(im_size):
# for k in range(im_size):
# pixel = im[j][k]
# # remove the obstacles
# if(pixel[1]>80 and pixel[0]<40 and pixel[2]<40):
# im[j][k] = [0,0,0,255]
# return imgs
# input is a set of color images with obstacles and paths. It removed the paths and outputs the set of images with
# only the obstacles
def remove_paths(imgs, im_size = 128):
    """Blacken path pixels (white/grey or blue) in-place, keeping obstacles.

    imgs: array of shape (n, im_size, im_size, 4), RGBA (typically uint8).
    Returns the same array, mutated in place.
    """
    size = imgs.shape[0]
    for i in range(size):
        im = imgs[i]
        for j in range(im_size):
            for k in range(im_size):
                pixel = im[j][k]
                # fix: cast channels to Python ints before subtracting --
                # on uint8 arrays the differences wrap around (e.g. 10 - 250
                # yields 16), misclassifying strongly coloured pixels as grey
                r, g, b = int(pixel[0]), int(pixel[1]), int(pixel[2])
                # remove the white paths: channels nearly equal, or all bright
                if((abs((r-g)/2)<10 and abs((g-b)/2)<10) or (r>=100 and g>=100 and b>=100)):
                    im[j][k] = [0,0,0,255] # convert white pixels to black
                # remove the blue paths: blue dominates, or dark with mild blue
                elif((b>=80 and r<b-20 and g<b-20 and r<80 and g<80) or (r<40 and g<40 and b<80)):
                    im[j][k] = [0,0,0,255]
    return imgs
# train pix2pix models
def train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs=100, n_batch=1, n_patch=8):
    """Train the pix2pix GAN and save losses, sample images and the generator.

    save_path : directory for per-epoch sample PDFs, loss plots/xlsx and the
                generator JSON + weights (must already exist).
    dataset   : [source_images, target_images], aligned arrays.
    n_patch   : side length of the discriminator's patch-label map.
    """
    # calculate the number of batches per training epoch
    trainA, trainB = dataset
    bat_per_epo = int(len(trainA) / n_batch)
    # calculate the number of training iterations
    n_steps = bat_per_epo * n_epochs
    # manually enumerate epochs; loss histories collected per iteration
    generator_loss = []
    discriminator_loss = []
    discriminator_loss_real = []
    discriminator_loss_fake = []
    for i in range(n_steps):
        # select a batch of real samples
        [real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real = generate_real_samples(dataset, n_batch, n_patch)
        # generate a batch of fake samples
        fake_image_paths_n_obsta, label_fake = generate_fake_samples(g_model, real_image_obsta_batch, n_patch)
        # update discriminator for real samples
        d_loss1 = d_model.train_on_batch([real_image_obsta_batch, real_image_paths_n_obsta_batch], label_real)
        # update discriminator for generated samples
        d_loss2 = d_model.train_on_batch([real_image_obsta_batch, fake_image_paths_n_obsta], label_fake)
        # update the generator (composite model: returns total, adv, L1 losses)
        g_loss, _, _ = gan_model.train_on_batch(real_image_obsta_batch, [label_real, real_image_paths_n_obsta_batch])
        # store the images that the generator generates after each epoch
        if(i % bat_per_epo == 0):
            [real_image_obsta_sample, real_image_paths_n_obsta_sample], label_real = generate_real_samples(dataset, 1, n_patch)
            generated_image = g_model.predict(real_image_obsta_sample)
            mpl.use('pdf')
            title_fontsize = 'small'
            # side-by-side: target image vs generated image
            fig = plt.figure(dpi=300, tight_layout=True)
            ax = np.zeros(2, dtype=object)
            gs = fig.add_gridspec(1,2)
            ax[0] = fig.add_subplot(gs[0, 0])
            ax[1] = fig.add_subplot(gs[0, 1])
            ax[0].imshow(np.reshape(real_image_paths_n_obsta_sample,(128, 128, 4)).astype('uint8'))
            ax[0].set_title('Original Image', fontsize = title_fontsize)
            ax[0].set_xlabel('(a)')
            ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
            ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
            ax[1].set_xlabel('(b)')
            for a in ax:
                a.set_xticks([])
                a.set_yticks([])
            plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_paths.pdf")
            # side-by-side: source (obstacles-only) image vs generated image
            fig2 = plt.figure(dpi=300, tight_layout=True)
            ax = np.zeros(2, dtype=object)
            gs = fig2.add_gridspec(1,2)
            ax[0] = fig2.add_subplot(gs[0, 0])
            ax[1] = fig2.add_subplot(gs[0, 1])
            ax[0].imshow(np.reshape(real_image_obsta_sample,(128, 128, 4)).astype('uint8'))
            ax[0].set_title('Original Image', fontsize = title_fontsize)
            ax[0].set_xlabel('(a)')
            ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
            ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
            ax[1].set_xlabel('(b)')
            for a in ax:
                a.set_xticks([])
                a.set_yticks([])
            plt.savefig(save_path +'/Epoch_'+ str(int(i/bat_per_epo))+"_obst.pdf")
        discriminator_loss_real.append(d_loss1)
        discriminator_loss_fake.append(d_loss2)
        generator_loss.append(g_loss)
        discriminator_loss.append(d_loss1+d_loss2)
        print(i)
    # save the plots for loss etc
    x = np.linspace(0, n_steps, n_steps)
    plt.figure()
    plt.plot(x, discriminator_loss, color = 'blue')
    plt.ylabel('Discriminator Loss')
    plt.xlabel('Number of iterations')
    # plt.show()
    # plt.legend('upper right')
    # plt.gca().legend(('discriminator','generator'))
    plt.savefig(save_path+'/loss_discriminator.pdf')
    plt.figure()
    plt.plot(x, generator_loss, color = 'orange')
    plt.ylabel('Generator Loss')
    plt.xlabel('Number of iterations')
    # plt.show()
    # plt.legend('upper right')
    # plt.gca().legend(('discriminator loss for fake images','discriminator loss for real images'))
    plt.savefig(save_path+'/loss_generator.pdf')
    # dump all four loss histories to an Excel sheet
    # NOTE(review): ExcelWriter.save() is deprecated in newer pandas; writer.close() is the replacement -- confirm installed version
    writer = pd.ExcelWriter(save_path+'/loss.xlsx', engine='xlsxwriter')
    df1 = DataFrame({'Generator Loss': generator_loss, 'Discriminator Loss': discriminator_loss, 'Discriminator Loss for Real Images': discriminator_loss_real, 'Discriminator Loss for Fake Images': discriminator_loss_fake})
    df1.to_excel(writer, sheet_name='sheet1', index=False)
    writer.save()
    # Saving the Gnerator Model and weights since that is the only one necessary
    model_json = g_model.to_json()
    with open(save_path+'/Generator_model_tex.json', "w") as json_file:
        json_file.write(model_json)
    g_model.save_weights(save_path+'/Generator_model_weights_tex.h5')
def load_model_and_check(load_path, test_data):
    """Load the saved generator and render its output for each test image.

    load_path : directory containing Generator_model_tex.json and the
                matching weights file; the output PDFs are written there too.
    test_data : array of shape (n, 128, 128, 4) of source (obstacle) images.
    """
    # rebuild the generator from its JSON architecture + saved weights
    json_file = open(load_path+'/Generator_model_tex.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    print('Model loaded')
    loaded_model.load_weights(load_path+'/Generator_model_weights_tex.h5')
    for i in range(test_data.shape[0]):
        rand_im = test_data[i]
        # add the batch dimension expected by predict()
        rand_im = rand_im[np.newaxis,:,:,:]
        generated_image = loaded_model.predict(rand_im)
        mpl.use('pdf')
        title_fontsize = 'small'
        # side-by-side PDF: test input vs generated output
        fig = plt.figure(dpi=300, tight_layout=True)
        ax = np.zeros(2, dtype=object)
        gs = fig.add_gridspec(1,2)
        ax[0] = fig.add_subplot(gs[0, 0])
        ax[1] = fig.add_subplot(gs[0, 1])
        ax[0].imshow(np.reshape(rand_im,(128, 128, 4)).astype('uint8'))
        ax[0].set_title('Test Image as Input', fontsize = title_fontsize)
        ax[0].set_xlabel('(a)')
        ax[1].imshow(np.reshape(generated_image,(128, 128, 4)))
        ax[1].set_title('Image Generated by Generator', fontsize = title_fontsize)
        ax[1].set_xlabel('(b)')
        for a in ax:
            a.set_xticks([])
            a.set_yticks([])
        plt.savefig(load_path +'/Test_Image_Level4_'+ str(i)+'.pdf')
def load_images(folder, im_size = (128,128), col = 1):
    """Load every image in `folder`, resized to im_size, flattened row-per-image.

    col : 1 loads greyscale ('L'); any other value keeps the original bands.
    Returns an array of shape (n_images, im_size[0] * im_size[1] * bands);
    reshape before displaying.
    """
    im_list = []
    for filename in os.listdir(folder):
        # fix: compare the bare filename instead of a '/'-joined path so the
        # macOS-metadata skip also works when os.path.join uses '\\' (Windows)
        if filename == '.DS_Store':
            continue
        p = os.path.join(folder, filename)
        if(col == 1):
            img = Image.open(p).convert('L')
        else:
            img = Image.open(p)
        # fix: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
        # filter (Image.Resampling.LANCZOS on Pillow >= 9)
        resample = getattr(Image, 'Resampling', Image).LANCZOS
        im_resize = img.resize(im_size, resample)
        im_list.append(np.ravel(im_resize)) # flattened; reshape before printing
    image_list = np.array(im_list)
    return image_list
if __name__ == '__main__':
    # define image shape (128x128 RGBA)
    image_shape = (128,128,4)
    image_size = (128,128)
    col = 4 # set to 4 for color images and 1 for black and white images
    image_tp = 'circuit'
    #-------------------------------
    # experiment version tag and hyperparameters
    ver = 13
    lr_discriminator = 0.0001
    lr_generator = 0.001
    num_epochs = 5
    num_batch = 1 # ensure that the batch size dives the number of samples entirely
    # base_path = '/home/s3494950/thesis'
    base_path = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code'
    # load_path = base_path+'/Data/circuitImages/usefulCircuits/withObstacles_withoutNoise'
    load_path = base_path+'/Data/circuitImages/usefulCircuits/smallerset_obstacles' # 56 items
    # load_path = base_path+'/Data/biggerDataset'
    save_path = base_path + '/Results/Trained_final_GANs/pix2pix/circuit_' + str(ver)
    #-------------------------------
    # training pipeline (currently disabled; only evaluation below runs)
    # images = load_images(load_path, image_size, col)
    # images = np.reshape(images, (images.shape[0], image_size[0], image_size[1], col))
    # d_model = define_discriminator(image_shape, learning_rate_discriminator=lr_discriminator)
    # g_model = define_generator(image_shape)
    # gan_model = define_gan(g_model, d_model, image_shape, learning_rate_generator=lr_generator)
    # # load image data. [image_obsta, image_paths_n_obsta]
    # im = copy.deepcopy(images)
    # dataset = [remove_paths(im),images]
    # print("removed paths")
    # # train model
    # train_save(save_path, d_model, g_model, gan_model, dataset, n_epochs = num_epochs, n_batch=num_batch)
    # evaluation: run the saved generator over the held-out test images
    p = '/Users/swarajdalmia/Desktop/NeuroMorphicComputing/Code/Data/circuitImages/usefulCircuits/test_obstacles'
    testing_data = load_images(p, image_size, col)
    testing_data = np.reshape(testing_data, (testing_data.shape[0], image_size[0], image_size[1], col))
    load_model_and_check(save_path, testing_data)
| X = g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(X), patch_shape, patch_shape, 1))
return X, y | identifier_body |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
// Declares a `u32`-backed newtype id with the usual derives and a
// `next_id()` constructor. Each macro expansion gets its own counter (the
// `static` lives inside the generated `next_id`), so ids are sequential
// per generated type, starting at 0.
macro_rules! create_id_type_by_u32 {
    ($name: ident) => {
        #[derive(
            Debug,
            Default,
            Copy,
            Clone,
            Eq,
            PartialEq,
            Ord,
            PartialOrd,
            Hash,
            serde::Serialize,
            serde::Deserialize,
            derive_more::Deref,
            derive_more::DerefMut,
            derive_more::Display,
            derive_more::From,
            derive_more::Into,
        )]
        pub struct $name(pub u32);
        impl $name {
            /// Allocate the next sequential id (process-wide atomic counter).
            pub fn next_id() -> Self {
                use core::sync::atomic::{AtomicU32, Ordering};
                static ID_CNT: AtomicU32 = AtomicU32::new(0);
                Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
            }
        }
    };
}
#[macro_export]
// Same as `create_id_type_by_u32`, but the generated newtype wraps a `u16`.
macro_rules! create_id_type_by_u16 {
    ($name: ident) => {
        #[derive(
            Debug,
            Default,
            Copy,
            Clone,
            Eq,
            PartialEq,
            Ord,
            PartialOrd,
            Hash,
            serde::Serialize,
            serde::Deserialize,
            derive_more::Deref,
            derive_more::DerefMut,
            derive_more::Display,
            derive_more::From,
            derive_more::Into,
        )]
        pub struct $name(pub u16);
        impl $name {
            /// Allocate the next sequential id (process-wide atomic counter).
            pub fn next_id() -> Self {
                use core::sync::atomic::{AtomicU16, Ordering};
                static ID_CNT: AtomicU16 = AtomicU16::new(0);
                Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
            }
        }
    };
}
/// Read a JSON file containing an array of `QueryParam<u32>` values.
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
    let json = fs::read_to_string(path)?;
    Ok(serde_json::from_str::<Vec<QueryParam<u32>>>(&json)?)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2 ...
// w_data = w_1 comma w_2 ...
/// Read a raw-object file from disk and parse it with `load_raw_obj_from_str`.
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
    K: Num + FromStr<Err = ParseErr>,
    ParseErr: StdError + Sync + Send + 'static,
{
    let mut contents = String::new();
    BufReader::new(File::open(path)?).read_to_string(&mut contents)?;
    load_raw_obj_from_str(&contents)
}
/// Parse one object per non-empty line, formatted as
/// `block_height [v1, v2, ...] {w1, w2, ...}` (tab or space separated),
/// grouping the parsed objects by block height.
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
    K: Num + FromStr<Err = ParseErr>,
    ParseErr: StdError + Sync + Send + 'static,
{
    let mut res = BTreeMap::new();
    for line in input.lines() {
        let line = line.trim();
        if line.is_empty() {
            continue;
        }
        // split on the v_data brackets into: height, v_data, "{w_data}"
        let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
        let blk_height: Height = Height(
            split_str
                .next()
                .with_context(|| format!("failed to parse line {}", line))?
                .trim()
                .parse()?,
        );
        // numeric payload between '[' and ']'; empty entries are dropped,
        // so trailing commas are tolerated
        let v_data: Vec<K> = split_str
            .next()
            .with_context(|| format!("failed to parse line {}", line))?
            .trim()
            .split(',')
            .map(|s| s.trim())
            .filter(|s| !s.is_empty())
            .map(|s| s.parse::<K>().map_err(Error::from))
            .collect::<Result<_>>()?;
        // keyword payload: strip the braces, split on commas, drop empties
        let w_data: HashSet<String> = split_str
            .next()
            .with_context(|| format!("failed to parse line {}", line))?
            .trim()
            .replace('{', "")
            .replace('}', "")
            .split(',')
            .map(|s| s.trim().to_owned())
            .filter(|s| !s.is_empty())
            .collect();
        let raw_obj = Object::new(blk_height, v_data, w_data);
        res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
    }
    Ok(res)
}
/// Accumulator key pair; the secret key is private to this module,
/// the public key is freely shared.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
    // secret key; only persisted/restored via `KeyPair::save` / `KeyPair::load`
    sk: AccSecretKey,
    pub pk: AccPublicKey,
}
impl KeyPair {
    /// Generate a fresh key pair; `q` is the accumulator public-key parameter.
    pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
        let sk = AccSecretKey::rand(&mut rng);
        let sk_with_pow = sk.into();
        let pk = AccPublicKey::gen_key(&sk_with_pow, q);
        Self { sk, pk }
    }
    /// Persist both keys as bincode files under `path`; fails if `path`
    /// already exists (never overwrites a key directory).
    pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
        let path = path.as_ref();
        ensure!(!path.exists(), "{} already exists.", path.display());
        fs::create_dir_all(&path)?;
        let sk_f = File::create(&Self::sk_path(path))?;
        bincode::serialize_into(sk_f, &self.sk)?;
        let pk_f = File::create(&Self::pk_path(path))?;
        bincode::serialize_into(pk_f, &self.pk)?;
        Ok(())
    }
    /// Load a key pair previously written by [`KeyPair::save`].
    /// The public key is read through a memory map rather than a buffered
    /// reader -- presumably because it can be large; TODO confirm.
    pub fn load(path: impl AsRef<Path>) -> Result<Self> {
        let path = path.as_ref();
        let sk_file = File::open(Self::sk_path(path))?;
        let sk_reader = BufReader::new(sk_file);
        let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
        let pk_file = File::open(Self::pk_path(path))?;
        let pk_data = unsafe { Mmap::map(&pk_file) }?;
        let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
        Ok(Self { sk, pk })
    }
    fn sk_path(path: &Path) -> PathBuf {
        path.join("sk")
    }
    // fix: the function name was garbled/missing here; `save` and `load`
    // both call `Self::pk_path`, so that identifier is restored.
    fn pk_path(path: &Path) -> PathBuf {
        path.join("pk")
    }
}
/// Install the global tracing subscriber; `directives` is the fallback filter
/// used when no filter is configured in the environment (`RUST_LOG`).
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
    let filter = match EnvFilter::try_from_default_env() {
        Ok(env_filter) => env_filter,
        Err(_) => EnvFilter::new(directives),
    };
    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .try_init()
        .map_err(Error::msg)
}
/// Per-stage timings for a query, plus the overall total.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
    pub(crate) stage1: Time,
    pub(crate) stage2: Time,
    pub(crate) stage3: Time,
    pub(crate) stage4: Time,
    pub(crate) total: Time,
}
/// A duration split into real (wall-clock), user and system CPU time,
/// all in microseconds (see `From<ProcessDuration>` below).
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
    real: u64,
    user: u64,
    sys: u64,
}
impl From<ProcessDuration> for Time {
    /// Collapse a `ProcessDuration` into microsecond counts.
    fn from(duration: ProcessDuration) -> Self {
        let real = duration.real.as_micros() as u64;
        let user = duration.user.as_micros() as u64;
        let sys = duration.system.as_micros() as u64;
        Self { real, user, sys }
    }
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
// Unit tests for the utilities above. Several tests pin exact bincode-serialized
// byte sizes; they document (and guard) the current wire encodings.
#[cfg(test)]
mod tests {
    use super::KeyPair;
    use crate::{
        acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
        chain::{
            block::Height,
            object::Object,
            query::query_plan::{QPKeywordNode, QPNode, QPUnion},
        },
        digest::Digestible,
        set,
        utils::{binary_decode, binary_encode, load_raw_obj_from_str},
    };
    use petgraph::Graph;
    use std::collections::BTreeMap;
    #[test]
    fn test_create_id() {
        // ids start at 0 and increment per generated type
        create_id_type_by_u32!(TestId);
        assert_eq!(TestId::next_id(), TestId(0));
        assert_eq!(TestId::next_id(), TestId(1));
        assert_eq!(TestId::next_id(), TestId(2));
    }
    #[test]
    fn test_load_raw_obj() {
        // mixed separators (tab/space) and a trailing comma are all accepted
        let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
        let expect = {
            let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
            exp.insert(
                Height(1),
                vec![Object {
                    blk_height: Height(1),
                    num_data: vec![1, 2],
                    keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
                }],
            );
            exp.insert(
                Height(2),
                vec![
                    Object {
                        blk_height: Height(2),
                        num_data: vec![3, 4],
                        keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
                    },
                    Object {
                        blk_height: Height(2),
                        num_data: vec![5, 6],
                        keyword_data: ["e".to_owned()].iter().cloned().collect(),
                    },
                ],
            );
            exp
        };
        assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
    }
    #[test]
    fn test_maintain_key() {
        // round-trip: save then load must reproduce the key pair exactly
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("key");
        let q: u64 = 10;
        let rng = rand::thread_rng();
        let key_pair = KeyPair::gen(q, rng);
        key_pair.save(path.clone()).unwrap();
        let read_key_pair = KeyPair::load(&path).unwrap();
        assert_eq!(key_pair, read_key_pair);
    }
    #[test]
    fn test_petgraph_serialize() {
        // builds a 7-node query-plan DAG, then removes the leaves and compares
        // serialized sizes (printed only; no behavioral assertion)
        let k1 = QPKeywordNode {
            blk_height: Height(0),
            set: None,
        };
        let k2 = QPKeywordNode {
            blk_height: Height(0),
            set: None,
        };
        let k3 = QPKeywordNode {
            blk_height: Height(0),
            set: None,
        };
        let k4 = QPKeywordNode {
            blk_height: Height(0),
            set: None,
        };
        let union = QPUnion { set: None };
        let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
        let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
        let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
        let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
        let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
        let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
        let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
        let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
        qp_dag.add_edge(idx4, idx0, true);
        qp_dag.add_edge(idx4, idx1, false);
        qp_dag.add_edge(idx5, idx2, true);
        qp_dag.add_edge(idx5, idx3, false);
        qp_dag.add_edge(idx6, idx4, true);
        qp_dag.add_edge(idx6, idx5, false);
        let size_original = bincode::serialize(&qp_dag).unwrap().len();
        qp_dag.remove_node(idx0);
        qp_dag.remove_node(idx1);
        qp_dag.remove_node(idx2);
        qp_dag.remove_node(idx3);
        let size_update = bincode::serialize(&qp_dag).unwrap().len();
        println!("before: {}", size_original);
        println!("after: {}", size_update);
        assert_eq!(1, 1); // NOTE(review): trivially true; test only prints sizes
    }
    #[test]
    fn test_compress() {
        // encode/decode round-trip through snappy + bincode
        let value = String::from("hello world");
        let bin = binary_encode(&value).unwrap();
        assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
    }
    #[test]
    fn test_acc_size() {
        use crate::chain::tests::PUB_KEY;
        // note: 19 appears twice; the set macro deduplicates
        let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
        let acc = AccValue::from_set(&set, &PUB_KEY);
        let acc_size = bincode::serialize(&acc).unwrap().len();
        let dig = acc.to_digest();
        let dig_size = bincode::serialize(&dig).unwrap().len();
        assert_eq!(dig_size, 32);
        assert_eq!(acc_size, 416);
    }
    #[test]
    fn test_proof_size() {
        use crate::chain::tests::PUB_KEY;
        let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
        let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
        let acc1 = AccValue::from_set(&set1, &PUB_KEY);
        let acc2 = AccValue::from_set(&set2, &PUB_KEY);
        let (_set, _acc, inter_proof) =
            compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
        let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
        let inter_size = bincode::serialize(&inter_proof).unwrap().len();
        let final_size = bincode::serialize(&final_proof).unwrap().len();
        assert_eq!(inter_size, 564);
        assert_eq!(final_size, 204);
    }
    use serde::{Deserialize, Serialize};
    // local fixture newtypes for the size tests below (the macro-generated
    // TestId above is scoped inside test_create_id, so no conflict)
    #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
    struct TestId(u8);
    #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
    struct TestId2(u64);
    #[test]
    fn test_int_size() {
        // bincode encodes integers at their full fixed width; newtypes add
        // no overhead, Option adds one tag byte
        let a: u8 = 1;
        let b: u32 = 1;
        let c: u64 = 1;
        let a_size = bincode::serialize(&a).unwrap().len();
        let b_size = bincode::serialize(&b).unwrap().len();
        let c_size = bincode::serialize(&c).unwrap().len();
        assert_eq!(a_size, 1);
        assert_eq!(b_size, 4);
        assert_eq!(c_size, 8);
        let a = TestId(1);
        let b = TestId2(1);
        let a_size = bincode::serialize(&a).unwrap().len();
        let b_size = bincode::serialize(&b).unwrap().len();
        assert_eq!(a_size, 1);
        assert_eq!(b_size, 8);
        let c = Some(b);
        let d: Option<TestId2> = None;
        let c_size = bincode::serialize(&c).unwrap().len();
        let d_size = bincode::serialize(&d).unwrap().len();
        assert_eq!(c_size, 9);
        assert_eq!(d_size, 1);
    }
    #[test]
    fn test_str_size() {
        // strings carry an 8-byte length prefix; SmolStr serializes identically
        let a: smol_str::SmolStr = smol_str::SmolStr::from("");
        let str_size = bincode::serialize(&a).unwrap().len();
        assert_eq!(str_size, 8);
        let a: String = String::from("");
        let str_size = bincode::serialize(&a).unwrap().len();
        assert_eq!(str_size, 8);
        let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
        let str_size = bincode::serialize(&a).unwrap().len();
        assert_eq!(str_size, 72);
        let a = smol_str::SmolStr::from(
            "53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
        );
        let str_size = bincode::serialize(&a).unwrap().len();
        assert_eq!(str_size, 72);
    }
}
| pk_path | identifier_name |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
// Declares a `u32`-backed newtype id with the usual derives and a
// `next_id()` constructor. Each macro expansion gets its own counter (the
// `static` lives inside the generated `next_id`), so ids are sequential
// per generated type, starting at 0.
macro_rules! create_id_type_by_u32 {
    ($name: ident) => {
        #[derive(
            Debug,
            Default,
            Copy,
            Clone,
            Eq,
            PartialEq,
            Ord,
            PartialOrd,
            Hash,
            serde::Serialize,
            serde::Deserialize,
            derive_more::Deref,
            derive_more::DerefMut,
            derive_more::Display,
            derive_more::From,
            derive_more::Into,
        )]
        pub struct $name(pub u32);
        impl $name {
            /// Allocate the next sequential id (process-wide atomic counter).
            pub fn next_id() -> Self {
                use core::sync::atomic::{AtomicU32, Ordering};
                static ID_CNT: AtomicU32 = AtomicU32::new(0);
                Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
            }
        }
    };
}
#[macro_export]
// Same as `create_id_type_by_u32`, but the generated newtype wraps a `u16`.
// fix: the counter body of `next_id` (and the macro's closing brace) was
// garbled/missing in this copy; restored to match the u32 variant.
macro_rules! create_id_type_by_u16 {
    ($name: ident) => {
        #[derive(
            Debug,
            Default,
            Copy,
            Clone,
            Eq,
            PartialEq,
            Ord,
            PartialOrd,
            Hash,
            serde::Serialize,
            serde::Deserialize,
            derive_more::Deref,
            derive_more::DerefMut,
            derive_more::Display,
            derive_more::From,
            derive_more::Into,
        )]
        pub struct $name(pub u16);
        impl $name {
            /// Allocate the next sequential id (process-wide atomic counter).
            pub fn next_id() -> Self {
                use core::sync::atomic::{AtomicU16, Ordering};
                static ID_CNT: AtomicU16 = AtomicU16::new(0);
                Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
            }
        }
    };
}
/// Read a JSON file containing an array of `QueryParam<u32>` values.
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
    let json = fs::read_to_string(path)?;
    Ok(serde_json::from_str::<Vec<QueryParam<u32>>>(&json)?)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2 ...
// w_data = w_1 comma w_2 ...
/// Read a raw-object file from disk and parse it with `load_raw_obj_from_str`.
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
    K: Num + FromStr<Err = ParseErr>,
    ParseErr: StdError + Sync + Send + 'static,
{
    let mut contents = String::new();
    BufReader::new(File::open(path)?).read_to_string(&mut contents)?;
    load_raw_obj_from_str(&contents)
}
/// Parse one object per non-empty line, formatted as
/// `block_height [v1, v2, ...] {w1, w2, ...}` (tab or space separated),
/// grouping the parsed objects by block height.
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
    K: Num + FromStr<Err = ParseErr>,
    ParseErr: StdError + Sync + Send + 'static,
{
    let mut res = BTreeMap::new();
    for line in input.lines() {
        let line = line.trim();
        if line.is_empty() {
            continue;
        }
        // split on the v_data brackets into: height, v_data, "{w_data}"
        let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
        let blk_height: Height = Height(
            split_str
                .next()
                .with_context(|| format!("failed to parse line {}", line))?
                .trim()
                .parse()?,
        );
        // numeric payload between '[' and ']'; empty entries are dropped,
        // so trailing commas are tolerated
        let v_data: Vec<K> = split_str
            .next()
            .with_context(|| format!("failed to parse line {}", line))?
            .trim()
            .split(',')
            .map(|s| s.trim())
            .filter(|s| !s.is_empty())
            .map(|s| s.parse::<K>().map_err(Error::from))
            .collect::<Result<_>>()?;
        // keyword payload: strip the braces, split on commas, drop empties
        let w_data: HashSet<String> = split_str
            .next()
            .with_context(|| format!("failed to parse line {}", line))?
            .trim()
            .replace('{', "")
            .replace('}', "")
            .split(',')
            .map(|s| s.trim().to_owned())
            .filter(|s| !s.is_empty())
            .collect();
        let raw_obj = Object::new(blk_height, v_data, w_data);
        res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
    }
    Ok(res)
}
/// Accumulator key pair; the secret key is private to this module,
/// the public key is freely shared.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
    // secret key; only persisted/restored via `KeyPair::save` / `KeyPair::load`
    sk: AccSecretKey,
    pub pk: AccPublicKey,
}
impl KeyPair {
    /// Generate a fresh key pair; `q` is the accumulator public-key parameter.
    pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
        let sk = AccSecretKey::rand(&mut rng);
        let sk_with_pow = sk.into();
        let pk = AccPublicKey::gen_key(&sk_with_pow, q);
        Self { sk, pk }
    }
    /// Persist both keys as bincode files under `path`; fails if `path`
    /// already exists (never overwrites a key directory).
    pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
        let path = path.as_ref();
        ensure!(!path.exists(), "{} already exists.", path.display());
        fs::create_dir_all(&path)?;
        let sk_f = File::create(&Self::sk_path(path))?;
        bincode::serialize_into(sk_f, &self.sk)?;
        let pk_f = File::create(&Self::pk_path(path))?;
        bincode::serialize_into(pk_f, &self.pk)?;
        Ok(())
    }
    /// Load a key pair previously written by [`KeyPair::save`].
    /// The public key is read through a memory map rather than a buffered
    /// reader -- presumably because it can be large; TODO confirm.
    pub fn load(path: impl AsRef<Path>) -> Result<Self> {
        let path = path.as_ref();
        let sk_file = File::open(Self::sk_path(path))?;
        let sk_reader = BufReader::new(sk_file);
        let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
        let pk_file = File::open(Self::pk_path(path))?;
        let pk_data = unsafe { Mmap::map(&pk_file) }?;
        let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
        Ok(Self { sk, pk })
    }
    fn sk_path(path: &Path) -> PathBuf {
        path.join("sk")
    }
    fn pk_path(path: &Path) -> PathBuf {
        path.join("pk")
    }
}
/// Install the global tracing subscriber; `directives` is the fallback filter
/// used when no filter is configured in the environment (`RUST_LOG`).
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
    let filter = match EnvFilter::try_from_default_env() {
        Ok(env_filter) => env_filter,
        Err(_) => EnvFilter::new(directives),
    };
    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .try_init()
        .map_err(Error::msg)
}
/// Per-stage timings for a query, plus the overall total.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
    pub(crate) stage1: Time,
    pub(crate) stage2: Time,
    pub(crate) stage3: Time,
    pub(crate) stage4: Time,
    pub(crate) total: Time,
}
/// A duration split into real (wall-clock), user and system CPU time,
/// all in microseconds (see `From<ProcessDuration>` below).
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
    real: u64,
    user: u64,
    sys: u64,
}
impl From<ProcessDuration> for Time {
    /// Collapse a `ProcessDuration` into microsecond counts.
    fn from(duration: ProcessDuration) -> Self {
        let real = duration.real.as_micros() as u64;
        let user = duration.user.as_micros() as u64;
        let sys = duration.system.as_micros() as u64;
        Self { real, user, sys }
    }
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
} | static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst)) | random_line_split |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
macro_rules! create_id_type_by_u32 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u32);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU32, Ordering};
static ID_CNT: AtomicU32 = AtomicU32::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
#[macro_export]
macro_rules! create_id_type_by_u16 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u16);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU16, Ordering};
static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2 ...
// w_data = w_1 comma w_2 ...
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() {
continue;
}
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> |
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
}
| {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
} | identifier_body |
utils.rs | use crate::{
acc::{AccPublicKey, AccSecretKey},
chain::{block::Height, object::Object, query::query_param::QueryParam, traits::Num},
};
use anyhow::{ensure, Context, Error, Result};
use howlong::ProcessDuration;
use memmap2::Mmap;
use rand::{CryptoRng, RngCore};
use serde::{Deserialize, Serialize};
use snap::{read::FrameDecoder, write::FrameEncoder};
use std::{
collections::{BTreeMap, HashSet},
error::Error as StdError,
fs,
fs::File,
io::{prelude::*, BufReader},
path::{Path, PathBuf},
str::FromStr,
};
use tracing_subscriber::EnvFilter;
#[macro_export]
macro_rules! create_id_type_by_u32 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u32);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU32, Ordering};
static ID_CNT: AtomicU32 = AtomicU32::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
#[macro_export]
macro_rules! create_id_type_by_u16 {
($name: ident) => {
#[derive(
Debug,
Default,
Copy,
Clone,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
serde::Serialize,
serde::Deserialize,
derive_more::Deref,
derive_more::DerefMut,
derive_more::Display,
derive_more::From,
derive_more::Into,
)]
pub struct $name(pub u16);
impl $name {
pub fn next_id() -> Self {
use core::sync::atomic::{AtomicU16, Ordering};
static ID_CNT: AtomicU16 = AtomicU16::new(0);
Self(ID_CNT.fetch_add(1, Ordering::SeqCst))
}
}
};
}
pub fn load_query_param_from_file(path: &Path) -> Result<Vec<QueryParam<u32>>> {
let data = fs::read_to_string(path)?;
let query_params: Vec<QueryParam<u32>> = serde_json::from_str(&data)?;
Ok(query_params)
}
// input format: block_id sep [ v_data ] sep { w_data }
// sep = \t or space
// v_data = v_1 comma v_2 ...
// w_data = w_1 comma w_2 ...
pub fn load_raw_obj_from_file<K, ParseErr>(path: &Path) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut reader = BufReader::new(File::open(path)?);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
load_raw_obj_from_str(&buf)
}
pub fn load_raw_obj_from_str<K, ParseErr>(input: &str) -> Result<BTreeMap<Height, Vec<Object<K>>>>
where
K: Num + FromStr<Err = ParseErr>,
ParseErr: StdError + Sync + Send + 'static,
{
let mut res = BTreeMap::new();
for line in input.lines() {
let line = line.trim();
if line.is_empty() |
let mut split_str = line.splitn(3, |c| c == '[' || c == ']');
let blk_height: Height = Height(
split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.parse()?,
);
let v_data: Vec<K> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.parse::<K>().map_err(Error::from))
.collect::<Result<_>>()?;
let w_data: HashSet<String> = split_str
.next()
.with_context(|| format!("failed to parse line {}", line))?
.trim()
.replace('{', "")
.replace('}', "")
.split(',')
.map(|s| s.trim().to_owned())
.filter(|s| !s.is_empty())
.collect();
let raw_obj = Object::new(blk_height, v_data, w_data);
res.entry(blk_height).or_insert_with(Vec::new).push(raw_obj);
}
Ok(res)
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct KeyPair {
sk: AccSecretKey,
pub pk: AccPublicKey,
}
impl KeyPair {
pub fn gen(q: u64, mut rng: impl RngCore + CryptoRng) -> Self {
let sk = AccSecretKey::rand(&mut rng);
let sk_with_pow = sk.into();
let pk = AccPublicKey::gen_key(&sk_with_pow, q);
Self { sk, pk }
}
pub fn save(&self, path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
ensure!(!path.exists(), "{} already exists.", path.display());
fs::create_dir_all(&path)?;
let sk_f = File::create(&Self::sk_path(path))?;
bincode::serialize_into(sk_f, &self.sk)?;
let pk_f = File::create(&Self::pk_path(path))?;
bincode::serialize_into(pk_f, &self.pk)?;
Ok(())
}
pub fn load(path: impl AsRef<Path>) -> Result<Self> {
let path = path.as_ref();
let sk_file = File::open(Self::sk_path(path))?;
let sk_reader = BufReader::new(sk_file);
let sk: AccSecretKey = bincode::deserialize_from(sk_reader)?;
let pk_file = File::open(Self::pk_path(path))?;
let pk_data = unsafe { Mmap::map(&pk_file) }?;
let pk: AccPublicKey = bincode::deserialize(&pk_data[..])?;
Ok(Self { sk, pk })
}
fn sk_path(path: &Path) -> PathBuf {
path.join("sk")
}
fn pk_path(path: &Path) -> PathBuf {
path.join("pk")
}
}
pub fn init_tracing_subscriber(directives: &str) -> Result<()> {
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(directives));
tracing_subscriber::fmt()
.with_env_filter(filter)
.try_init()
.map_err(Error::msg)
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTime {
pub(crate) stage1: Time,
pub(crate) stage2: Time,
pub(crate) stage3: Time,
pub(crate) stage4: Time,
pub(crate) total: Time,
}
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct Time {
real: u64,
user: u64,
sys: u64,
}
impl From<ProcessDuration> for Time {
fn from(p_duration: ProcessDuration) -> Self {
Self {
real: p_duration.real.as_micros() as u64,
user: p_duration.user.as_micros() as u64,
sys: p_duration.system.as_micros() as u64,
}
}
}
pub fn binary_encode<T: Serialize>(value: &T) -> Result<Vec<u8>> {
let mut encoder = FrameEncoder::new(Vec::new());
bincode::serialize_into(&mut encoder, value).map_err(Error::msg)?;
Ok(encoder.into_inner()?)
}
pub fn binary_decode<T: for<'de> Deserialize<'de>>(bytes: &[u8]) -> Result<T> {
let decoder = FrameDecoder::new(bytes);
bincode::deserialize_from(decoder).map_err(Error::msg)
}
#[cfg(test)]
mod tests {
use super::KeyPair;
use crate::{
acc::{compute_set_operation_final, compute_set_operation_intermediate, AccValue, Op},
chain::{
block::Height,
object::Object,
query::query_plan::{QPKeywordNode, QPNode, QPUnion},
},
digest::Digestible,
set,
utils::{binary_decode, binary_encode, load_raw_obj_from_str},
};
use petgraph::Graph;
use std::collections::BTreeMap;
#[test]
fn test_create_id() {
create_id_type_by_u32!(TestId);
assert_eq!(TestId::next_id(), TestId(0));
assert_eq!(TestId::next_id(), TestId(1));
assert_eq!(TestId::next_id(), TestId(2));
}
#[test]
fn test_load_raw_obj() {
let input = "1\t[1,2]\t{a,b}\n2 [ 3, 4 ] { c, d, }\n2\t[ 5, 6 ]\t { e }\n";
let expect = {
let mut exp: BTreeMap<Height, Vec<Object<u32>>> = BTreeMap::new();
exp.insert(
Height(1),
vec![Object {
blk_height: Height(1),
num_data: vec![1, 2],
keyword_data: ["a".to_owned(), "b".to_owned()].iter().cloned().collect(),
}],
);
exp.insert(
Height(2),
vec![
Object {
blk_height: Height(2),
num_data: vec![3, 4],
keyword_data: ["c".to_owned(), "d".to_owned()].iter().cloned().collect(),
},
Object {
blk_height: Height(2),
num_data: vec![5, 6],
keyword_data: ["e".to_owned()].iter().cloned().collect(),
},
],
);
exp
};
assert_eq!(load_raw_obj_from_str(&input).unwrap(), expect);
}
#[test]
fn test_maintain_key() {
let dir = tempfile::tempdir().unwrap();
let path = dir.path().join("key");
let q: u64 = 10;
let rng = rand::thread_rng();
let key_pair = KeyPair::gen(q, rng);
key_pair.save(path.clone()).unwrap();
let read_key_pair = KeyPair::load(&path).unwrap();
assert_eq!(key_pair, read_key_pair);
}
#[test]
fn test_petgraph_serialize() {
let k1 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k2 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k3 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let k4 = QPKeywordNode {
blk_height: Height(0),
set: None,
};
let union = QPUnion { set: None };
let mut qp_dag = Graph::<QPNode<u32>, bool>::new();
let idx0 = qp_dag.add_node(QPNode::Keyword(Box::new(k1.clone())));
let idx1 = qp_dag.add_node(QPNode::Keyword(Box::new(k2.clone())));
let idx2 = qp_dag.add_node(QPNode::Keyword(Box::new(k3.clone())));
let idx3 = qp_dag.add_node(QPNode::Keyword(Box::new(k4.clone())));
let idx4 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx5 = qp_dag.add_node(QPNode::Union(union.clone()));
let idx6 = qp_dag.add_node(QPNode::Union(union.clone()));
qp_dag.add_edge(idx4, idx0, true);
qp_dag.add_edge(idx4, idx1, false);
qp_dag.add_edge(idx5, idx2, true);
qp_dag.add_edge(idx5, idx3, false);
qp_dag.add_edge(idx6, idx4, true);
qp_dag.add_edge(idx6, idx5, false);
let size_original = bincode::serialize(&qp_dag).unwrap().len();
qp_dag.remove_node(idx0);
qp_dag.remove_node(idx1);
qp_dag.remove_node(idx2);
qp_dag.remove_node(idx3);
let size_update = bincode::serialize(&qp_dag).unwrap().len();
println!("before: {}", size_original);
println!("after: {}", size_update);
assert_eq!(1, 1);
}
#[test]
fn test_compress() {
let value = String::from("hello world");
let bin = binary_encode(&value).unwrap();
assert_eq!(binary_decode::<String>(bin.as_ref()).unwrap(), value);
}
#[test]
fn test_acc_size() {
use crate::chain::tests::PUB_KEY;
let set = set! {11, 12, 13, 14, 15, 16, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39};
let acc = AccValue::from_set(&set, &PUB_KEY);
let acc_size = bincode::serialize(&acc).unwrap().len();
let dig = acc.to_digest();
let dig_size = bincode::serialize(&dig).unwrap().len();
assert_eq!(dig_size, 32);
assert_eq!(acc_size, 416);
}
#[test]
fn test_proof_size() {
use crate::chain::tests::PUB_KEY;
let set1 = set! {11, 17, 19, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30};
let set2 = set! {12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 33, 23, };
let acc1 = AccValue::from_set(&set1, &PUB_KEY);
let acc2 = AccValue::from_set(&set2, &PUB_KEY);
let (_set, _acc, inter_proof) =
compute_set_operation_intermediate(Op::Union, &set1, &acc1, &set2, &acc2, &PUB_KEY);
let (_set, final_proof) = compute_set_operation_final(Op::Union, &set1, &set2, &PUB_KEY);
let inter_size = bincode::serialize(&inter_proof).unwrap().len();
let final_size = bincode::serialize(&final_proof).unwrap().len();
assert_eq!(inter_size, 564);
assert_eq!(final_size, 204);
}
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId(u8);
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
struct TestId2(u64);
#[test]
fn test_int_size() {
let a: u8 = 1;
let b: u32 = 1;
let c: u64 = 1;
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
let c_size = bincode::serialize(&c).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 4);
assert_eq!(c_size, 8);
let a = TestId(1);
let b = TestId2(1);
let a_size = bincode::serialize(&a).unwrap().len();
let b_size = bincode::serialize(&b).unwrap().len();
assert_eq!(a_size, 1);
assert_eq!(b_size, 8);
let c = Some(b);
let d: Option<TestId2> = None;
let c_size = bincode::serialize(&c).unwrap().len();
let d_size = bincode::serialize(&d).unwrap().len();
assert_eq!(c_size, 9);
assert_eq!(d_size, 1);
}
#[test]
fn test_str_size() {
let a: smol_str::SmolStr = smol_str::SmolStr::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a: String = String::from("");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 8);
let a = String::from("53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78");
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
let a = smol_str::SmolStr::from(
"53c79113311e8a8ec291d412d1572516d0356a5c3aced0b108e0ad04c440de78",
);
let str_size = bincode::serialize(&a).unwrap().len();
assert_eq!(str_size, 72);
}
}
| {
continue;
} | conditional_block |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> |
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
}
} | {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
} | identifier_body |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
} | } | random_line_split | |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct | {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
}
} | HuffmanDecoderBuilder | identifier_name |
huffman.rs | use std::{cmp, io, usize};
use bitstream::BitRead;
use error::{Error, Result};
use util::{self, Bits};
#[derive(Debug)]
pub struct HuffmanDecoder {
lookup_table: LookupTable,
long_codes: Box<[LongCode]>,
max_code_len: usize,
}
impl HuffmanDecoder {
pub fn builder(lookup_table_bits: usize) -> HuffmanDecoderBuilder {
assert!(lookup_table_bits > 0 && lookup_table_bits < 32);
let lookup_table_len = if lookup_table_bits == 0 {
0
} else {
1 << lookup_table_bits
};
let lookup_entries = vec![LookupEntry::Null; lookup_table_len];
let long_codes = Vec::new();
HuffmanDecoderBuilder {
lookup_table: LookupTable {
entries: lookup_entries.into_boxed_slice(),
len_bits: lookup_table_bits,
},
long_codes: long_codes,
cur_codes: [None; 31],
max_code_len: 0,
}
}
pub fn decode<R: BitRead>(&self, reader: &mut R) -> Result<u32> {
let lookup_len_bits = cmp::min(self.max_code_len, self.lookup_table.len_bits);
let (mut code_bits, mut read) = try!(reader.try_read_u32_bits(lookup_len_bits));
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected EOF while reading Huffman code")));
}
let entry = &self.lookup_table.entries[code_bits as usize];
let code = match entry {
&LookupEntry::Code(code) => code,
&LookupEntry::LongCode => {
let r = try!(reader.try_read_u32_bits(self.max_code_len - lookup_len_bits));
read += r.1;
if read == 0 {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
}
code_bits |= r.0 << lookup_len_bits;
try!(self.find_long_code(code_bits, read))
},
&LookupEntry::Null => return Err(Error::Undecodable("Matched a null Huffman code entry")),
};
if code.len < read {
let unread_len = read - code.len;
let unread_bits = code_bits >> code.len;
reader.unread_u32_bits(unread_bits, unread_len);
} else if code.len > read |
Ok(code.value)
}
fn find_long_code(&self, bits: u32, len: usize) -> Result<CodeValue> {
// TODO: Use binary search here.
self.long_codes.iter()
.filter(|lc| lc.len <= len &&
lc.code.ls_bits(lc.len) == bits.ls_bits(lc.len))
.next()
.map(|lc| CodeValue {
value: lc.value,
len: lc.len,
})
.ok_or_else(|| Error::Undecodable("Incomplete or unknown Huffman code"))
}
}
pub struct HuffmanDecoderBuilder {
lookup_table: LookupTable,
long_codes: Vec<LongCode>,
/// Current lowest codes for each code length (length 1 is at index 0).
cur_codes: [Option<u32>; 31],
max_code_len: usize,
}
impl HuffmanDecoderBuilder {
pub fn create_code(&mut self, value: u32, len: usize) -> Result<()> {
let code_straight = try!(self.next_code(len));
let code = code_straight.reverse_bits() >> (32 - len);
let code = Code { code: code, len: len };
let value = CodeValue {
value: value,
len: len,
};
let is_long_code = if !self.lookup_table.is_empty() && len > 0 {
let lookup_table_len = self.lookup_table.len_bits;
let (entry, is_long_code) = if len <= lookup_table_len {
(LookupEntry::Code(value), false)
} else {
(LookupEntry::LongCode, true)
};
self.lookup_table.set(code.truncate(lookup_table_len), entry);
is_long_code
} else {
true
};
if is_long_code {
let lc = LongCode {
sort_key: code_straight,
code: code.code,
value: value.value,
len: len,
};
self.long_codes.push(lc);
}
Ok(())
}
pub fn build(mut self) -> HuffmanDecoder {
for lc in self.long_codes.iter_mut() {
lc.pad_sort_key(self.max_code_len);
}
self.long_codes.sort_by_key(|lc| lc.sort_key);
HuffmanDecoder {
lookup_table: self.lookup_table,
long_codes: self.long_codes.into_boxed_slice(),
max_code_len: self.max_code_len,
}
}
fn next_code(&mut self, len: usize) -> Result<u32> {
let r = try!(self.do_next_code(len));
if len > self.max_code_len {
self.max_code_len = len;
}
Ok(r)
}
fn do_next_code(&mut self, len: usize) -> Result<u32> {
assert!(len > 0 && len < 32);
let idx = len - 1;
if self.cur_codes[idx].is_none() {
let r = if idx > 0 {
try!(self.do_next_code(idx)) << 1
} else {
0
};
self.cur_codes[idx] = Some(r);
return Ok(r);
}
let cur_code_bits = self.cur_codes[idx].unwrap();
if cur_code_bits & 1 == 0 {
let cur_code_bits = cur_code_bits | 1;
self.cur_codes[idx] = Some(cur_code_bits);
return Ok(cur_code_bits);
}
if len == 1 {
return Err(Error::Undecodable("Overspecified Huffman tree"));
}
let cur_code_bits = try!(self.do_next_code(idx)) << 1;
self.cur_codes[idx] = Some(cur_code_bits);
Ok(cur_code_bits)
}
}
#[derive(Clone, Copy, Debug)]
struct Code {
code: u32,
len: usize,
}
impl Code {
pub fn truncate(&self, len: usize) -> Self {
if self.len <= len {
*self
} else {
Code {
code: self.code.ls_bits(len),
len: len,
}
}
}
}
#[derive(Clone, Copy, Debug)]
struct CodeValue {
value: u32,
len: usize,
}
#[derive(Clone, Copy, Debug)]
struct LongCode {
sort_key: u32,
code: u32,
value: u32,
len: usize,
}
impl LongCode {
pub fn pad_sort_key(&mut self, len: usize) {
assert!(len >= self.len && len <= 32);
self.sort_key <<= len - self.len;
}
}
#[derive(Debug)]
struct LookupTable {
entries: Box<[LookupEntry]>,
len_bits: usize,
}
impl LookupTable {
pub fn is_empty(&self) -> bool {
self.len_bits == 0
}
pub fn set(&mut self, code: Code, entry: LookupEntry) {
assert!(code.len <= self.len_bits);
let mut index = code.code as usize;
let last_index = ((self.entries.len() - 1) & !util::lsb_mask(code.len) as usize) | index;
let step = 1 << code.len;
loop {
assert!(match self.entries[index] {
LookupEntry::Null | LookupEntry::LongCode => true,
_ => false,
});
self.entries[index] = entry;
if index == last_index {
break;
}
index += step;
}
}
}
#[derive(Clone, Copy, Debug)]
enum LookupEntry {
Null,
Code(CodeValue),
LongCode,
}
#[cfg(test)]
mod tests {
use std::cmp;
use std::io::Cursor;
use super::*;
use bitstream::BitReader;
use error::ErrorKind;
fn new_bit_reader(bits: &str) -> BitReader<Cursor<Vec<u8>>> {
let mut buf = Vec::new();
let mut byte = 0;
let mut bit_pos = 0;
for c in bits.chars() {
match c {
'0' => {},
'1' => byte |= 1 << bit_pos,
_ => continue,
}
if bit_pos == 7 {
buf.push(byte);
byte = 0;
bit_pos = 0;
} else {
bit_pos += 1;
}
}
if bit_pos != 0 {
buf.push(byte);
}
BitReader::new(Cursor::new(buf))
}
fn test_next_code(check_underspec: bool, input: &[usize], expected: &[u32]) {
assert!(!input.is_empty());
assert_eq!(input.len(), expected.len());
let mut b = HuffmanDecoder::builder(1);
for (&inp, &exp) in input.iter().zip(expected.iter()) {
let act = b.next_code(inp).unwrap();
/*let code_str = format!("{:032b}", act);
println!("{:2} {}", inp, &code_str[code_str.len() - inp as usize..]);
println!("cur_codes:");
for (i, &c) in b.cur_codes.iter().enumerate() {
if let Some(c) = c {
println!(" {:2} {:b}", i + 1, c);
}
}*/
assert_eq!(act, exp);
}
assert_eq!(b.max_code_len, *input.iter().max().unwrap());
if check_underspec {
for i in 1..32 {
let c = b.next_code(i);
if c.is_ok() {
println!("Underspecified: {} -> {:b}", i, c.as_ref().unwrap());
}
assert_eq!(c.err().unwrap().kind(), ErrorKind::Undecodable);
}
}
}
#[test]
fn next_code_1() {
test_next_code(true,
&[2, 4, 4, 4, 4, 2, 3, 3],
&[0b00, 0b0100, 0b0101, 0b0110, 0b0111, 0b10, 0b110, 0b111]);
}
#[test]
fn next_code_2() {
test_next_code(true,
&[3, 1, 2, 3],
&[0b000, 0b1, 0b01, 0b001]);
}
#[test]
fn next_code_3() {
test_next_code(false,
&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
&[0b0000000000, 0b0000001, 0b00000001, 0b0000000001000, 0b000000001, 0b000001, 0b0000100, 0b00000000011, 0b0000101000, 0b00001011, 0b00001100, 0b000000000101, 0b00000000010010000, 0b00000000010010001, 0b00000000010010010, 0b00000000010010011, 0b0000111, 0b00010, 0b00011, 0b000010101, 0b001000, 0b0011, 0b0100, 0b00001101, 0b00100100, 0b00101, 0b01010, 0b00100101, 0b0000000001001010, 0b00000000010011, 0b0000101001000, 0b0000000001001011, 0b0010011, 0b01011, 0b01100, 0b0110100, 0b011011, 0b100, 0b101, 0b01110, 0b01101010, 0b01111]);
}
#[test]
fn overspecified() {
let mut b = HuffmanDecoder::builder(1);
b.next_code(1).unwrap();
b.next_code(1).unwrap();
assert_eq!(b.next_code(1).err().unwrap().kind(), ErrorKind::Undecodable);
}
fn test_decode(code_lens: &[usize], input: &str, expected: &[u32]) {
let max_code_len = *code_lens.iter().max().unwrap();
// Without long codes.
test_decode_(max_code_len, code_lens, input, expected);
// With long codes.
if max_code_len > 1 {
test_decode_(cmp::max(max_code_len as isize - 4, 1) as usize, code_lens, input, expected);
}
}
fn test_decode_(lookup_table_bits: usize, code_lens: &[usize], input: &str, expected: &[u32]) {
let mut b = HuffmanDecoder::builder(lookup_table_bits);
for (i, &code_len) in code_lens.iter().enumerate() {
b.create_code(i as u32, code_len).unwrap();
}
let d = b.build();
let mut reader = new_bit_reader(input);
for exp in expected {
assert_eq!(d.decode(&mut reader).unwrap(), *exp);
}
}
#[test]
fn decode_1() {
/*
0 2 codeword 00
1 4 codeword 0100
2 4 codeword 0101
3 4 codeword 0110
4 4 codeword 0111
5 2 codeword 10
6 3 codeword 110
7 3 codeword 111 */
test_decode(&[2, 4, 4, 4, 4, 2, 3, 3],
"00 111 0111 0110 110 110 111",
&[0, 7, 4, 3, 6, 6, 7]);
}
#[test]
fn decode_2() {
test_decode(&[10, 7, 8, 13, 9, 6, 7, 11, 10, 8, 8, 12, 17, 17, 17, 17, 7, 5, 5, 9, 6, 4, 4, 8, 8, 5, 5, 8, 16, 14, 13, 16, 7, 5, 5, 7, 6, 3, 3, 5, 8, 5],
"001000 0000000001001011 100 000001 0000000000 01111 00010 unused: 011011",
&[20, 31, 37, 5, 0, 41, 17]);
}
} | {
return Err(Error::Io(io::Error::new(io::ErrorKind::UnexpectedEof,
"Incomplete Huffman code")));
} | conditional_block |
audio_feature.py | #coding=utf-8
'''
音频特征提取类, mfcc量化特征 和 指纹特征
'''
import os
import sys
import scipy
import librosa
import numpy as np
import pandas as pd
class FeatureType:
FEATURE_MFCC = 0 # mfcc量化特征
FEATURE_FINGERS = 1 # 指纹特征
class AudioFeature():
def __init__(self, n_fft=400, hop_length=200):
self.n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, feature_type):
if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset,
zone_t_size)
paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side='right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
| zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
plus = min(quan_level, max(0, plus))
word = word * quan_level + plus
if zero_num == len(l):
word = 0
feat_list.append(word)
words_list.append(feat_list)
feature = np.array(words_list)
return feature
class Audio:
"""音频类
"""
def __init__(self, audio_path:str, start_time:int=0, end_time:int=None):
self.audio_obj = AudioFeature()
self.audio_path = audio_path
self.audio_name = os.path.basename(audio_path).split(".")[0]
self.start_time = start_time
self.end_time = end_time
self.get_audio_params(self.audio_path)
def get_audio_params(self, audio_path:str):
# self.y, self.sr = read_audio(audio_path, 0, None)
self.y, self.sr = librosa.load(audio_path, sr=None, mono=True)
self.audio_feature = self.audio_obj.get_audio_feature(self.y, self.sr, 1)
print("path:", self.audio_path, " sr:", self.sr, " duration:", len(self.y)/self.sr, " feature.shape:", np.array(self.audio_feature).shape) | #量化
| conditional_block |
audio_feature.py | #coding=utf-8
'''
音频特征提取类, mfcc量化特征 和 指纹特征
'''
import os
import sys
import scipy
import librosa
import numpy as np
import pandas as pd
class FeatureType:
FEATURE_MFCC = 0 # mfcc量化特征
FEATURE_FINGERS = 1 # 指纹特征
class AudioFeature():
def __init__(self, n_fft=400, hop_length=200):
self.n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, feature_type):
if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset, | paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side='right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
#量化
zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
plus = min(quan_level, max(0, plus))
word = word * quan_level + plus
if zero_num == len(l):
word = 0
feat_list.append(word)
words_list.append(feat_list)
feature = np.array(words_list)
return feature
class Audio:
"""音频类
"""
def __init__(self, audio_path:str, start_time:int=0, end_time:int=None):
self.audio_obj = AudioFeature()
self.audio_path = audio_path
self.audio_name = os.path.basename(audio_path).split(".")[0]
self.start_time = start_time
self.end_time = end_time
self.get_audio_params(self.audio_path)
def get_audio_params(self, audio_path:str):
# self.y, self.sr = read_audio(audio_path, 0, None)
self.y, self.sr = librosa.load(audio_path, sr=None, mono=True)
self.audio_feature = self.audio_obj.get_audio_feature(self.y, self.sr, 1)
print("path:", self.audio_path, " sr:", self.sr, " duration:", len(self.y)/self.sr, " feature.shape:", np.array(self.audio_feature).shape) | zone_t_size)
| random_line_split |
audio_feature.py | #coding=utf-8
'''
音频特征提取类, mfcc量化特征 和 指纹特征
'''
import os
import sys
import scipy
import librosa
import numpy as np
import pandas as pd
class FeatureType:
FEATURE_MFCC = 0 # mfcc量化特征
FEATURE_FINGERS = 1 # 指纹特征
class AudioFeature():
def __init__(self, n_fft=400, hop_length=200):
self.n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, feature_type):
if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset,
zone_t_size)
paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side= | = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
#量化
zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
plus = min(quan_level, max(0, plus))
word = word * quan_level + plus
if zero_num == len(l):
word = 0
feat_list.append(word)
words_list.append(feat_list)
feature = np.array(words_list)
return feature
class Audio:
"""音频类
"""
def __init__(self, audio_path:str, start_time:int=0, end_time:int=None):
self.audio_obj = AudioFeature()
self.audio_path = audio_path
self.audio_name = os.path.basename(audio_path).split(".")[0]
self.start_time = start_time
self.end_time = end_time
self.get_audio_params(self.audio_path)
def get_audio_params(self, audio_path:str):
# self.y, self.sr = read_audio(audio_path, 0, None)
self.y, self.sr = librosa.load(audio_path, sr=None, mono=True)
self.audio_feature = self.audio_obj.get_audio_feature(self.y, self.sr, 1)
print("path:", self.audio_path, " sr:", self.sr, " duration:", len(self.y)/self.sr, " feature.shape:", np.array(self.audio_feature).shape) | 'right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f | identifier_body |
audio_feature.py | #coding=utf-8
'''
音频特征提取类, mfcc量化特征 和 指纹特征
'''
import os
import sys
import scipy
import librosa
import numpy as np
import pandas as pd
class FeatureType:
FEATURE_MFCC = 0 # mfcc量化特征
FEATURE_FINGERS = 1 # 指纹特征
class AudioFeature():
def __init__(self, n_fft=400, hop_length=200):
self.n_fft = n_fft
self.hop_length = hop_length
def frame_to_second(self, frame, sr=16000):
return (frame * self.hop_length + self.n_fft / 2) / sr
def second_to_frame(self, second, sr=16000):
return (second * sr - (self.n_fft/2)) / self.hop_length if second > 0 else 0
def get_audio_feature(self, audio_data, audio_sr, fe | if feature_type == FeatureType.FEATURE_MFCC:
return self.get_mfcc_quantify(audio_data, audio_sr)
elif feature_type == FeatureType.FEATURE_FINGERS:
return self.get_fingerprints(audio_data, audio_sr)
def get_fingerprints(self, audio_data, audio_sr=16000):
'''音频指纹特征
'''
Sxx, f, t = self._get_spectrogram(audio_data, audio_sr)
f_step = np.median(f[1:-1] - f[:-2]) #np.median() 计算中位数
t_step = np.median(t[1:-1] - t[:-2])
peak_locations, max_filter, max_filter_size = self._find_spectrogram_peaks(Sxx, t_step, audio_sr)
if peak_locations.size == 0:
return []
fingerprints = self._get_fingerprints_from_peaks(len(f) - 1, f_step, peak_locations, len(t) - 1, t_step)
return fingerprints
def _get_spectrogram(self, audio_data, audio_sr):
f, t, Sxx = scipy.signal.spectrogram(audio_data, fs=audio_sr,
scaling='spectrum',
mode='magnitude',
window='hann',
nperseg=self.n_fft,
noverlap=self.hop_length)
return Sxx, f, t
def _find_spectrogram_peaks(self, Sxx, t_step, audio_sr, f_size_hz=500, t_size_sec=2):
max_f = audio_sr // 2
f_bins = Sxx.shape[0]
f_per_bin = max_f / f_bins
f_size = int(np.round(f_size_hz / f_per_bin))
t_size = int(np.round(t_size_sec / t_step))
max_filter = scipy.ndimage.filters.maximum_filter(Sxx, size=(f_size, t_size), mode='constant')
peak = (Sxx == max_filter) & (Sxx != 0)
peak_locations = np.argwhere((Sxx == max_filter) & (Sxx != 0))
return peak_locations, max_filter, (t_size, f_size)
def _get_fingerprints_from_peaks(self, f_max, f_step, peak_locations, t_max, t_step):
n_peaks = len(peak_locations) #the number of peak points
# 1400hz tall zone box
zone_f_size = 1400 // f_step
# 6 second wide zone box
zone_t_size = 6 // t_step
# start one spectrogram time segment after the current one
zone_t_offset = 1
df_peak_locations = pd.DataFrame(peak_locations, columns=['f', 't'])
# sort by time
df_peak_locations.sort_values(by='t', ascending=True, inplace=True)
peak_locations_t_sort = df_peak_locations['t']
# sort by frequency
peak_locations_f_sort = df_peak_locations['f'].sort_values(ascending=True)
fingerprints = []
avg_n_pairs_per_peak = 0
save_num = 0
for i, anchor in df_peak_locations.iterrows():
anchor_t, anchor_f = anchor['t'], anchor['f'] # 锚点的坐标
zone_freq_start, zone_freq_end, zone_time_start, zone_time_end = self._get_target_zone_bounds(anchor_f,
anchor_t,
f_max, t_max,
zone_f_size,
zone_t_offset,
zone_t_size)
paired_df_peak_locations, n_pairs = self._query_dataframe_for_peaks_in_target_zone_binary_search(
df_peak_locations, peak_locations_t_sort, peak_locations_f_sort,
zone_freq_end, zone_freq_start, zone_time_end, zone_time_start)
avg_n_pairs_per_peak += n_pairs
for j, second_peak in paired_df_peak_locations.iterrows():
second_peak_f = second_peak['f']
second_peak_t_ = second_peak['t']
time_delta = second_peak_t_ - anchor_t
combined_key = self._combine_parts_into_key(anchor_f, second_peak_f, time_delta)
fingerprint = [int(combined_key), int(anchor_t), int(second_peak_t_)]
fingerprints.append(fingerprint)
avg_n_pairs_per_peak /= n_peaks
return fingerprints
def _get_target_zone_bounds(self, anchor_f, anchor_t, f_max, t_max, zone_f_size, zone_t_offset, zone_t_size):
"""
anchor_f:锚点的频率,
anchor_t:锚点的时间,
f_max, t_max = 多少个f, 多少个t
"""
zone_time_start = anchor_t + zone_t_offset #起点:锚点的时间 + 1
zone_time_end = min(t_max, zone_time_start + zone_t_size)
zone_freq_start = max(0, anchor_f - (zone_f_size // 2))
zone_freq_end = min(f_max, zone_freq_start + zone_f_size)
if zone_freq_end == f_max:
zone_freq_start = zone_freq_end - zone_f_size
return int(zone_freq_start), int(zone_freq_end), int(zone_time_start), int(zone_time_end)
def _query_dataframe_for_peaks_in_target_zone_binary_search(self, df_peak_locations, peak_locations_t,
peak_locations_f,
zone_freq_end, zone_freq_start,
zone_time_end, zone_time_start):
start = peak_locations_t.searchsorted(zone_time_start, side='left')
end = peak_locations_t.searchsorted(zone_time_end, side='right')
if isinstance(start, np.ndarray):
start = start[0]
if isinstance(end, np.ndarray):
end = end[0]
t_index = peak_locations_t.index[start:end]
f_start = peak_locations_f.searchsorted(zone_freq_start, side='left')
f_end = peak_locations_f.searchsorted(zone_freq_end, side='right')
if isinstance(f_start, np.ndarray):
f_start = f_start[0]
if isinstance(f_end, np.ndarray):
f_end = f_end[0]
f_index = peak_locations_f.index[f_start:f_end]
paired_df_peak_locations = df_peak_locations.loc[t_index & f_index]
n_pairs = len(paired_df_peak_locations)
return paired_df_peak_locations, n_pairs
def _combine_parts_into_key(self, peak_f, second_peak_f, time_delta):
peak_f = np.uint32(peak_f)
second_peak_f = np.uint32(second_peak_f)
time_delta = np.uint32(time_delta)
first_part = np.left_shift(peak_f, np.uint32(20))
second_part = np.left_shift(second_peak_f, np.uint32(10))
combined_key = first_part + second_part + time_delta
return combined_key
@staticmethod
def get_mfcc_quantify(audio_data, audio_sr=16000, n_mfcc=12, n_fft=1024, hop_length=128):
'''
mfcc量化特征
return shape=(duration, audio_sr//hop_length + 1)
'''
if len(audio_data.shape) > 1:
audio_data = np.mean(audio_data, axis=0) # 多声道的取平均值
duration = audio_data.shape[0]//audio_sr
quan_level = 6
value = 64/quan_level #quan_level最大只能是6,超过6计算出的word值就可能超过int64所表达范围了
words_list = []
for i in range(duration):
#提取每秒的特征
one_data = audio_data[i*audio_sr:(i+1)*audio_sr] #1s的数据
one_mfcc_feat = librosa.feature.mfcc(y=one_data, sr=audio_sr, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length) #提取mfcc特征
cur_feat = one_mfcc_feat.T
r, c = cur_feat.shape #(126, n_mfcc)
feat_list = []
pre_feat = [0]*c
for i in range(r):
l = []
for j in range(c):
if i == 0 or i == r-1:
v = cur_feat[i][j]
else:
v = (cur_feat[i-1][j] + cur_feat[i][j] + cur_feat[i+1][j])/3 #平滑
l.append(v)
l += pre_feat
pre_feat = l[:c]
#量化
zero_num = 0
word = 0
for v in l:
if v >= -1 and v <= 1:
zero_num += 1
plus = int((v + 32)/value)
plus = min(quan_level, max(0, plus))
word = word * quan_level + plus
if zero_num == len(l):
word = 0
feat_list.append(word)
words_list.append(feat_list)
feature = np.array(words_list)
return feature
class Audio:
"""音频类
"""
def __init__(self, audio_path:str, start_time:int=0, end_time:int=None):
self.audio_obj = AudioFeature()
self.audio_path = audio_path
self.audio_name = os.path.basename(audio_path).split(".")[0]
self.start_time = start_time
self.end_time = end_time
self.get_audio_params(self.audio_path)
def get_audio_params(self, audio_path:str):
# self.y, self.sr = read_audio(audio_path, 0, None)
self.y, self.sr = librosa.load(audio_path, sr=None, mono=True)
self.audio_feature = self.audio_obj.get_audio_feature(self.y, self.sr, 1)
print("path:", self.audio_path, " sr:", self.sr, " duration:", len(self.y)/self.sr, " feature.shape:", np.array(self.audio_feature).shape) | ature_type):
| identifier_name |
Experiments.py | from abc import ABC, abstractmethod
import torch
from .metrics import nltk_bleu
import numpy as np
import os
import sys
from .useful_utils import string_split_v3, string_split_v1, chunks
import pytrec_eval
import json
import subprocess
import csv
import re
import ast
from tqdm.auto import tqdm
from .bleu_score import compute_bleu
class Experiment(ABC):
def __init__(self, task_data):
|
@abstractmethod
def evaluate(self, prediction_fn):
"""
This function should compute all relevant metrics to the task,
prediction_fn: (inp) -> (pred): it's an end-to-end prediction function from any model.
returns: dict: metrics
"""
pass
def save(self, path):
"""
Saves the entire object ready to be loaded.
"""
torch.save(self, path)
def load(path):
"""
STATIC METHOD
accessed through class, loads a pre-existing experiment.
"""
return torch.load(path)
class TranslationExperiment(Experiment):
def __init__(self, task_data, src_splitter=string_split_v1, tgt_splitter=string_split_v1):
"""
task_data: [(str, str)]: this is the expected data format.
>>> from src.Experiments import TranslationExperiment
>>> translation_experiment = TranslationExperiment(validation_pairs)
>>> def simple_translate(src):
>>> return "return output"
>>> translation_experiment.evaluate(simple_translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {}
for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results:
eval_results[q_id] = {}
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU":0.6},...]
'''
for sample_obj in samples:
pred_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['predicted_seq']])
refrence_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['target_seq']])
if pred_tokens==[]:
pred_tokens = ['']
sample_obj["nltk_BLEU"] = nltk_bleu(refrence_tokens, pred_tokens)
if self.debug:
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples], [self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
print(f'corpus_official_BLEU: {corpus_bleu}')
print(f'nltk_BLEU: {nltk_BLEU}')
return samples
def overall(self, samples):
samples = self(samples)
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples],
[self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
return {'nltk_BLEU':nltk_BLEU, 'corpus_BLEU':corpus_bleu}
def tokenize_for_bleu_eval(self, code):
"""
The tokenizer that we use for code submissions, from Wang Ling et al., Latent Predictor Networks for Code Generation (2016)
@param code: string containing a code snippet
@return: list of code tokens
"""
code = re.sub(r'([^A-Za-z0-9_])', r' \1 ', code)
code = re.sub(r'([a-z])([A-Z])', r'\1 \2', code)
code = re.sub(r'\s+', ' ', code)
code = code.replace('"', '`')
code = code.replace('\'', '`')
tokens = [t for t in code.split(' ') if t]
return tokens
class Compilability_Experiment():
def __init__(self, fields={}):
'''
an experiment to evaluate the vallidity of a sequence as actual compilable code. Here in Python 3.
'''
self.fields = {'code_field': 'code'}
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'code':'print("foo")'},...]
returns: [dict]: [{'code':'print("foo")', 'compiles':1},...]
'''
for sample_obj in samples:
try:
code = sample_obj[self.fields['code_field']]
ast.parse(code)
sample_obj['compiles'] = 1
except:
sample_obj['compiles'] = 0
return samples
def overall(self, samples):
samples = self(samples)
compilability_score = np.average([s["compiles"] for s in samples])
return {'compilability_score':compilability_score}
class RUN_File_Transform_Exporter():
def __init__(self, run_file_path, model_name='model_by_carlos'):
'''
A Transform Exporter that creates a RUN file from samples returnedd by a search engine.
'''
self.run_file_path = run_file_path
self.model_name = model_name
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
total_samples = 0
with open(self.run_file_path, 'w') as run_file:
for sample_obj in tqdm(samples, desc='Writing to RUN file', leave=False):
q_id = sample_obj['q_id']
search_results = sample_obj['search_results']
ordered_results = sorted(search_results, key=lambda res: res[1], reverse=True)
for idx, result in enumerate(ordered_results):
d_id, score = result
total_samples+=1
run_file.write(f"{q_id} Q0 {d_id} {idx+1} {score} {self.model_name}\n")
print(f"Successfully written {total_samples} samples from {len(samples)} queries run to: {self.run_file_path}") | """
task_data: [(str, str)]: input/target pairs for translation evaluation.
"""
self.task_data = task_data | identifier_body |
Experiments.py | from abc import ABC, abstractmethod
import torch
from .metrics import nltk_bleu
import numpy as np
import os
import sys
from .useful_utils import string_split_v3, string_split_v1, chunks
import pytrec_eval
import json
import subprocess
import csv
import re
import ast
from tqdm.auto import tqdm
from .bleu_score import compute_bleu
class Experiment(ABC):
def __init__(self, task_data):
"""
task_data: [(str, str)]: input/target pairs for translation evaluation.
"""
self.task_data = task_data
@abstractmethod
def evaluate(self, prediction_fn):
"""
This function should compute all relevant metrics to the task,
prediction_fn: (inp) -> (pred): it's an end-to-end prediction function from any model.
returns: dict: metrics
"""
pass
def save(self, path):
"""
Saves the entire object ready to be loaded.
"""
torch.save(self, path)
def load(path):
"""
STATIC METHOD
accessed through class, loads a pre-existing experiment.
"""
return torch.load(path)
class TranslationExperiment(Experiment):
def __init__(self, task_data, src_splitter=string_split_v1, tgt_splitter=string_split_v1):
"""
task_data: [(str, str)]: this is the expected data format.
>>> from src.Experiments import TranslationExperiment
>>> translation_experiment = TranslationExperiment(validation_pairs)
>>> def simple_translate(src):
>>> return "return output"
>>> translation_experiment.evaluate(simple_translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {}
for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results:
|
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU":0.6},...]
'''
for sample_obj in samples:
pred_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['predicted_seq']])
refrence_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['target_seq']])
if pred_tokens==[]:
pred_tokens = ['']
sample_obj["nltk_BLEU"] = nltk_bleu(refrence_tokens, pred_tokens)
if self.debug:
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples], [self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
print(f'corpus_official_BLEU: {corpus_bleu}')
print(f'nltk_BLEU: {nltk_BLEU}')
return samples
def overall(self, samples):
samples = self(samples)
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples],
[self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
return {'nltk_BLEU':nltk_BLEU, 'corpus_BLEU':corpus_bleu}
def tokenize_for_bleu_eval(self, code):
"""
The tokenizer that we use for code submissions, from Wang Ling et al., Latent Predictor Networks for Code Generation (2016)
@param code: string containing a code snippet
@return: list of code tokens
"""
code = re.sub(r'([^A-Za-z0-9_])', r' \1 ', code)
code = re.sub(r'([a-z])([A-Z])', r'\1 \2', code)
code = re.sub(r'\s+', ' ', code)
code = code.replace('"', '`')
code = code.replace('\'', '`')
tokens = [t for t in code.split(' ') if t]
return tokens
class Compilability_Experiment():
def __init__(self, fields={}):
'''
an experiment to evaluate the vallidity of a sequence as actual compilable code. Here in Python 3.
'''
self.fields = {'code_field': 'code'}
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'code':'print("foo")'},...]
returns: [dict]: [{'code':'print("foo")', 'compiles':1},...]
'''
for sample_obj in samples:
try:
code = sample_obj[self.fields['code_field']]
ast.parse(code)
sample_obj['compiles'] = 1
except:
sample_obj['compiles'] = 0
return samples
def overall(self, samples):
samples = self(samples)
compilability_score = np.average([s["compiles"] for s in samples])
return {'compilability_score':compilability_score}
class RUN_File_Transform_Exporter():
def __init__(self, run_file_path, model_name='model_by_carlos'):
'''
A Transform Exporter that creates a RUN file from samples returnedd by a search engine.
'''
self.run_file_path = run_file_path
self.model_name = model_name
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
total_samples = 0
with open(self.run_file_path, 'w') as run_file:
for sample_obj in tqdm(samples, desc='Writing to RUN file', leave=False):
q_id = sample_obj['q_id']
search_results = sample_obj['search_results']
ordered_results = sorted(search_results, key=lambda res: res[1], reverse=True)
for idx, result in enumerate(ordered_results):
d_id, score = result
total_samples+=1
run_file.write(f"{q_id} Q0 {d_id} {idx+1} {score} {self.model_name}\n")
print(f"Successfully written {total_samples} samples from {len(samples)} queries run to: {self.run_file_path}") | eval_results[q_id] = {} | conditional_block |
Experiments.py | from abc import ABC, abstractmethod
import torch
from .metrics import nltk_bleu
import numpy as np
import os
import sys
from .useful_utils import string_split_v3, string_split_v1, chunks
import pytrec_eval
import json
import subprocess
import csv
import re
import ast
from tqdm.auto import tqdm
from .bleu_score import compute_bleu
class Experiment(ABC):
def __init__(self, task_data):
"""
task_data: [(str, str)]: input/target pairs for translation evaluation.
"""
self.task_data = task_data
@abstractmethod
def evaluate(self, prediction_fn):
"""
This function should compute all relevant metrics to the task,
prediction_fn: (inp) -> (pred): it's an end-to-end prediction function from any model.
returns: dict: metrics
"""
pass
def save(self, path):
"""
Saves the entire object ready to be loaded.
"""
torch.save(self, path)
def load(path):
"""
STATIC METHOD
accessed through class, loads a pre-existing experiment.
"""
return torch.load(path)
class TranslationExperiment(Experiment):
def __init__(self, task_data, src_splitter=string_split_v1, tgt_splitter=string_split_v1):
"""
task_data: [(str, str)]: this is the expected data format.
>>> from src.Experiments import TranslationExperiment
>>> translation_experiment = TranslationExperiment(validation_pairs)
>>> def simple_translate(src):
>>> return "return output"
>>> translation_experiment.evaluate(simple_translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {} | eval_results[q_id] = {}
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU":0.6},...]
'''
for sample_obj in samples:
pred_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['predicted_seq']])
refrence_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['target_seq']])
if pred_tokens==[]:
pred_tokens = ['']
sample_obj["nltk_BLEU"] = nltk_bleu(refrence_tokens, pred_tokens)
if self.debug:
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples], [self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
print(f'corpus_official_BLEU: {corpus_bleu}')
print(f'nltk_BLEU: {nltk_BLEU}')
return samples
def overall(self, samples):
samples = self(samples)
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples],
[self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
return {'nltk_BLEU':nltk_BLEU, 'corpus_BLEU':corpus_bleu}
def tokenize_for_bleu_eval(self, code):
"""
The tokenizer that we use for code submissions, from Wang Ling et al., Latent Predictor Networks for Code Generation (2016)
@param code: string containing a code snippet
@return: list of code tokens
"""
code = re.sub(r'([^A-Za-z0-9_])', r' \1 ', code)
code = re.sub(r'([a-z])([A-Z])', r'\1 \2', code)
code = re.sub(r'\s+', ' ', code)
code = code.replace('"', '`')
code = code.replace('\'', '`')
tokens = [t for t in code.split(' ') if t]
return tokens
class Compilability_Experiment():
def __init__(self, fields={}):
'''
an experiment to evaluate the vallidity of a sequence as actual compilable code. Here in Python 3.
'''
self.fields = {'code_field': 'code'}
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'code':'print("foo")'},...]
returns: [dict]: [{'code':'print("foo")', 'compiles':1},...]
'''
for sample_obj in samples:
try:
code = sample_obj[self.fields['code_field']]
ast.parse(code)
sample_obj['compiles'] = 1
except:
sample_obj['compiles'] = 0
return samples
def overall(self, samples):
samples = self(samples)
compilability_score = np.average([s["compiles"] for s in samples])
return {'compilability_score':compilability_score}
class RUN_File_Transform_Exporter():
def __init__(self, run_file_path, model_name='model_by_carlos'):
'''
A Transform Exporter that creates a RUN file from samples returnedd by a search engine.
'''
self.run_file_path = run_file_path
self.model_name = model_name
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
total_samples = 0
with open(self.run_file_path, 'w') as run_file:
for sample_obj in tqdm(samples, desc='Writing to RUN file', leave=False):
q_id = sample_obj['q_id']
search_results = sample_obj['search_results']
ordered_results = sorted(search_results, key=lambda res: res[1], reverse=True)
for idx, result in enumerate(ordered_results):
d_id, score = result
total_samples+=1
run_file.write(f"{q_id} Q0 {d_id} {idx+1} {score} {self.model_name}\n")
print(f"Successfully written {total_samples} samples from {len(samples)} queries run to: {self.run_file_path}") | for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results: | random_line_split |
Experiments.py | from abc import ABC, abstractmethod
import torch
from .metrics import nltk_bleu
import numpy as np
import os
import sys
from .useful_utils import string_split_v3, string_split_v1, chunks
import pytrec_eval
import json
import subprocess
import csv
import re
import ast
from tqdm.auto import tqdm
from .bleu_score import compute_bleu
class Experiment(ABC):
def __init__(self, task_data):
"""
task_data: [(str, str)]: input/target pairs for translation evaluation.
"""
self.task_data = task_data
@abstractmethod
def evaluate(self, prediction_fn):
"""
This function should compute all relevant metrics to the task,
prediction_fn: (inp) -> (pred): it's an end-to-end prediction function from any model.
returns: dict: metrics
"""
pass
def | (self, path):
"""
Saves the entire object ready to be loaded.
"""
torch.save(self, path)
def load(path):
"""
STATIC METHOD
accessed through class, loads a pre-existing experiment.
"""
return torch.load(path)
class TranslationExperiment(Experiment):
def __init__(self, task_data, src_splitter=string_split_v1, tgt_splitter=string_split_v1):
"""
task_data: [(str, str)]: this is the expected data format.
>>> from src.Experiments import TranslationExperiment
>>> translation_experiment = TranslationExperiment(validation_pairs)
>>> def simple_translate(src):
>>> return "return output"
>>> translation_experiment.evaluate(simple_translate)
{'BLEU': 1.4384882092392364e-09}
"""
super().__init__(task_data)
self.src_splitter = src_splitter
self.tgt_splitter = tgt_splitter
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", batched=None):
"""
Produces evaluation scores and saves the results to a file. The tokenisation is done through string_split_v1.
So any non spaced text will be considered as one token.
prediction_fn: (str)->(str) or [str]->[str]
save_dir: str: folder to save the file
save_name: str: name of file
batched: int or None: size to use for the prediction function
"""
if batched:
src_sents = [src for (src, tgt) in self.task_data]
chunked_sents = list(chunks(src_sents, batched))
predictions = [prediction_fn(sents) for sents in tqdm.tqdm(chunked_sents, desc="predicting", total=len(chunked_sents))]
predictions = [val for sublist in predictions for val in sublist] # flattening
else:
predictions = [prediction_fn(src) for (src, tgt) in tqdm.tqdm(self.task_data, desc="predicting")]
# BLEU calculation
BLEU_scores = []
for (src, tgt), pred in tqdm.tqdm(list(zip(self.task_data, predictions)), desc="calculating bleu"):
BLEU_score = nltk_bleu(self.tgt_splitter(tgt), self.tgt_splitter(pred))
BLEU_scores.append(BLEU_score)
total_BLEU = np.average(BLEU_scores)
# Write to file
if save_dir != None:
save_path = os.path.join(save_dir, save_name)
print(f"saving translation eval to file: {save_path}")
with open(save_path, "w", encoding="utf-8") as out_fp:
for (src, tgt), pred, BLEU in zip(self.task_data, predictions, BLEU_scores):
out_fp.write("SRC :" + src + "\n")
out_fp.write("TGT :" + tgt + "\n")
out_fp.write("PRED :" + pred + "\n")
out_fp.write("BLEU :" + str(BLEU) + "\n")
out_fp.write("\n")
out_fp.write("\n\n| EVALUATION | BLEU: {:5.2f} |\n".format(total_BLEU))
print("| EVALUATION | BLEU: {:5.3f} |".format(total_BLEU))
return {"BLEU":total_BLEU}
class CAsT_experiment(Experiment):
def __init__(self, topics):
'''
topics: (context:[q_ids], q_id, q_rel:[d_ids])
'''
self.topics = topics
def evaluate(self, prediction_fn, save_dir=None, save_name="translation_eval.txt", hits=100):
full_q_rels = {}
run = {}
for topic in self.topics:
pred_d_ids = prediction_fn(topic, hits=100)
context, q_id, q_rels = topic
full_q_rels[q_id] = {d_id:1 for d_id in q_rels}
run[q_id] = {d_id:score for (d_id, score) in pred_d_ids}
evaluator = pytrec_eval.RelevanceEvaluator(full_q_rels, {'map', 'ndcg'})
results = evaluator.evaluate(run)
aggregate = self.dict_mean(list(results.values()))
return aggregate, results
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
class TREC_Eval_Command_Experiment():
def __init__(self, trec_eval_command='trec_eval -q -c -M1000 -m ndcg_cut.3,5,10,15,20,100,1000 -m all_trec qRELS RUN_FILE',
relevant_metrics=['ndcg_cut_3', 'ndcg_cut_5', 'ndcg_cut_1000', 'map_cut_1000', 'recall_500', 'recall_1000'],
q_rel_file='datasets/TREC_CAsT/2020qrels.txt'):
'''
This is an experiment transform that uses the official trec_eval command to compute scores for each query
and return valid results according to the command specified.
'''
self.trec_eval_command = trec_eval_command
self.relevant_metrics = relevant_metrics
self.q_rel_file = q_rel_file
self.temp_run_file = '/tmp/temp_run_by_carlos.run'
self.run_file_exporter = RUN_File_Transform_Exporter(self.temp_run_file, model_name='temp_model_by_carlos')
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
returns: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...], 'ndcg_cut_3':0.33, 'ndcg_cut_5'...},...]
'''
self.run_file_exporter(samples)
resolved_command = self.trec_eval_command.replace('qRELS', self.q_rel_file).replace('RUN_FILE', self.temp_run_file)
print(f'Running the following command: {resolved_command} > /tmp/temp_run.eval')
os.system(f'{resolved_command} > /tmp/temp_run.eval')
with open('/tmp/temp_run.eval', 'r') as eval_f:
eval_results = {}
for row in eval_f:
if not any([metric in row for metric in self.relevant_metrics]):
continue
metric, q_id, score = row.split()
if q_id not in eval_results:
eval_results[q_id] = {}
eval_results[q_id][metric] = float(score)
for sample in samples:
if sample['q_id'] not in eval_results:
print(f"q_rel missing for q_id {sample['q_id']}. No scores added to sample")
continue
sample.update(eval_results[sample['q_id']])
return samples
class Ranking_Experiment():
def __init__(self, q_rels, save_dir=None, save_name="rerank_eval.run"):
'''
q_rels: dict: {'q_id':[d_id, d_id,...],...}
'''
pytrec_q_rels = {}
for q_id, d_ids in q_rels.items():
pytrec_q_rels[q_id] = {d_id:1 for d_id in d_ids}
self.evaluator = pytrec_eval.RelevanceEvaluator(pytrec_q_rels, {'map', 'ndcg_cut_3', 'set_recall', 'recip_rank'})
def dict_mean(self, dict_list):
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = sum(d[key] for d in dict_list) / len(dict_list)
return mean_dict
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
pytrec_run = {}
for sample_obj in samples:
q_id = sample_obj['q_id']
pytrec_run[q_id] = {}
for d_id, score in sample_obj['search_results']:
pytrec_run[q_id][d_id] = score
results = self.evaluator.evaluate(pytrec_run)
for sample_obj, result in zip(samples, results.values()):
sample_obj.update(result)
aggregate = self.dict_mean(list(results.values()))
return aggregate
class Sequence_BLEU_Experiment():
def __init__(self, fields={}, debug=True):
'''
An Experiment to evaluate sequence similarity through metrics like: BLEU or token accuracy.
'''
self.fields = {'predicted_seq':'predicted_seq', 'target_seq':'target_seq'}
self.debug = debug
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text"},...]
returns: [dict]: [{'target_seq':"taget text", 'predicted_seq':"pred text", "BELU":0.6},...]
'''
for sample_obj in samples:
pred_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['predicted_seq']])
refrence_tokens = self.tokenize_for_bleu_eval(sample_obj[self.fields['target_seq']])
if pred_tokens==[]:
pred_tokens = ['']
sample_obj["nltk_BLEU"] = nltk_bleu(refrence_tokens, pred_tokens)
if self.debug:
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples], [self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
print(f'corpus_official_BLEU: {corpus_bleu}')
print(f'nltk_BLEU: {nltk_BLEU}')
return samples
def overall(self, samples):
samples = self(samples)
corpus_bleu = compute_bleu([[self.tokenize_for_bleu_eval(s[self.fields['target_seq']])] for s in samples],
[self.tokenize_for_bleu_eval(s[self.fields['predicted_seq']]) for s in samples], smooth=False)[0]
nltk_BLEU = np.average([s["nltk_BLEU"] for s in samples])
return {'nltk_BLEU':nltk_BLEU, 'corpus_BLEU':corpus_bleu}
def tokenize_for_bleu_eval(self, code):
"""
The tokenizer that we use for code submissions, from Wang Ling et al., Latent Predictor Networks for Code Generation (2016)
@param code: string containing a code snippet
@return: list of code tokens
"""
code = re.sub(r'([^A-Za-z0-9_])', r' \1 ', code)
code = re.sub(r'([a-z])([A-Z])', r'\1 \2', code)
code = re.sub(r'\s+', ' ', code)
code = code.replace('"', '`')
code = code.replace('\'', '`')
tokens = [t for t in code.split(' ') if t]
return tokens
class Compilability_Experiment():
def __init__(self, fields={}):
'''
an experiment to evaluate the vallidity of a sequence as actual compilable code. Here in Python 3.
'''
self.fields = {'code_field': 'code'}
self.fields.update(fields)
def __call__(self, samples):
'''
samples: [dict]: [{'code':'print("foo")'},...]
returns: [dict]: [{'code':'print("foo")', 'compiles':1},...]
'''
for sample_obj in samples:
try:
code = sample_obj[self.fields['code_field']]
ast.parse(code)
sample_obj['compiles'] = 1
except:
sample_obj['compiles'] = 0
return samples
def overall(self, samples):
samples = self(samples)
compilability_score = np.average([s["compiles"] for s in samples])
return {'compilability_score':compilability_score}
class RUN_File_Transform_Exporter():
def __init__(self, run_file_path, model_name='model_by_carlos'):
'''
A Transform Exporter that creates a RUN file from samples returnedd by a search engine.
'''
self.run_file_path = run_file_path
self.model_name = model_name
def __call__(self, samples):
'''
samples: [dict]: [{'q_id':"xxx", 'search_results':[("MARCO_xxx", 0.63)...]},...]
'''
total_samples = 0
with open(self.run_file_path, 'w') as run_file:
for sample_obj in tqdm(samples, desc='Writing to RUN file', leave=False):
q_id = sample_obj['q_id']
search_results = sample_obj['search_results']
ordered_results = sorted(search_results, key=lambda res: res[1], reverse=True)
for idx, result in enumerate(ordered_results):
d_id, score = result
total_samples+=1
run_file.write(f"{q_id} Q0 {d_id} {idx+1} {score} {self.model_name}\n")
print(f"Successfully written {total_samples} samples from {len(samples)} queries run to: {self.run_file_path}") | save | identifier_name |
mod.rs | //! This mod implements `kubernetes_logs` source.
//! The scope of this source is to consume the log files that `kubelet` keeps
//! at `/var/log/pods` at the host of the k8s node when `vector` itself is
//! running inside the cluster as a `DaemonSet`.
#![deny(missing_docs)]
use crate::event::{self, Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs at.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplied
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed", ?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn run<O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file);
if annotator.annotate(&mut event, &file).is_none() {
emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error", ?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error", ?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
fn create_event(line: Bytes, file: &str) -> Event |
/// This function returns the default value for `self_node_name` variable
/// as it should be at the generated config file.
fn default_self_node_name_env_template() -> String {
format!("${{{}}}", SELF_NODE_NAME_ENV_KEY)
}
| {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
} | identifier_body |
mod.rs | //! This mod implements `kubernetes_logs` source.
//! The scope of this source is to consume the log files that `kubelet` keeps
//! at `/var/log/pods` at the host of the k8s node when `vector` itself is
//! running inside the cluster as a `DaemonSet`.
#![deny(missing_docs)]
use crate::event::{self, Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs at.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplied
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed", ?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn | <O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file);
if annotator.annotate(&mut event, &file).is_none() {
emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error", ?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error", ?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
fn create_event(line: Bytes, file: &str) -> Event {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
}
/// This function returns the default value for `self_node_name` variable
/// as it should be at the generated config file.
fn default_self_node_name_env_template() -> String {
format!("${{{}}}", SELF_NODE_NAME_ENV_KEY)
}
| run | identifier_name |
mod.rs | //! This mod implements `kubernetes_logs` source.
//! The scope of this source is to consume the log files that `kubelet` keeps
//! at `/var/log/pods` at the host of the k8s node when `vector` itself is
//! running inside the cluster as a `DaemonSet`.
#![deny(missing_docs)]
use crate::event::{self, Event};
use crate::internal_events::{KubernetesLogsEventAnnotationFailed, KubernetesLogsEventReceived};
use crate::kubernetes as k8s;
use crate::{
dns::Resolver,
shutdown::ShutdownSignal,
sources,
topology::config::{DataType, GlobalOptions, SourceConfig, SourceDescription},
transforms::Transform,
};
use bytes05::Bytes;
use evmap10::{self as evmap};
use file_source::{FileServer, FileServerShutdown, Fingerprinter};
use futures::{future::FutureExt, sink::Sink, stream::StreamExt};
use futures01::sync::mpsc;
use k8s_openapi::api::core::v1::Pod;
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
use std::time::Duration;
mod k8s_paths_provider;
mod lifecycle;
mod parser;
mod partial_events_merger;
mod path_helpers;
mod pod_metadata_annotator;
mod transform_utils;
mod util;
use k8s_paths_provider::K8sPathsProvider;
use lifecycle::Lifecycle;
use pod_metadata_annotator::PodMetadataAnnotator;
/// The key we use for `file` field.
const FILE_KEY: &str = "file";
/// The `self_node_name` value env var key.
const SELF_NODE_NAME_ENV_KEY: &str = "VECTOR_SELF_NODE_NAME";
/// Configuration for the `kubernetes_logs` source.
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(deny_unknown_fields, default)]
pub struct Config {
/// The `name` of the Kubernetes `Node` that Vector runs at.
/// Required to filter the `Pod`s to only include the ones with the log
/// files accessible locally.
#[serde(default = "default_self_node_name_env_template")]
self_node_name: String,
/// Automatically merge partial events.
#[serde(default = "crate::serde::default_true")]
auto_partial_merge: bool,
/// Specifies the field names for metadata annotation.
annotation_fields: pod_metadata_annotator::FieldsSpec,
}
inventory::submit! {
SourceDescription::new_without_default::<Config>(COMPONENT_NAME)
}
const COMPONENT_NAME: &str = "kubernetes_logs";
#[typetag::serde(name = "kubernetes_logs")]
impl SourceConfig for Config {
fn build(
&self,
name: &str,
globals: &GlobalOptions,
shutdown: ShutdownSignal,
out: mpsc::Sender<Event>,
) -> crate::Result<sources::Source> {
let source = Source::new(self, Resolver, globals, name)?;
// TODO: this is a workaround for the legacy futures 0.1.
// When the core is updated to futures 0.3 this should be simplied
// significantly.
let out = futures::compat::Compat01As03Sink::new(out);
let fut = source.run(out, shutdown);
let fut = fut.map(|result| {
result.map_err(|error| {
error!(message = "source future failed", ?error);
})
});
let fut = Box::pin(fut);
let fut = futures::compat::Compat::new(fut);
let fut: sources::Source = Box::new(fut);
Ok(fut)
}
fn output_type(&self) -> DataType {
DataType::Log
}
fn source_type(&self) -> &'static str {
COMPONENT_NAME
}
}
#[derive(Clone)]
struct Source {
client: k8s::client::Client,
self_node_name: String,
data_dir: PathBuf,
auto_partial_merge: bool,
fields_spec: pod_metadata_annotator::FieldsSpec,
}
impl Source {
fn new(
config: &Config,
resolver: Resolver,
globals: &GlobalOptions,
name: &str,
) -> crate::Result<Self> {
let self_node_name = if config.self_node_name.is_empty()
|| config.self_node_name == default_self_node_name_env_template()
{
std::env::var(SELF_NODE_NAME_ENV_KEY).map_err(|_| {
format!(
"self_node_name config value or {} env var is not set",
SELF_NODE_NAME_ENV_KEY
)
})?
} else {
config.self_node_name.clone()
};
info!(
message = "obtained Kubernetes Node name to collect logs for (self)",
?self_node_name
);
let k8s_config = k8s::client::config::Config::in_cluster()?;
let client = k8s::client::Client::new(k8s_config, resolver)?;
let data_dir = globals.resolve_and_make_data_subdir(None, name)?;
Ok(Self {
client,
self_node_name,
data_dir,
auto_partial_merge: config.auto_partial_merge,
fields_spec: config.annotation_fields.clone(),
})
}
async fn run<O>(self, out: O, global_shutdown: ShutdownSignal) -> crate::Result<()>
where
O: Sink<Event> + Send + 'static,
<O as Sink<Event>>::Error: std::error::Error,
{
let Self {
client,
self_node_name,
data_dir,
auto_partial_merge,
fields_spec,
} = self;
let field_selector = format!("spec.nodeName={}", self_node_name);
let label_selector = "vector.dev/exclude!=true".to_owned();
let watcher = k8s::api_watcher::ApiWatcher::new(client, Pod::watch_pod_for_all_namespaces);
let watcher = k8s::instrumenting_watcher::InstrumentingWatcher::new(watcher);
let (state_reader, state_writer) = evmap::new();
let state_writer =
k8s::state::evmap::Writer::new(state_writer, Some(Duration::from_millis(10)));
let state_writer = k8s::state::instrumenting::Writer::new(state_writer);
let state_writer =
k8s::state::delayed_delete::Writer::new(state_writer, Duration::from_secs(60));
let mut reflector = k8s::reflector::Reflector::new(
watcher,
state_writer,
Some(field_selector),
Some(label_selector),
Duration::from_secs(1),
);
let reflector_process = reflector.run();
let paths_provider = K8sPathsProvider::new(state_reader.clone());
let annotator = PodMetadataAnnotator::new(state_reader, fields_spec);
// TODO: maybe some of the parameters have to be configurable.
let max_line_bytes = 32 * 1024; // 32 KiB
let file_server = FileServer {
paths_provider,
max_read_bytes: 2048,
start_at_beginning: true,
ignore_before: None,
max_line_bytes,
data_dir,
glob_minimum_cooldown: Duration::from_secs(10),
fingerprinter: Fingerprinter::FirstLineChecksum {
max_line_length: max_line_bytes,
},
oldest_first: false,
remove_after: None,
};
let (file_source_tx, file_source_rx) =
futures::channel::mpsc::channel::<(Bytes, String)>(100);
let mut parser = parser::build();
let mut partial_events_merger = partial_events_merger::build(auto_partial_merge);
let events = file_source_rx.map(move |(bytes, file)| {
emit!(KubernetesLogsEventReceived {
file: &file,
byte_size: bytes.len(),
});
let mut event = create_event(bytes, &file); | emit!(KubernetesLogsEventAnnotationFailed { event: &event });
}
event
});
let events = events
.filter_map(move |event| futures::future::ready(parser.transform(event)))
.filter_map(move |event| {
futures::future::ready(partial_events_merger.transform(event))
});
let event_processing_loop = events.map(Ok).forward(out);
let mut lifecycle = Lifecycle::new();
{
let (slot, shutdown) = lifecycle.add();
let fut =
util::cancel_on_signal(reflector_process, shutdown).map(|result| match result {
Ok(()) => info!(message = "reflector process completed gracefully"),
Err(error) => {
error!(message = "reflector process exited with an error", ?error)
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::run_file_server(file_server, file_source_tx, shutdown).map(|result| {
match result {
Ok(FileServerShutdown) => info!(message = "file server completed gracefully"),
Err(error) => error!(message = "file server exited with an error", ?error),
}
});
slot.bind(Box::pin(fut));
}
{
let (slot, shutdown) = lifecycle.add();
let fut = util::complete_with_deadline_on_signal(
event_processing_loop,
shutdown,
Duration::from_secs(30), // more than enough time to propagate
)
.map(|result| {
match result {
Ok(Ok(())) => info!(message = "event processing loop completed gracefully"),
Ok(Err(error)) => error!(
message = "event processing loop exited with an error",
?error
),
Err(error) => error!(
message = "event processing loop timed out during the shutdown",
?error
),
};
});
slot.bind(Box::pin(fut));
}
lifecycle.run(global_shutdown).await;
info!(message = "done");
Ok(())
}
}
fn create_event(line: Bytes, file: &str) -> Event {
let mut event = Event::from(line);
// Add source type.
event
.as_mut_log()
.insert(event::log_schema().source_type_key(), COMPONENT_NAME);
// Add file.
event.as_mut_log().insert(FILE_KEY, file);
event
}
/// This function returns the default value for `self_node_name` variable
/// as it should be at the generated config file.
fn default_self_node_name_env_template() -> String {
format!("${{{}}}", SELF_NODE_NAME_ENV_KEY)
} | if annotator.annotate(&mut event, &file).is_none() { | random_line_split |
PacketDownloader.py | #!/usr/bin/python2
from __future__ import print_function
import httplib2
import oauth2client # $ pip install google-api-python-client
import os
import base64
import time
import email
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def | (service, user_id, msg_id, msg_labels):
try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path)
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages found.')
else:
if label_exists is False:
record('Creating label ' + flags.label)
label_object = MakeLabel(flags.label, mlv='show', llv='labelShow')
label = CreateLabel(service, 'me', label_object)
label_id = label['id']
label_exists = True
for message in messages:
GetData(service, 'me', message['id'], dir_path)
msg_label = CreateMsgLabels(NEW_LABEL_ID, label_id)
ModifyMessage(service, 'me', message['id'], msg_label)
if check is True:
time.sleep(UPDATE_INTERVAL)
if __name__ == '__main__':
main()
| ModifyMessage | identifier_name |
PacketDownloader.py | #!/usr/bin/python2
from __future__ import print_function
import httplib2
import oauth2client # $ pip install google-api-python-client
import os
import base64
import time
import email
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
|
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path)
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages found.')
else:
if label_exists is False:
record('Creating label ' + flags.label)
label_object = MakeLabel(flags.label, mlv='show', llv='labelShow')
label = CreateLabel(service, 'me', label_object)
label_id = label['id']
label_exists = True
for message in messages:
GetData(service, 'me', message['id'], dir_path)
msg_label = CreateMsgLabels(NEW_LABEL_ID, label_id)
ModifyMessage(service, 'me', message['id'], msg_label)
if check is True:
time.sleep(UPDATE_INTERVAL)
if __name__ == '__main__':
main()
| try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error) | identifier_body |
PacketDownloader.py | #!/usr/bin/python2
from __future__ import print_function
import httplib2
import oauth2client # $ pip install google-api-python-client
import os
import base64
import time
import email
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
|
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages found.')
else:
if label_exists is False:
record('Creating label ' + flags.label)
label_object = MakeLabel(flags.label, mlv='show', llv='labelShow')
label = CreateLabel(service, 'me', label_object)
label_id = label['id']
label_exists = True
for message in messages:
GetData(service, 'me', message['id'], dir_path)
msg_label = CreateMsgLabels(NEW_LABEL_ID, label_id)
ModifyMessage(service, 'me', message['id'], msg_label)
if check is True:
time.sleep(UPDATE_INTERVAL)
if __name__ == '__main__':
main()
| messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path) | conditional_block |
PacketDownloader.py | #!/usr/bin/python2
from __future__ import print_function
import httplib2
import oauth2client # $ pip install google-api-python-client
import os
import base64
import time
import email
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# command line arguments
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
flags = parser.parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
# home_dir = os.path.expanduser('~')
# credential_dir = os.path.join(home_dir, '.credentials')
credential_dir = './.credentials'
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'credentials.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
try:
response = service.users().messages().list(userId=user_id, q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError, error:
print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
sbd_filename = ''
csv_filename = 'packets.csv'
try:
message = service.users().messages().get(userId=user_id, id=msg_id).execute()
for part in message['payload']['parts']:
if part['filename']:
sbd_filename = message['internalDate'] + '.sbd'
if not sbd_filename is '':
if 'data' in part['body']:
data=part['body']['data']
else:
att_id=part['body']['attachmentId']
att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
data=att['data']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
csv_dl_path = os.path.join(prefix, csv_filename)
if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
#download individual sbd
with open(sbd_dl_path, 'w') as f:
f.write(file_data)
f.close()
#append contents to packets.csv
with open(csv_dl_path, 'a') as f:
f.write(file_data + '\n')
f.close()
record('Downloaded ' + sbd_dl_path)
else:
record('Skipped ' + sbd_dl_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
try:
if not sbd_filename is '':
message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
txt_file = sbd_filename[:-3] + 'txt'
txt_path = os.path.join(prefix, 'txt', txt_file)
if message['raw']:
if not os.path.exists(txt_path):
data=message['raw']
file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
msg = email.message_from_string(file_data)
for part in msg.walk():
if part.get_content_type() == 'text/plain':
msg_txt = part.get_payload()
with open(txt_path, 'w') as f:
f.write(msg_txt)
f.close()
record('Downloaded ' + txt_path)
else:
record('Skipped ' + txt_path)
except errors.HttpError, error:
print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
try:
label = service.users().labels().create(userId=user_id, body=label_object).execute()
return label
except errors.HttpError, error:
print('An error occurred: %s' % error)
# make actual label in Gmail
def MakeLabel(label_name, mlv='show', llv='labelShow'):
label = {'messageListVisibility': mlv,
'name': label_name,
'labelListVisibility': llv}
return label
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
try:
message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
label_ids = message['labelIds']
return message
except errors.HttpError, error:
print('An error occurred: %s' % error)
# set which labels to add/remove
def CreateMsgLabels(new_label_id, label_id):
return {'removeLabelIds': [new_label_id], 'addLabelIds': [label_id]}
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute() | labels = response['labels']
return labels
except errors.HttpError, error:
print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
localtime = time.asctime(time.localtime(time.time()))
log_path = os.path.join(flags.directory, flags.label, 'log.txt')
with open(log_path, 'a') as log:
log.write(localtime + '\t' + text + '\n')
log.close()
print(localtime + '\t' + text)
def main():
# Gmail authentication
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
check = True
label_exists = False
# retrieve list of Gmail labels
labels = ListLabels(service, 'me')
for label in labels:
# check if specified label exists
if label['name'] == flags.label:
label_id = label['id']
label_exists = True
# get label_ID of 'new' label
elif label['name'] == 'new':
NEW_LABEL_ID = label['id']
if flags.directory is '.':
dir_path = os.path.join(os.getcwd(), flags.label)
else:
dir_path = os.path.join(flags.directory, flags.label)
# check if directory/logfile must be created
if label_exists is True or flags.download_all == 'false':
if not os.path.exists(dir_path):
os.makedirs(dir_path)
record('Created directory ' + dir_path)
log_path = os.path.join(dir_path, 'log.txt')
if not os.path.exists(log_path):
open(log_path, 'w').close()
sbd_path = os.path.join(dir_path, 'sbd')
if not os.path.exists(sbd_path):
os.makedirs(sbd_path)
record('Created directory ' + sbd_path)
sbd_dl_path = os.path.join(sbd_path, 'new')
if not os.path.exists(sbd_dl_path):
os.makedirs(sbd_dl_path)
record('Created directory ' + sbd_dl_path)
txt_path = os.path.join(dir_path, 'txt')
if not os.path.exists(txt_path):
os.makedirs(txt_path)
record('Created directory ' + txt_path)
while check is True:
# download all packets with specified label
if flags.download_all is True:
if label_exists is True:
messages = ListMessagesMatchingQuery(service,'me', 'label:' + flags.label)
if not messages:
record('No messages found.')
else:
for message in messages:
GetData(service, 'me', message['id'], dir_path)
else:
localtime = time.asctime(time.localtime(time.time()))
print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
check = False
# download all new packets and relabel with specified label
else:
messages = ListMessagesMatchingQuery(service,'me', 'label:new')
if not messages:
record('No messages found.')
else:
if label_exists is False:
record('Creating label ' + flags.label)
label_object = MakeLabel(flags.label, mlv='show', llv='labelShow')
label = CreateLabel(service, 'me', label_object)
label_id = label['id']
label_exists = True
for message in messages:
GetData(service, 'me', message['id'], dir_path)
msg_label = CreateMsgLabels(NEW_LABEL_ID, label_id)
ModifyMessage(service, 'me', message['id'], msg_label)
if check is True:
time.sleep(UPDATE_INTERVAL)
if __name__ == '__main__':
main() | random_line_split | |
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Span should only be
//! created through context, and be archived into the context after the span
//! finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is to identify the [Span] for crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close span. We can't use closed span after finalize called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Span should only be
/// created through context, and be archived into the context after the span
/// finished.
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait all async span dropped which, created by [Span::prepare_for_async].
pub fn wait(self) {
self.wg.clone().wait();
}
/// It converts tracing context into segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async span haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn | (&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer {
self.tracer.upgrade().expect("Tracer has dropped")
}
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross threads context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
}
| peek_active_span_id | identifier_name |
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Span should only be
//! created through context, and be archived into the context after the span
//! finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is to identify the [Span] for crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close span. We can't use closed span after finalize called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Span should only be
/// created through context, and be archived into the context after the span
/// finished.
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait all async span dropped which, created by [Span::prepare_for_async].
pub fn wait(self) {
self.wg.clone().wait();
}
/// It converts tracing context into segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async span haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer |
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross threads context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
}
| {
self.tracer.upgrade().expect("Tracer has dropped")
} | identifier_body |
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Span should only be
//! created through context, and be archived into the context after the span
//! finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is to identify the [Span] for crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close span. We can't use closed span after finalize called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Span should only be
/// created through context, and be archived into the context after the span
/// finished.
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service | &self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
}
}
/// Wait all async span dropped which, created by [Span::prepare_for_async].
pub fn wait(self) {
self.wg.clone().wait();
}
/// It converts tracing context into segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async span haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer {
self.tracer.upgrade().expect("Tracer has dropped")
}
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross threads context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
} | }
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str { | random_line_split |
trace_context.rs | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//! TracingContext is the context of the tracing process. Span should only be
//! created through context, and be archived into the context after the span
//! finished.
use crate::{
common::{
random_generator::RandomGenerator,
system_time::{fetch_time, TimePeriod},
wait_group::WaitGroup,
},
error::LOCK_MSG,
proto::v3::{RefType, SegmentObject, SegmentReference, SpanLayer, SpanObject, SpanType},
trace::{
propagation::context::PropagationContext,
span::{HandleSpanObject, Span},
tracer::{Tracer, WeakTracer},
},
};
use parking_lot::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
};
use std::{
fmt::Formatter,
mem::take,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
/// The span uid is to identify the [Span] for crate.
pub(crate) type SpanUid = usize;
pub(crate) struct ActiveSpan {
uid: SpanUid,
span_id: i32,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl ActiveSpan {
fn new(uid: SpanUid, span_id: i32) -> Self {
Self {
uid,
span_id,
r#ref: None,
}
}
#[inline]
pub(crate) fn uid(&self) -> SpanUid {
self.uid
}
}
pub(crate) struct FinalizeSpan {
uid: SpanUid,
/// When the span is [AsyncSpan] and unfinished, it is None.
obj: Option<SpanObject>,
/// For [TracingContext::continued] used.
r#ref: Option<SegmentReference>,
}
impl FinalizeSpan {
pub(crate) fn new(
uid: usize,
obj: Option<SpanObject>,
r#ref: Option<SegmentReference>,
) -> Self {
Self { uid, obj, r#ref }
}
}
#[derive(Default)]
pub(crate) struct SpanStack {
pub(crate) finalized: RwLock<Vec<FinalizeSpan>>,
pub(crate) active: RwLock<Vec<ActiveSpan>>,
}
impl SpanStack {
pub(crate) fn finalized(&self) -> RwLockReadGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_read().expect(LOCK_MSG)
}
pub(crate) fn finalized_mut(&self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.active.try_read().expect(LOCK_MSG)
}
pub(crate) fn active_mut(&self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.active.try_write().expect(LOCK_MSG)
}
fn pop_active(&self, uid: SpanUid) -> Option<ActiveSpan> {
let mut stack = self.active_mut();
if stack
.last()
.map(|span| span.uid() == uid)
.unwrap_or_default()
{
stack.pop()
} else {
None
}
}
/// Close span. We can't use closed span after finalize called.
pub(crate) fn finalize_span(&self, uid: SpanUid, obj: Option<SpanObject>) {
let Some(active_span) = self.pop_active(uid) else {
panic!("Finalize span isn't the active span");
};
let finalize_span = match obj {
Some(mut obj) => {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = active_span.r#ref {
obj.refs.push(r#ref);
}
FinalizeSpan::new(uid, Some(obj), None)
}
None => FinalizeSpan::new(uid, None, active_span.r#ref),
};
self.finalized_mut().push(finalize_span);
}
/// Close async span, fill the span object.
pub(crate) fn finalize_async_span(&self, uid: SpanUid, mut obj: SpanObject) {
for finalize_span in &mut *self.finalized_mut() {
if finalize_span.uid == uid {
obj.end_time = fetch_time(TimePeriod::End);
if let Some(r#ref) = take(&mut finalize_span.r#ref) {
obj.refs.push(r#ref);
}
finalize_span.obj = Some(obj);
return;
}
}
unreachable!()
}
}
/// TracingContext is the context of the tracing process. Span should only be
/// created through context, and be archived into the context after the span
/// finished.
#[must_use = "call `create_entry_span` after `TracingContext` created."]
pub struct TracingContext {
trace_id: String,
trace_segment_id: String,
service: String,
service_instance: String,
next_span_id: i32,
span_stack: Arc<SpanStack>,
primary_endpoint_name: String,
span_uid_generator: AtomicUsize,
wg: WaitGroup,
tracer: WeakTracer,
}
impl std::fmt::Debug for TracingContext {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TracingContext")
.field("trace_id", &self.trace_id)
.field("trace_segment_id", &self.trace_segment_id)
.field("service", &self.service)
.field("service_instance", &self.service_instance)
.field("next_span_id", &self.next_span_id)
.finish()
}
}
impl TracingContext {
/// Generate a new trace context.
pub(crate) fn new(
service_name: impl Into<String>,
instance_name: impl Into<String>,
tracer: WeakTracer,
) -> Self {
TracingContext {
trace_id: RandomGenerator::generate(),
trace_segment_id: RandomGenerator::generate(),
service: service_name.into(),
service_instance: instance_name.into(),
next_span_id: Default::default(),
span_stack: Default::default(),
primary_endpoint_name: Default::default(),
span_uid_generator: AtomicUsize::new(0),
wg: Default::default(),
tracer,
}
}
/// Get trace id.
#[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
/// Get trace segment id.
#[inline]
pub fn trace_segment_id(&self) -> &str {
&self.trace_segment_id
}
/// Get service name.
#[inline]
pub fn service(&self) -> &str {
&self.service
}
/// Get service instance.
#[inline]
pub fn service_instance(&self) -> &str {
&self.service_instance
}
fn next_span_id(&self) -> i32 {
self.next_span_id
}
#[inline]
fn inc_next_span_id(&mut self) -> i32 {
let span_id = self.next_span_id;
self.next_span_id += 1;
span_id
}
/// The span uid is to identify the [Span] for crate.
fn generate_span_uid(&self) -> SpanUid {
self.span_uid_generator.fetch_add(1, Ordering::SeqCst)
}
/// Clone the last finalized span.
#[doc(hidden)]
pub fn last_span(&self) -> Option<SpanObject> {
let spans = &*self.span_stack.finalized();
spans.iter().rev().find_map(|span| span.obj.clone())
}
fn finalize_spans_mut(&mut self) -> RwLockWriteGuard<'_, Vec<FinalizeSpan>> {
self.span_stack.finalized.try_write().expect(LOCK_MSG)
}
pub(crate) fn active_span_stack(&self) -> RwLockReadGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active()
}
pub(crate) fn active_span_stack_mut(&mut self) -> RwLockWriteGuard<'_, Vec<ActiveSpan>> {
self.span_stack.active_mut()
}
pub(crate) fn active_span(&self) -> Option<MappedRwLockReadGuard<'_, ActiveSpan>> {
RwLockReadGuard::try_map(self.active_span_stack(), |stack| stack.last()).ok()
}
pub(crate) fn active_span_mut(&mut self) -> Option<MappedRwLockWriteGuard<'_, ActiveSpan>> {
RwLockWriteGuard::try_map(self.active_span_stack_mut(), |stack| stack.last_mut()).ok()
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// Typically called when no context has
/// been propagated and a new trace is to be started.
pub fn create_entry_span(&mut self, operation_name: &str) -> Span {
let span = Span::new_obj(
self.inc_next_span_id(),
self.peek_active_span_id().unwrap_or(-1),
operation_name.to_string(),
String::default(),
SpanType::Entry,
SpanLayer::Http,
false,
);
let index = self.push_active_span(&span);
Span::new(index, span, self.wg.clone(), self.span_stack.clone())
}
/// Create a new entry span, which is an initiator of collection of spans.
/// This should be called by invocation of the function which is triggered
/// by external service.
///
/// They should be propagated on `sw8` header in HTTP request with encoded
/// form. You can retrieve decoded context with
/// `skywalking::context::propagation::encoder::encode_propagation`
pub fn create_entry_span_with_propagation(
&mut self,
operation_name: &str,
propagation: &PropagationContext,
) -> Span {
let mut span = self.create_entry_span(operation_name);
self.trace_id = propagation.parent_trace_id.clone();
span.span_object_mut().refs.push(SegmentReference {
ref_type: RefType::CrossProcess as i32,
trace_id: self.trace_id().to_owned(),
parent_trace_segment_id: propagation.parent_trace_segment_id.clone(),
parent_span_id: propagation.parent_span_id,
parent_service: propagation.parent_service.clone(),
parent_service_instance: propagation.parent_service_instance.clone(),
parent_endpoint: propagation.destination_endpoint.clone(),
network_address_used_at_peer: propagation.destination_address.clone(),
});
span
}
/// Create a new exit span, which will be created when tracing context will
/// generate new span for function invocation.
///
/// Currently, this SDK supports RPC call. So we must set `remote_peer`.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_exit_span(&mut self, operation_name: &str, remote_peer: &str) -> Span {
self.create_common_span(
operation_name,
remote_peer,
SpanType::Exit,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// Create a new local span.
///
/// # Panics
///
/// Panic if entry span not existed.
#[inline]
pub fn create_local_span(&mut self, operation_name: &str) -> Span {
self.create_common_span(
operation_name,
"",
SpanType::Local,
self.peek_active_span_id().unwrap_or(-1),
)
}
/// create exit or local span common logic.
fn create_common_span(
&mut self,
operation_name: &str,
remote_peer: &str,
span_type: SpanType,
parent_span_id: i32,
) -> Span {
if self.next_span_id() == 0 {
panic!("entry span must be existed.");
}
let span = Span::new_obj(
self.inc_next_span_id(),
parent_span_id,
operation_name.to_string(),
remote_peer.to_string(),
span_type,
SpanLayer::Unknown,
false,
);
let uid = self.push_active_span(&span);
Span::new(uid, span, self.wg.clone(), self.span_stack.clone())
}
/// Capture a snapshot for cross-thread propagation.
pub fn capture(&self) -> ContextSnapshot {
ContextSnapshot {
trace_id: self.trace_id().to_owned(),
trace_segment_id: self.trace_segment_id().to_owned(),
span_id: self.peek_active_span_id().unwrap_or(-1),
parent_endpoint: self.primary_endpoint_name.clone(),
}
}
/// Build the reference between this segment and a cross-thread segment.
pub fn continued(&mut self, snapshot: ContextSnapshot) {
if snapshot.is_valid() |
}
/// Wait all async span dropped which, created by [Span::prepare_for_async].
pub fn wait(self) {
self.wg.clone().wait();
}
/// It converts tracing context into segment object.
/// This conversion should be done before sending segments into OAP.
///
/// Notice: The spans will be taken, so this method shouldn't be called
/// twice.
pub(crate) fn convert_to_segment_object(&mut self) -> SegmentObject {
let trace_id = self.trace_id().to_owned();
let trace_segment_id = self.trace_segment_id().to_owned();
let service = self.service().to_owned();
let service_instance = self.service_instance().to_owned();
let spans = take(&mut *self.finalize_spans_mut());
let spans = spans
.into_iter()
.map(|span| span.obj.expect("Some async span haven't finished"))
.collect();
SegmentObject {
trace_id,
trace_segment_id,
spans,
service,
service_instance,
is_size_limited: false,
}
}
pub(crate) fn peek_active_span_id(&self) -> Option<i32> {
self.active_span().map(|span| span.span_id)
}
fn push_active_span(&mut self, span: &SpanObject) -> SpanUid {
let uid = self.generate_span_uid();
self.primary_endpoint_name = span.operation_name.clone();
let mut stack = self.active_span_stack_mut();
stack.push(ActiveSpan::new(uid, span.span_id));
uid
}
fn upgrade_tracer(&self) -> Tracer {
self.tracer.upgrade().expect("Tracer has dropped")
}
}
impl Drop for TracingContext {
/// Convert to segment object, and send to tracer for reporting.
///
/// # Panics
///
/// Panic if tracer is dropped.
fn drop(&mut self) {
self.upgrade_tracer().finalize_context(self)
}
}
/// Cross threads context snapshot.
#[derive(Debug)]
pub struct ContextSnapshot {
trace_id: String,
trace_segment_id: String,
span_id: i32,
parent_endpoint: String,
}
impl ContextSnapshot {
/// Check if the snapshot is created from current context.
pub fn is_from_current(&self, context: &TracingContext) -> bool {
!self.trace_segment_id.is_empty() && self.trace_segment_id == context.trace_segment_id()
}
/// Check if the snapshot is valid.
pub fn is_valid(&self) -> bool {
!self.trace_segment_id.is_empty() && self.span_id > -1 && !self.trace_id.is_empty()
}
}
#[cfg(test)]
mod tests {
use super::*;
trait AssertSend: Send {}
impl AssertSend for TracingContext {}
}
| {
self.trace_id = snapshot.trace_id.clone();
let tracer = self.upgrade_tracer();
let segment_ref = SegmentReference {
ref_type: RefType::CrossThread as i32,
trace_id: snapshot.trace_id,
parent_trace_segment_id: snapshot.trace_segment_id,
parent_span_id: snapshot.span_id,
parent_service: tracer.service_name().to_owned(),
parent_service_instance: tracer.instance_name().to_owned(),
parent_endpoint: snapshot.parent_endpoint,
network_address_used_at_peer: Default::default(),
};
if let Some(mut span) = self.active_span_mut() {
span.r#ref = Some(segment_ref);
}
} | conditional_block |
fslogical.go | // Copyright 2023 The Cockroach Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Package fslogical contains a logical-replication loop for streaming
// document collections from Google Cloud Firestore.
package fslogical
import (
"context"
"encoding/json"
"fmt"
"time"
"cloud.google.com/go/firestore"
"github.com/cockroachdb/cdc-sink/internal/source/logical"
"github.com/cockroachdb/cdc-sink/internal/types"
"github.com/cockroachdb/cdc-sink/internal/util/hlc"
"github.com/cockroachdb/cdc-sink/internal/util/ident"
"github.com/cockroachdb/cdc-sink/internal/util/stamp"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"google.golang.org/api/iterator"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Dialect reads data from Google Cloud Firestore.
type Dialect struct {
backfillBatchSize int // Limit backfill query response size.
docIDProperty string // Added to mutation properties.
fs *firestore.Client // Access to Firestore.
idempotent bool // Detect reprocessing the same document.
loops *logical.Factory // Support dynamic nested collections.
memo types.Memo // Durable logging of processed doc ids.
pool *types.StagingPool // Database access.
query firestore.Query // The base query build from.
recurse bool // Scan for dynamic, nested collections.
recurseFilter *ident.Map[struct{}] // Ignore nested collections with these names.
sourceCollection ident.Ident // Identifies the loop to the user-script.
sourcePath string // The source collection path, for logging.
tombstones *Tombstones // Filters already-deleted ids.
updatedAtProperty ident.Ident // Order-by property in queries.
}
var (
_ logical.Backfiller = (*Dialect)(nil)
_ logical.Dialect = (*Dialect)(nil)
)
// These are the Dialect message types.
type (
backfillEnd struct {
cp *consistentPoint
}
batchStart struct {
cp *consistentPoint
}
batchDelete struct {
ref *firestore.DocumentRef
ts time.Time
}
batchDoc struct {
doc *firestore.DocumentSnapshot
}
batchEnd struct{}
)
// BackfillInto implements logical.Dialect. It uses an ID-based cursor
// approach to scan documents in their updated-at order.
func (d *Dialect) BackfillInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
prev, _ := state.GetConsistentPoint().(*consistentPoint)
to := time.Now()
for {
log.Tracef("backfilling %s from %s", d.sourcePath, prev)
err := d.backfillOneBatch(ctx, ch, to, prev, state)
if err != nil {
return errors.Wrap(err, d.sourcePath)
}
select {
case next := <-state.NotifyConsistentPoint(ctx, logical.AwaitGT, prev):
prev = next.(*consistentPoint)
continue
case <-state.Stopping():
return nil
case <-ctx.Done():
return ctx.Err()
}
}
}
// backfillOneBatch grabs a single batch of documents from the backend.
// It will return the next incremental consistentPoint and whether the
// backfill is expected to continue.
func (d *Dialect) backfillOneBatch(
ctx context.Context,
ch chan<- logical.Message,
now time.Time,
cp *consistentPoint,
state logical.State,
) error {
// We need to make the call to snaps.Next() interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Expected path when backfillOneBatch exits.
}
}()
// Iterate over the collection by (updated_at, __doc_id__) using
// a cursor-like approach so that we can checkpoint along the way.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
OrderBy(firestore.DocumentID, firestore.Asc).
Where(d.updatedAtProperty.Raw(), "<=", now).
Limit(d.backfillBatchSize)
if !cp.IsZero() {
if cp.AsID() == "" {
q = q.Where(d.updatedAtProperty.Raw(), ">=", cp.AsTime())
} else {
q = q.StartAfter(cp.AsTime(), cp.AsID())
}
}
snaps := q.Snapshots(ctx)
defer snaps.Stop()
snap, err := snaps.Next()
if err != nil {
// Mask cancellation errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
// We're going to call GetAll since we're running with a reasonable
// limit value. This allows us to peek at the id of the last
// document, so we can compute the eventual consistent point for
// this batch of docs.
docs, err := snap.Documents.GetAll()
if err != nil {
return errors.WithStack(err)
}
log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
// Workaround / BUG? It appears that the StartAfter call above
// sometimes returns the last document from the previous backfill
// loop. This loop ensures that the effective consistent point
// always goes forward in time.
for len(docs) > 0 {
firstCP, err := d.backfillPoint(docs[0])
if err != nil {
return err
}
if stamp.Compare(firstCP, cp) > 0 {
break
}
log.Tracef("filtering")
docs = docs[1:]
}
// Helper for interruptible send idiom.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// If we have read through the end of all documents in the
// collection, we want the consistent-point to jump forward in time
// to the server read-time.
if len(docs) == 0 {
cp = streamPoint(snap.ReadTime)
return send(backfillEnd{cp})
}
// Move the proposed consistent point to the last document.
lastDoc := docs[len(docs)-1]
if cp, err = d.backfillPoint(lastDoc); err != nil {
return err
}
// Send a batch of messages downstream. We use a non-blocking idiom
if err := send(batchStart{cp}); err != nil {
return err
}
for _, doc := range docs {
if err := send(batchDoc{doc}); err != nil {
return err
}
}
return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source. It runs until the context is canceled or
// the loop is asked to stop.
func (d *Dialect) ReadInto(
	ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
	// The call to snaps.Next() below needs to be made interruptible.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go func() {
		select {
		case <-state.Stopping():
			// Cancel early to interrupt call to snaps.Next() below.
			cancel()
		case <-ctx.Done():
			// Normal exit path when ReadInto exits.
		}
	}()
	// Resume from the stored consistent point; a nil value behaves as
	// the zero time.
	cp, _ := state.GetConsistentPoint().(*consistentPoint)
	// Stream from the last updated time. Truncating rounds the cursor
	// down, presumably to avoid missing sub-second updates; duplicates
	// are filtered later by shouldProcess when idempotency is enabled.
	q := d.query.
		OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
		StartAt(cp.AsTime().Truncate(time.Second))
	snaps := q.Snapshots(ctx)
	defer snaps.Stop()
	// Helper for interruptible send.
	send := func(msg logical.Message) error {
		select {
		case ch <- msg:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	for {
		log.Tracef("getting snapshot for %s", d.sourcePath)
		snap, err := snaps.Next()
		if err != nil {
			// Mask cancellation errors; iterator.Done is a clean stop.
			if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
				return nil
			}
			return errors.WithStack(err)
		}
		log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
		// Each snapshot becomes one downstream batch whose consistent
		// point is the server read-time.
		if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
			return err
		}
		for _, change := range snap.Changes {
			switch change.Kind {
			case firestore.DocumentAdded,
				firestore.DocumentModified:
				// Ignore documents that we already know have been deleted.
				if d.tombstones.IsDeleted(change.Doc.Ref) {
					continue
				}
				if err := send(batchDoc{change.Doc}); err != nil {
					return err
				}
			case firestore.DocumentRemoved:
				// Remember the deletion so a late add/modify event for
				// the same doc is ignored above.
				d.tombstones.NotifyDeleted(change.Doc.Ref)
				if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
					return err
				}
			}
		}
		if err := send(batchEnd{}); err != nil {
			return err
		}
	}
}
// Process implements logical.Dialect.
func (d *Dialect) Process(
ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
// Only write idempotency mark when we've committed a db transaction.
type mark struct {
ref *firestore.DocumentRef
time time.Time
}
var toMark []mark
for msg := range ch {
if logical.IsRollback(msg) {
if err := events.OnRollback(ctx, msg); err != nil {
return err
}
continue
}
switch t := msg.(type) {
case backfillEnd:
// Just advance the consistent point.
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
if err := events.OnCommit(ctx); err != nil {
return err
}
case batchStart:
toMark = toMark[:0]
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
case batchDoc:
doc := t.doc
if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
return err
} else if !ok {
continue
}
docUpdatedAt, err := d.docUpdatedAt(doc)
if err != nil {
return err
}
mut, err := d.marshalMutation(doc, docUpdatedAt)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.recurse {
if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
return err
}
}
if d.idempotent {
toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
}
case batchDelete:
if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
return err
} else if !ok {
continue
}
mut, err := marshalDeletion(t.ref, t.ts)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.idempotent {
toMark = append(toMark, mark{t.ref, t.ts})
}
case batchEnd:
if err := events.OnCommit(ctx); err != nil {
return err
}
for _, mark := range toMark {
if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
return err
}
}
default:
panic(fmt.Sprintf("unimplemented type %T", msg))
}
}
return nil
}
// ZeroStamp implements logical.Dialect.
func (d *Dialect) ZeroStamp() stamp.Stamp {
return &consistentPoint{}
}
// Compute the query-relative document start id. We need to do this so
// that sub-collections can be accessed in a consistent way.
//
// 2022-08-29: One way that does not work is to call Query.StartAfter()
// and then use Query.Serialize to hand the status over to the next
// backfill cycle.
func (d *Dialect) backfillPoint(doc *firestore.DocumentSnapshot) (*consistentPoint, error) {
topCollection := doc.Ref.Parent
for topCollection.Parent != nil {
// collection -> parent doc -> parent collection
topCollection = topCollection.Parent.Parent
}
relativePath := fmt.Sprintf("documents/%s/%s",
topCollection.ID, doc.Ref.Path[len(topCollection.Path)+1:])
updateTime, err := d.docUpdatedAt(doc)
if err != nil {
return nil, err
}
return &consistentPoint{
BackfillID: relativePath,
Time: updateTime,
}, nil
}
// docUpdatedAt extracts a timestamp from the document.
func (d *Dialect) docUpdatedAt(doc *firestore.DocumentSnapshot) (time.Time, error) {
val, err := doc.DataAt(d.updatedAtProperty.Raw())
if err != nil {
return time.Time{}, errors.WithStack(err)
}
if t, ok := val.(time.Time); ok {
return t, nil
}
return time.Time{}, errors.Errorf("document missing %q property", d.updatedAtProperty.Raw())
}
// marshalDeletion creates a mutation to represent the deletion of the
// specified document.
func marshalDeletion(id *firestore.DocumentRef, updatedAt time.Time) (types.Mutation, error) {
key, err := json.Marshal([]string{id.ID})
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
return types.Mutation{
Key: key,
Time: hlc.New(updatedAt.UnixNano(), 0),
}, nil
}
func (d *Dialect) marshalMutation(
doc *firestore.DocumentSnapshot, updatedAt time.Time,
) (types.Mutation, error) {
dataMap := doc.Data()
// Allow the doc id to be baked into the mutation.
if d.docIDProperty != "" {
dataMap[d.docIDProperty] = doc.Ref.ID
}
data, err := json.Marshal(dataMap)
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
key, err := json.Marshal([]string{doc.Ref.ID})
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
// Create empty slices so that we never pass a null value into JS.
parentCollections := make([]string, 0)
parentDocIds := make([]string, 0)
for parentCollection := doc.Ref.Parent; parentCollection != nil; {
parentCollections = append(parentCollections, parentCollection.ID)
if parentCollection.Parent != nil {
parentDocIds = append(parentDocIds, parentCollection.Parent.ID)
parentCollection = parentCollection.Parent.Parent
} else {
break
}
}
// The timestamps are converted to values that are easy to wrap
// a JS Date around in the user script.
// https://pkg.go.dev/github.com/dop251/goja#hdr-Handling_of_time_Time
meta := map[string]any{
"createTime": doc.CreateTime.UnixNano() / 1e6,
"id": doc.Ref.ID,
"parentCollections": parentCollections,
"parentDocIds": parentDocIds,
"path": doc.Ref.Path,
"readTime": doc.ReadTime.UnixNano() / 1e6,
"updateTime": doc.UpdateTime.UnixNano() / 1e6,
}
return types.Mutation{
Data: data,
Key: key,
Time: hlc.New(updatedAt.UnixNano(), 0),
Meta: meta,
}, nil
}
// doRecurse, if configured, will load dynamic sub-collections of
// the given document.
func (d *Dialect) doRecurse(
ctx context.Context, doc *firestore.DocumentRef, events logical.Events,
) error {
it := doc.Collections(ctx)
for {
coll, err := it.Next()
if err == iterator.Done {
return nil
}
if err != nil {
return errors.Wrapf(err, "loading dynamic collections of %s", doc.Path)
}
if _, skip := d.recurseFilter.Get(ident.New(coll.ID)); skip {
continue
}
fork := *d
fork.query = coll.Query
fork.sourcePath = coll.Path
if err := events.Backfill(ctx, coll.Path, &fork); err != nil {
return errors.WithMessage(err, coll.Path)
}
}
}
// markProcessed records an incoming document as having been processed.
func (d *Dialect) markProcessed(
ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) error {
payload := processedPayload{UpdatedAt: ts}
data, err := json.Marshal(&payload)
if err != nil {
return errors.WithStack(err)
}
return d.memo.Put(ctx, d.pool, processedKey(doc), data)
}
// shouldProcess implements idempotent processing of document snapshots.
// It ensures that the update-time of any given document always
// advances.
func (d *Dialect) shouldProcess(
ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) (bool, error) {
if !d.idempotent {
return true, nil
}
data, err := d.memo.Get(ctx, d.pool, processedKey(doc))
if err != nil {
return false, err
}
// No data means we're seeing the document for the first time.
if data == nil {
log.Tracef("accepting document %s at %s", doc.ID, ts)
return true, nil
}
var payload processedPayload
if err := json.Unmarshal(data, &payload); err != nil {
return false, errors.WithStack(err)
}
if ts.After(payload.UpdatedAt) {
log.Tracef("accepting document %s at %s > %s", doc.ID, ts, payload.UpdatedAt)
return true, nil
}
log.Tracef("ignoring document %s at %s <= %s", doc.ID, ts, payload.UpdatedAt)
return false, nil
}
// processedPayload is used by markProcessed and shouldProcess.
type processedPayload struct {
UpdatedAt time.Time `json:"u,omitempty"`
}
// processedKey returns the memo key used by markProcessed and
// shouldProcess.
func processedKey(ref *firestore.DocumentRef) string | {
return fmt.Sprintf("fs-doc-%s", ref.Path)
} | identifier_body | |
fslogical.go | // Copyright 2023 The Cockroach Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Package fslogical contains a logical-replication loop for streaming
// document collections from Google Cloud Firestore.
package fslogical
import (
"context"
"encoding/json"
"fmt"
"time"
"cloud.google.com/go/firestore"
"github.com/cockroachdb/cdc-sink/internal/source/logical"
"github.com/cockroachdb/cdc-sink/internal/types"
"github.com/cockroachdb/cdc-sink/internal/util/hlc"
"github.com/cockroachdb/cdc-sink/internal/util/ident"
"github.com/cockroachdb/cdc-sink/internal/util/stamp"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"google.golang.org/api/iterator"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Dialect reads data from Google Cloud Firestore. It implements both
// backfill (cursor-based scans) and streaming (snapshot listener)
// replication modes.
type Dialect struct {
	backfillBatchSize int                  // Limit backfill query response size.
	docIDProperty     string               // Added to mutation properties.
	fs                *firestore.Client    // Access to Firestore.
	idempotent        bool                 // Detect reprocessing the same document.
	loops             *logical.Factory     // Support dynamic nested collections.
	memo              types.Memo           // Durable logging of processed doc ids.
	pool              *types.StagingPool   // Database access.
	query             firestore.Query      // The base query to build from.
	recurse           bool                 // Scan for dynamic, nested collections.
	recurseFilter     *ident.Map[struct{}] // Ignore nested collections with these names.
	sourceCollection  ident.Ident          // Identifies the loop to the user-script.
	sourcePath        string               // The source collection path, for logging.
	tombstones        *Tombstones          // Filters already-deleted ids.
	updatedAtProperty ident.Ident          // Order-by property in queries.
}
var (
_ logical.Backfiller = (*Dialect)(nil)
_ logical.Dialect = (*Dialect)(nil)
)
// These are the Dialect message types, sent from BackfillInto/ReadInto
// to Process over the logical.Message channel.
type (
	// backfillEnd reports that a backfill pass has read through the end
	// of the collection; cp is the new consistent point to commit.
	backfillEnd struct {
		cp *consistentPoint
	}
	// batchStart opens a downstream transaction at the given point.
	batchStart struct {
		cp *consistentPoint
	}
	// batchDelete reports a document deletion observed at ts.
	batchDelete struct {
		ref *firestore.DocumentRef
		ts  time.Time
	}
	// batchDoc carries one added or modified document snapshot.
	batchDoc struct {
		doc *firestore.DocumentSnapshot
	}
	// batchEnd commits the current downstream transaction.
	batchEnd struct{}
)
// BackfillInto implements logical.Dialect. It uses an ID-based cursor
// approach to scan documents in their updated-at order.
func (d *Dialect) BackfillInto(
	ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
	// Resume from the previously-committed consistent point, if any.
	prev, _ := state.GetConsistentPoint().(*consistentPoint)
	// Only backfill documents updated at or before this instant; newer
	// changes are the streaming loop's responsibility.
	to := time.Now()
	for {
		log.Tracef("backfilling %s from %s", d.sourcePath, prev)
		err := d.backfillOneBatch(ctx, ch, to, prev, state)
		if err != nil {
			return errors.Wrap(err, d.sourcePath)
		}
		select {
		case next := <-state.NotifyConsistentPoint(ctx, logical.AwaitGT, prev):
			// Wait for Process to commit a strictly-later point before
			// fetching the next batch, so the cursor always advances.
			prev = next.(*consistentPoint)
			continue
		case <-state.Stopping():
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
// backfillOneBatch grabs a single batch of documents from the backend.
// It will return the next incremental consistentPoint and whether the
// backfill is expected to continue.
func (d *Dialect) backfillOneBatch(
	ctx context.Context,
	ch chan<- logical.Message,
	now time.Time,
	cp *consistentPoint,
	state logical.State,
) error {
	// We need to make the call to snaps.Next() interruptible.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go func() {
		select {
		case <-state.Stopping():
			// Cancel early to interrupt call to snaps.Next() below.
			cancel()
		case <-ctx.Done():
			// Expected path when backfillOneBatch exits.
		}
	}()
	// Iterate over the collection by (updated_at, __doc_id__) using
	// a cursor-like approach so that we can checkpoint along the way.
	q := d.query.
		OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
		OrderBy(firestore.DocumentID, firestore.Asc).
		Where(d.updatedAtProperty.Raw(), "<=", now).
		Limit(d.backfillBatchSize)
	if !cp.IsZero() {
		if cp.AsID() == "" {
			// Time-only point (e.g. from a streaming read): restart at
			// that time, inclusively.
			q = q.Where(d.updatedAtProperty.Raw(), ">=", cp.AsTime())
		} else {
			// Full (time, doc-id) cursor: resume strictly after it.
			q = q.StartAfter(cp.AsTime(), cp.AsID())
		}
	}
	snaps := q.Snapshots(ctx)
	defer snaps.Stop()
	snap, err := snaps.Next()
	if err != nil {
		// Mask cancellation errors.
		if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
			return nil
		}
		return errors.WithStack(err)
	}
	// We're going to call GetAll since we're running with a reasonable
	// limit value. This allows us to peek at the id of the last
	// document, so we can compute the eventual consistent point for
	// this batch of docs.
	docs, err := snap.Documents.GetAll()
	if err != nil {
		return errors.WithStack(err)
	}
	log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
	// Workaround / BUG? It appears that the StartAfter call above
	// sometimes returns the last document from the previous backfill
	// loop. This loop ensures that the effective consistent point
	// always goes forward in time.
	for len(docs) > 0 {
		firstCP, err := d.backfillPoint(docs[0])
		if err != nil {
			return err
		}
		if stamp.Compare(firstCP, cp) > 0 {
			break
		}
		log.Tracef("filtering")
		docs = docs[1:]
	}
	// Helper for interruptible send idiom.
	send := func(msg logical.Message) error {
		select {
		case ch <- msg:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	// If we have read through the end of all documents in the
	// collection, we want the consistent-point to jump forward in time
	// to the server read-time.
	if len(docs) == 0 {
		cp = streamPoint(snap.ReadTime)
		return send(backfillEnd{cp})
	}
	// Move the proposed consistent point to the last document.
	lastDoc := docs[len(docs)-1]
	if cp, err = d.backfillPoint(lastDoc); err != nil {
		return err
	}
	// Send a batch of messages downstream, using the interruptible
	// send helper above.
	if err := send(batchStart{cp}); err != nil {
		return err
	}
	for _, doc := range docs {
		if err := send(batchDoc{doc}); err != nil {
			return err
		}
	}
	return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source.
func (d *Dialect) ReadInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
// The call to snaps.Next() below needs to be made interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Normal exit path when ReadInto exits.
}
}()
cp, _ := state.GetConsistentPoint().(*consistentPoint)
// Stream from the last updated time.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
StartAt(cp.AsTime().Truncate(time.Second))
snaps := q.Snapshots(ctx)
defer snaps.Stop()
// Helper for interruptible send.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
for {
log.Tracef("getting snapshot for %s", d.sourcePath)
snap, err := snaps.Next()
if err != nil |
log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
return err
}
for _, change := range snap.Changes {
switch change.Kind {
case firestore.DocumentAdded,
firestore.DocumentModified:
// Ignore documents that we already know have been deleted.
if d.tombstones.IsDeleted(change.Doc.Ref) {
continue
}
if err := send(batchDoc{change.Doc}); err != nil {
return err
}
case firestore.DocumentRemoved:
d.tombstones.NotifyDeleted(change.Doc.Ref)
if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
return err
}
}
}
if err := send(batchEnd{}); err != nil {
return err
}
}
}
// Process implements logical.Dialect. It consumes the message stream
// produced by BackfillInto / ReadInto and applies it through the
// events callbacks as begin / data / commit transactions.
func (d *Dialect) Process(
	ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
	// Only write idempotency mark when we've committed a db transaction.
	type mark struct {
		ref *firestore.DocumentRef
		time time.Time
	}
	var toMark []mark
	for msg := range ch {
		if logical.IsRollback(msg) {
			if err := events.OnRollback(ctx, msg); err != nil {
				return err
			}
			continue
		}
		switch t := msg.(type) {
		case backfillEnd:
			// Just advance the consistent point.
			if err := events.OnBegin(ctx, t.cp); err != nil {
				return err
			}
			if err := events.OnCommit(ctx); err != nil {
				return err
			}
		case batchStart:
			// Reset pending idempotency marks for the new batch.
			toMark = toMark[:0]
			if err := events.OnBegin(ctx, t.cp); err != nil {
				return err
			}
		case batchDoc:
			doc := t.doc
			// Drop documents whose update-time has already been seen.
			if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
				return err
			} else if !ok {
				continue
			}
			docUpdatedAt, err := d.docUpdatedAt(doc)
			if err != nil {
				return err
			}
			mut, err := d.marshalMutation(doc, docUpdatedAt)
			if err != nil {
				return err
			}
			// Pass an empty destination table, because we know that
			// this is configured via a user-script.
			if err := events.OnData(ctx,
				d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
				return err
			}
			if d.recurse {
				// Spin up loops for dynamic sub-collections.
				if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
					return err
				}
			}
			if d.idempotent {
				toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
			}
		case batchDelete:
			if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
				return err
			} else if !ok {
				continue
			}
			mut, err := marshalDeletion(t.ref, t.ts)
			if err != nil {
				return err
			}
			// Pass an empty destination table, because we know that
			// this is configured via a user-script.
			if err := events.OnData(ctx,
				d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
				return err
			}
			if d.idempotent {
				toMark = append(toMark, mark{t.ref, t.ts})
			}
		case batchEnd:
			if err := events.OnCommit(ctx); err != nil {
				return err
			}
			// The transaction committed; now durably record the docs
			// as processed for future idempotency checks.
			for _, mark := range toMark {
				if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
					return err
				}
			}
		default:
			panic(fmt.Sprintf("unimplemented type %T", msg))
		}
	}
	return nil
}
// ZeroStamp implements logical.Dialect. A zero-valued consistentPoint
// sorts before any real point, triggering a full backfill.
func (d *Dialect) ZeroStamp() stamp.Stamp {
	return &consistentPoint{}
}
// backfillPoint computes the query-relative document start id. We need
// to do this so that sub-collections can be accessed in a consistent
// way.
//
// 2022-08-29: One way that does not work is to call Query.StartAfter()
// and then use Query.Serialize to hand the status over to the next
// backfill cycle.
func (d *Dialect) backfillPoint(doc *firestore.DocumentSnapshot) (*consistentPoint, error) {
	// Walk up to the top-most collection that contains this document.
	topCollection := doc.Ref.Parent
	for topCollection.Parent != nil {
		// collection -> parent doc -> parent collection
		topCollection = topCollection.Parent.Parent
	}
	// e.g. "documents/<top-collection-id>/<doc path relative to it>".
	relativePath := fmt.Sprintf("documents/%s/%s",
		topCollection.ID, doc.Ref.Path[len(topCollection.Path)+1:])
	updateTime, err := d.docUpdatedAt(doc)
	if err != nil {
		return nil, err
	}
	return &consistentPoint{
		BackfillID: relativePath,
		Time: updateTime,
	}, nil
}
// docUpdatedAt extracts the configured updated-at property from the
// document snapshot. It returns an error if the property is absent or
// is not a timestamp value.
func (d *Dialect) docUpdatedAt(doc *firestore.DocumentSnapshot) (time.Time, error) {
	prop := d.updatedAtProperty.Raw()
	raw, err := doc.DataAt(prop)
	if err != nil {
		return time.Time{}, errors.WithStack(err)
	}
	ts, ok := raw.(time.Time)
	if !ok {
		return time.Time{}, errors.Errorf("document missing %q property", prop)
	}
	return ts, nil
}
// marshalDeletion creates a mutation to represent the deletion of the
// specified document. A deletion mutation has a key and a timestamp,
// but no data payload.
func marshalDeletion(id *firestore.DocumentRef, updatedAt time.Time) (types.Mutation, error) {
	encodedKey, err := json.Marshal([]string{id.ID})
	if err != nil {
		return types.Mutation{}, errors.WithStack(err)
	}
	mut := types.Mutation{
		Key: encodedKey,
		Time: hlc.New(updatedAt.UnixNano(), 0),
	}
	return mut, nil
}
// marshalMutation converts a document snapshot into a types.Mutation,
// JSON-encoding the document data and key and attaching collection
// metadata for use by the user-script.
func (d *Dialect) marshalMutation(
	doc *firestore.DocumentSnapshot, updatedAt time.Time,
) (types.Mutation, error) {
	dataMap := doc.Data()
	// Allow the doc id to be baked into the mutation.
	if d.docIDProperty != "" {
		dataMap[d.docIDProperty] = doc.Ref.ID
	}
	data, err := json.Marshal(dataMap)
	if err != nil {
		return types.Mutation{}, errors.WithStack(err)
	}
	key, err := json.Marshal([]string{doc.Ref.ID})
	if err != nil {
		return types.Mutation{}, errors.WithStack(err)
	}
	// Create empty slices so that we never pass a null value into JS.
	parentCollections := make([]string, 0)
	parentDocIds := make([]string, 0)
	// Walk collection -> parent doc -> parent collection up to the root,
	// collecting the ids along the way.
	for parentCollection := doc.Ref.Parent; parentCollection != nil; {
		parentCollections = append(parentCollections, parentCollection.ID)
		if parentCollection.Parent != nil {
			parentDocIds = append(parentDocIds, parentCollection.Parent.ID)
			parentCollection = parentCollection.Parent.Parent
		} else {
			break
		}
	}
	// The timestamps are converted to values that are easy to wrap
	// a JS Date around in the user script (milliseconds since epoch).
	// https://pkg.go.dev/github.com/dop251/goja#hdr-Handling_of_time_Time
	meta := map[string]any{
		"createTime": doc.CreateTime.UnixNano() / 1e6,
		"id": doc.Ref.ID,
		"parentCollections": parentCollections,
		"parentDocIds": parentDocIds,
		"path": doc.Ref.Path,
		"readTime": doc.ReadTime.UnixNano() / 1e6,
		"updateTime": doc.UpdateTime.UnixNano() / 1e6,
	}
	return types.Mutation{
		Data: data,
		Key: key,
		Time: hlc.New(updatedAt.UnixNano(), 0),
		Meta: meta,
	}, nil
}
// doRecurse, if configured, will load dynamic sub-collections of
// the given document and start a backfill loop for each one.
func (d *Dialect) doRecurse(
	ctx context.Context, doc *firestore.DocumentRef, events logical.Events,
) error {
	it := doc.Collections(ctx)
	for {
		coll, err := it.Next()
		if err == iterator.Done {
			// All sub-collections have been enumerated.
			return nil
		}
		if err != nil {
			return errors.Wrapf(err, "loading dynamic collections of %s", doc.Path)
		}
		// Skip collection names the user asked to ignore.
		if _, skip := d.recurseFilter.Get(ident.New(coll.ID)); skip {
			continue
		}
		// Shallow-copy the dialect, re-pointed at the nested
		// collection, and hand it off as a new backfill loop.
		fork := *d
		fork.query = coll.Query
		fork.sourcePath = coll.Path
		if err := events.Backfill(ctx, coll.Path, &fork); err != nil {
			return errors.WithMessage(err, coll.Path)
		}
	}
}
// markProcessed records an incoming document as having been processed,
// persisting its update timestamp in the memo store so shouldProcess
// can filter replays.
func (d *Dialect) markProcessed(
	ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) error {
	encoded, err := json.Marshal(&processedPayload{UpdatedAt: ts})
	if err != nil {
		return errors.WithStack(err)
	}
	return d.memo.Put(ctx, d.pool, processedKey(doc), encoded)
}
// shouldProcess implements idempotent processing of document snapshots.
// It ensures that the update-time of any given document always
// advances. It returns true if the document at ts has not been seen
// before, or false if it should be skipped as a replay.
func (d *Dialect) shouldProcess(
	ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) (bool, error) {
	// Idempotency tracking disabled: accept everything.
	if !d.idempotent {
		return true, nil
	}
	data, err := d.memo.Get(ctx, d.pool, processedKey(doc))
	if err != nil {
		return false, err
	}
	// No data means we're seeing the document for the first time.
	if data == nil {
		log.Tracef("accepting document %s at %s", doc.ID, ts)
		return true, nil
	}
	var payload processedPayload
	if err := json.Unmarshal(data, &payload); err != nil {
		return false, errors.WithStack(err)
	}
	// Only accept strictly-newer update times.
	if ts.After(payload.UpdatedAt) {
		log.Tracef("accepting document %s at %s > %s", doc.ID, ts, payload.UpdatedAt)
		return true, nil
	}
	log.Tracef("ignoring document %s at %s <= %s", doc.ID, ts, payload.UpdatedAt)
	return false, nil
}
// processedPayload is used by markProcessed and shouldProcess. It is
// stored as JSON in the memo table.
type processedPayload struct {
	UpdatedAt time.Time `json:"u,omitempty"`
}
// processedKey returns the memo key used by markProcessed and
// shouldProcess. The document's full path keeps keys unique across
// collections.
func processedKey(ref *firestore.DocumentRef) string {
	return "fs-doc-" + ref.Path
}
| {
// Mask cancellations errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
} | conditional_block |
fslogical.go | // Copyright 2023 The Cockroach Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Package fslogical contains a logical-replication loop for streaming
// document collections from Google Cloud Firestore.
package fslogical
import (
"context"
"encoding/json"
"fmt"
"time"
"cloud.google.com/go/firestore"
"github.com/cockroachdb/cdc-sink/internal/source/logical"
"github.com/cockroachdb/cdc-sink/internal/types"
"github.com/cockroachdb/cdc-sink/internal/util/hlc"
"github.com/cockroachdb/cdc-sink/internal/util/ident"
"github.com/cockroachdb/cdc-sink/internal/util/stamp"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"google.golang.org/api/iterator"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Dialect reads data from Google Cloud Firestore.
type Dialect struct {
backfillBatchSize int // Limit backfill query response size.
docIDProperty string // Added to mutation properties.
fs *firestore.Client // Access to Firestore.
idempotent bool // Detect reprocessing the same document.
loops *logical.Factory // Support dynamic nested collections.
memo types.Memo // Durable logging of processed doc ids.
pool *types.StagingPool // Database access.
query firestore.Query // The base query build from.
recurse bool // Scan for dynamic, nested collections.
recurseFilter *ident.Map[struct{}] // Ignore nested collections with these names.
sourceCollection ident.Ident // Identifies the loop to the user-script.
sourcePath string // The source collection path, for logging.
tombstones *Tombstones // Filters already-deleted ids.
updatedAtProperty ident.Ident // Order-by property in queries.
}
var (
_ logical.Backfiller = (*Dialect)(nil)
_ logical.Dialect = (*Dialect)(nil)
)
// These are the Dialect message types.
type (
backfillEnd struct {
cp *consistentPoint
}
batchStart struct {
cp *consistentPoint
}
batchDelete struct {
ref *firestore.DocumentRef
ts time.Time
}
batchDoc struct {
doc *firestore.DocumentSnapshot
}
batchEnd struct{}
)
// BackfillInto implements logical.Dialect. It uses an ID-based cursor
// approach to scan documents in their updated-at order.
func (d *Dialect) BackfillInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
prev, _ := state.GetConsistentPoint().(*consistentPoint)
to := time.Now()
for {
log.Tracef("backfilling %s from %s", d.sourcePath, prev)
err := d.backfillOneBatch(ctx, ch, to, prev, state)
if err != nil {
return errors.Wrap(err, d.sourcePath)
}
select {
case next := <-state.NotifyConsistentPoint(ctx, logical.AwaitGT, prev):
prev = next.(*consistentPoint)
continue
case <-state.Stopping():
return nil
case <-ctx.Done():
return ctx.Err()
}
}
}
// backfillOneBatch grabs a single batch of documents from the backend.
// It will return the next incremental consistentPoint and whether the
// backfill is expected to continue.
func (d *Dialect) backfillOneBatch(
ctx context.Context,
ch chan<- logical.Message,
now time.Time,
cp *consistentPoint,
state logical.State,
) error {
// We need to make the call to snaps.Next() interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Expected path when backfillOneBatch exits.
}
}()
// Iterate over the collection by (updated_at, __doc_id__) using
// a cursor-like approach so that we can checkpoint along the way.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
OrderBy(firestore.DocumentID, firestore.Asc).
Where(d.updatedAtProperty.Raw(), "<=", now).
Limit(d.backfillBatchSize)
if !cp.IsZero() {
if cp.AsID() == "" {
q = q.Where(d.updatedAtProperty.Raw(), ">=", cp.AsTime())
} else {
q = q.StartAfter(cp.AsTime(), cp.AsID())
}
}
snaps := q.Snapshots(ctx)
defer snaps.Stop()
snap, err := snaps.Next()
if err != nil {
// Mask cancellation errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
// We're going to call GetAll since we're running with a reasonable
// limit value. This allows us to peek at the id of the last
// document, so we can compute the eventual consistent point for
// this batch of docs.
docs, err := snap.Documents.GetAll()
if err != nil {
return errors.WithStack(err)
}
log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
// Workaround / BUG? It appears that the StartAfter call above
// sometimes returns the last document from the previous backfill
// loop. This loop ensures that the effective consistent point
// always goes forward in time.
for len(docs) > 0 {
firstCP, err := d.backfillPoint(docs[0])
if err != nil {
return err
}
if stamp.Compare(firstCP, cp) > 0 {
break
}
log.Tracef("filtering")
docs = docs[1:]
}
// Helper for interruptible send idiom.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// If we have read through the end of all documents in the
// collection, we want the consistent-point to jump forward in time
// to the server read-time.
if len(docs) == 0 {
cp = streamPoint(snap.ReadTime)
return send(backfillEnd{cp})
}
// Move the proposed consistent point to the last document.
lastDoc := docs[len(docs)-1]
if cp, err = d.backfillPoint(lastDoc); err != nil {
return err
}
// Send a batch of messages downstream. We use a non-blocking idiom
if err := send(batchStart{cp}); err != nil {
return err
}
for _, doc := range docs {
if err := send(batchDoc{doc}); err != nil {
return err
}
}
return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source.
func (d *Dialect) ReadInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
// The call to snaps.Next() below needs to be made interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Normal exit path when ReadInto exits.
}
}()
cp, _ := state.GetConsistentPoint().(*consistentPoint)
// Stream from the last updated time.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
StartAt(cp.AsTime().Truncate(time.Second))
snaps := q.Snapshots(ctx)
defer snaps.Stop()
// Helper for interruptible send.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
for {
log.Tracef("getting snapshot for %s", d.sourcePath)
snap, err := snaps.Next()
if err != nil {
// Mask cancellations errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
return err
}
for _, change := range snap.Changes {
switch change.Kind {
case firestore.DocumentAdded,
firestore.DocumentModified:
// Ignore documents that we already know have been deleted.
if d.tombstones.IsDeleted(change.Doc.Ref) {
continue
}
if err := send(batchDoc{change.Doc}); err != nil {
return err
}
case firestore.DocumentRemoved:
d.tombstones.NotifyDeleted(change.Doc.Ref)
if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
return err
}
}
}
if err := send(batchEnd{}); err != nil {
return err
}
}
}
// Process implements logical.Dialect. It consumes the messages emitted
// by BackfillInto / ReadInto, applies them via the events callbacks,
// and, when idempotency is enabled, durably records processed documents
// after each commit.
func (d *Dialect) Process(
	ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
	// Only write idempotency mark when we've committed a db transaction.
	type mark struct {
		ref  *firestore.DocumentRef
		time time.Time
	}
	var toMark []mark
	for msg := range ch {
		if logical.IsRollback(msg) {
			if err := events.OnRollback(ctx, msg); err != nil {
				return err
			}
			continue
		}
		switch t := msg.(type) {
		case backfillEnd:
			// Just advance the consistent point.
			if err := events.OnBegin(ctx, t.cp); err != nil {
				return err
			}
			if err := events.OnCommit(ctx); err != nil {
				return err
			}
		case batchStart:
			// Reset pending idempotency marks for the new batch.
			toMark = toMark[:0]
			if err := events.OnBegin(ctx, t.cp); err != nil {
				return err
			}
		case batchDoc:
			doc := t.doc
			// Skip documents whose update time has not advanced.
			if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
				return err
			} else if !ok {
				continue
			}
			docUpdatedAt, err := d.docUpdatedAt(doc)
			if err != nil {
				return err
			}
			mut, err := d.marshalMutation(doc, docUpdatedAt)
			if err != nil {
				return err
			}
			// Pass an empty destination table, because we know that
			// this is configured via a user-script.
			if err := events.OnData(ctx,
				d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
				return err
			}
			if d.recurse {
				// Scan dynamic sub-collections of this document.
				if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
					return err
				}
			}
			if d.idempotent {
				toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
			}
		case batchDelete:
			if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
				return err
			} else if !ok {
				continue
			}
			mut, err := marshalDeletion(t.ref, t.ts)
			if err != nil {
				return err
			}
			// Pass an empty destination table, because we know that
			// this is configured via a user-script.
			if err := events.OnData(ctx,
				d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
				return err
			}
			if d.idempotent {
				toMark = append(toMark, mark{t.ref, t.ts})
			}
		case batchEnd:
			if err := events.OnCommit(ctx); err != nil {
				return err
			}
			// The transaction committed; record processed documents so
			// restarts will not reprocess them.
			for _, mark := range toMark {
				if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
					return err
				}
			}
		default:
			panic(fmt.Sprintf("unimplemented type %T", msg))
		}
	}
	return nil
}
// ZeroStamp implements logical.Dialect.
func (d *Dialect) | () stamp.Stamp {
return &consistentPoint{}
}
// Compute the query-relative document start id. We need to do this so
// that sub-collections can be accessed in a consistent way.
//
// 2022-08-29: One way that does not work is to call Query.StartAfter()
// and then use Query.Serialize to hand the status over to the next
// backfill cycle.
func (d *Dialect) backfillPoint(doc *firestore.DocumentSnapshot) (*consistentPoint, error) {
	// Walk up to the top-level collection containing the document.
	topCollection := doc.Ref.Parent
	for topCollection.Parent != nil {
		// collection -> parent doc -> parent collection
		topCollection = topCollection.Parent.Parent
	}
	// Path of the document relative to the top-level collection:
	// "documents/<top collection id>/<remainder of doc path>".
	relativePath := fmt.Sprintf("documents/%s/%s",
		topCollection.ID, doc.Ref.Path[len(topCollection.Path)+1:])
	updateTime, err := d.docUpdatedAt(doc)
	if err != nil {
		return nil, err
	}
	return &consistentPoint{
		BackfillID: relativePath,
		Time:       updateTime,
	}, nil
}
// docUpdatedAt extracts a timestamp from the document by reading the
// configured updated-at property. It returns an error if the property
// is absent or does not hold a timestamp value.
func (d *Dialect) docUpdatedAt(doc *firestore.DocumentSnapshot) (time.Time, error) {
	val, err := doc.DataAt(d.updatedAtProperty.Raw())
	if err != nil {
		return time.Time{}, errors.WithStack(err)
	}
	if t, ok := val.(time.Time); ok {
		return t, nil
	}
	// Present but not a time.Time value.
	return time.Time{}, errors.Errorf("document missing %q property", d.updatedAtProperty.Raw())
}
// marshalDeletion creates a mutation to represent the deletion of the
// specified document. The mutation carries only the key and timestamp;
// no Data payload is set.
func marshalDeletion(id *firestore.DocumentRef, updatedAt time.Time) (types.Mutation, error) {
	// The key is the JSON array [ document id ].
	key, err := json.Marshal([]string{id.ID})
	if err != nil {
		return types.Mutation{}, errors.WithStack(err)
	}
	return types.Mutation{
		Key:  key,
		Time: hlc.New(updatedAt.UnixNano(), 0),
	}, nil
}
// marshalMutation converts a document snapshot into a types.Mutation,
// serializing the document data as JSON and attaching path/timestamp
// metadata for consumption by the user-script.
func (d *Dialect) marshalMutation(
	doc *firestore.DocumentSnapshot, updatedAt time.Time,
) (types.Mutation, error) {
	dataMap := doc.Data()
	// Allow the doc id to be baked into the mutation.
	if d.docIDProperty != "" {
		dataMap[d.docIDProperty] = doc.Ref.ID
	}
	data, err := json.Marshal(dataMap)
	if err != nil {
		return types.Mutation{}, errors.WithStack(err)
	}
	// The key is the JSON array [ document id ].
	key, err := json.Marshal([]string{doc.Ref.ID})
	if err != nil {
		return types.Mutation{}, errors.WithStack(err)
	}
	// Create empty slices so that we never pass a null value into JS.
	parentCollections := make([]string, 0)
	parentDocIds := make([]string, 0)
	// Walk up the collection -> parent-document chain, recording the
	// collection ids and parent document ids along the way.
	for parentCollection := doc.Ref.Parent; parentCollection != nil; {
		parentCollections = append(parentCollections, parentCollection.ID)
		if parentCollection.Parent != nil {
			parentDocIds = append(parentDocIds, parentCollection.Parent.ID)
			parentCollection = parentCollection.Parent.Parent
		} else {
			break
		}
	}
	// The timestamps are converted to values that are easy to wrap
	// a JS Date around in the user script.
	// https://pkg.go.dev/github.com/dop251/goja#hdr-Handling_of_time_Time
	meta := map[string]any{
		"createTime":        doc.CreateTime.UnixNano() / 1e6, // epoch millis
		"id":                doc.Ref.ID,
		"parentCollections": parentCollections,
		"parentDocIds":      parentDocIds,
		"path":              doc.Ref.Path,
		"readTime":          doc.ReadTime.UnixNano() / 1e6,
		"updateTime":        doc.UpdateTime.UnixNano() / 1e6,
	}
	return types.Mutation{
		Data: data,
		Key:  key,
		Time: hlc.New(updatedAt.UnixNano(), 0),
		Meta: meta,
	}, nil
}
// doRecurse, if configured, will load dynamic sub-collections of
// the given document and start backfill loops for them.
func (d *Dialect) doRecurse(
	ctx context.Context, doc *firestore.DocumentRef, events logical.Events,
) error {
	it := doc.Collections(ctx)
	for {
		coll, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return errors.Wrapf(err, "loading dynamic collections of %s", doc.Path)
		}
		// Skip collections that the filter says to ignore.
		if _, skip := d.recurseFilter.Get(ident.New(coll.ID)); skip {
			continue
		}
		// Shallow-copy the dialect and retarget it at the
		// sub-collection, then hand it off as a backfill loop.
		fork := *d
		fork.query = coll.Query
		fork.sourcePath = coll.Path
		if err := events.Backfill(ctx, coll.Path, &fork); err != nil {
			return errors.WithMessage(err, coll.Path)
		}
	}
}
// markProcessed records an incoming document as having been processed
// by persisting its update time into the memo table under the
// document's processedKey.
func (d *Dialect) markProcessed(
	ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) error {
	payload := processedPayload{UpdatedAt: ts}
	data, err := json.Marshal(&payload)
	if err != nil {
		return errors.WithStack(err)
	}
	return d.memo.Put(ctx, d.pool, processedKey(doc), data)
}
// shouldProcess implements idempotent processing of document snapshots.
// It ensures that the update-time of any given document always
// advances. It returns true when the document should be processed.
func (d *Dialect) shouldProcess(
	ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) (bool, error) {
	// Idempotency tracking is optional; accept everything when off.
	if !d.idempotent {
		return true, nil
	}
	data, err := d.memo.Get(ctx, d.pool, processedKey(doc))
	if err != nil {
		return false, err
	}
	// No data means we're seeing the document for the first time.
	if data == nil {
		log.Tracef("accepting document %s at %s", doc.ID, ts)
		return true, nil
	}
	var payload processedPayload
	if err := json.Unmarshal(data, &payload); err != nil {
		return false, errors.WithStack(err)
	}
	// Accept only strictly newer updates.
	if ts.After(payload.UpdatedAt) {
		log.Tracef("accepting document %s at %s > %s", doc.ID, ts, payload.UpdatedAt)
		return true, nil
	}
	log.Tracef("ignoring document %s at %s <= %s", doc.ID, ts, payload.UpdatedAt)
	return false, nil
}
// processedPayload is used by markProcessed and shouldProcess. It is
// the JSON value stored in the memo table for each processed document.
type processedPayload struct {
	// UpdatedAt is the most recently processed update time.
	UpdatedAt time.Time `json:"u,omitempty"`
}
// processedKey returns the memo key used by markProcessed and
// shouldProcess to identify a specific document by its full path.
func processedKey(ref *firestore.DocumentRef) string {
	return "fs-doc-" + ref.Path
}
| ZeroStamp | identifier_name |
fslogical.go | // Copyright 2023 The Cockroach Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
// Package fslogical contains a logical-replication loop for streaming
// document collections from Google Cloud Firestore.
package fslogical
import (
"context"
"encoding/json"
"fmt"
"time"
"cloud.google.com/go/firestore"
"github.com/cockroachdb/cdc-sink/internal/source/logical"
"github.com/cockroachdb/cdc-sink/internal/types"
"github.com/cockroachdb/cdc-sink/internal/util/hlc"
"github.com/cockroachdb/cdc-sink/internal/util/ident"
"github.com/cockroachdb/cdc-sink/internal/util/stamp"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"google.golang.org/api/iterator"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Dialect reads data from Google Cloud Firestore. It implements both
// logical.Backfiller and logical.Dialect (see the interface assertions
// below).
type Dialect struct {
	backfillBatchSize int                  // Limit backfill query response size.
	docIDProperty     string               // Added to mutation properties.
	fs                *firestore.Client    // Access to Firestore.
	idempotent        bool                 // Detect reprocessing the same document.
	loops             *logical.Factory     // Support dynamic nested collections.
	memo              types.Memo           // Durable logging of processed doc ids.
	pool              *types.StagingPool   // Database access.
	query             firestore.Query      // The base query build from.
	recurse           bool                 // Scan for dynamic, nested collections.
	recurseFilter     *ident.Map[struct{}] // Ignore nested collections with these names.
	sourceCollection  ident.Ident          // Identifies the loop to the user-script.
	sourcePath        string               // The source collection path, for logging.
	tombstones        *Tombstones          // Filters already-deleted ids.
	updatedAtProperty ident.Ident          // Order-by property in queries.
}
var (
_ logical.Backfiller = (*Dialect)(nil)
_ logical.Dialect = (*Dialect)(nil)
)
// These are the Dialect message types.
type (
backfillEnd struct {
cp *consistentPoint
}
batchStart struct {
cp *consistentPoint
}
batchDelete struct {
ref *firestore.DocumentRef
ts time.Time
}
batchDoc struct {
doc *firestore.DocumentSnapshot
}
batchEnd struct{}
)
// BackfillInto implements logical.Dialect. It uses an ID-based cursor
// approach to scan documents in their updated-at order, looping until
// stopped or cancelled.
func (d *Dialect) BackfillInto(
	ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
	prev, _ := state.GetConsistentPoint().(*consistentPoint)
	// Capture "now" once so every batch shares the same upper bound.
	to := time.Now()
	for {
		log.Tracef("backfilling %s from %s", d.sourcePath, prev)
		err := d.backfillOneBatch(ctx, ch, to, prev, state)
		if err != nil {
			return errors.Wrap(err, d.sourcePath)
		}
		select {
		case next := <-state.NotifyConsistentPoint(ctx, logical.AwaitGT, prev):
			// Wait for the batch to be committed downstream before
			// starting the next one from the advanced point.
			prev = next.(*consistentPoint)
			continue
		case <-state.Stopping():
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
// backfillOneBatch grabs a single batch of documents from the backend.
// It will return the next incremental consistentPoint and whether the
// backfill is expected to continue.
func (d *Dialect) backfillOneBatch(
	ctx context.Context,
	ch chan<- logical.Message,
	now time.Time,
	cp *consistentPoint,
	state logical.State,
) error {
	// We need to make the call to snaps.Next() interruptable.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go func() {
		select {
		case <-state.Stopping():
			// Cancel early to interrupt call to snaps.Next() below.
			cancel()
		case <-ctx.Done():
			// Expected path when backfillOneBatch exits.
		}
	}()
	// Iterate over the collection by (updated_at, __doc_id__) using
	// a cursor-like approach so that we can checkpoint along the way.
	q := d.query.
		OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
		OrderBy(firestore.DocumentID, firestore.Asc).
		Where(d.updatedAtProperty.Raw(), "<=", now).
		Limit(d.backfillBatchSize)
	if !cp.IsZero() {
		if cp.AsID() == "" {
			// Time-only point: resume by timestamp.
			q = q.Where(d.updatedAtProperty.Raw(), ">=", cp.AsTime())
		} else {
			// Cursor point: resume after the (time, id) pair.
			q = q.StartAfter(cp.AsTime(), cp.AsID())
		}
	}
	snaps := q.Snapshots(ctx)
	defer snaps.Stop()
	snap, err := snaps.Next()
	if err != nil {
		// Mask cancellation errors.
		if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
			return nil
		}
		return errors.WithStack(err)
	}
	// We're going to call GetAll since we're running with a reasonable
	// limit value. This allows us to peek at the id of the last
	// document, so we can compute the eventual consistent point for
	// this batch of docs.
	docs, err := snap.Documents.GetAll()
	if err != nil {
		return errors.WithStack(err)
	}
	log.Tracef("received %d documents from %s", len(docs), d.sourcePath)
	// Workaround / BUG? It appears that the StartAfter call above
	// sometimes returns the last document from the previous backfill
	// loop. This loop ensures that the effective consistent point
	// always goes forward in time.
	for len(docs) > 0 {
		firstCP, err := d.backfillPoint(docs[0])
		if err != nil {
			return err
		}
		if stamp.Compare(firstCP, cp) > 0 {
			break
		}
		log.Tracef("filtering")
		docs = docs[1:]
	}
	// Helper for interruptible send idiom.
	send := func(msg logical.Message) error {
		select {
		case ch <- msg:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	// If we have read through the end of all documents in the
	// collection, we want the consistent-point to jump forward in time
	// to the server read-time.
	if len(docs) == 0 {
		cp = streamPoint(snap.ReadTime)
		return send(backfillEnd{cp})
	}
	// Move the proposed consistent point to the last document.
	lastDoc := docs[len(docs)-1]
	if cp, err = d.backfillPoint(lastDoc); err != nil {
		return err
	}
	// Send a batch of messages downstream. We use a non-blocking idiom
	if err := send(batchStart{cp}); err != nil {
		return err
	}
	for _, doc := range docs {
		if err := send(batchDoc{doc}); err != nil {
			return err
		}
	}
	return send(batchEnd{})
}
// ReadInto implements logical.Dialect and subscribes to streaming
// updates from the source.
func (d *Dialect) ReadInto(
ctx context.Context, ch chan<- logical.Message, state logical.State,
) error {
// The call to snaps.Next() below needs to be made interruptable.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-state.Stopping():
// Cancel early to interrupt call to snaps.Next() below.
cancel()
case <-ctx.Done():
// Normal exit path when ReadInto exits.
}
}()
cp, _ := state.GetConsistentPoint().(*consistentPoint)
// Stream from the last updated time.
q := d.query.
OrderBy(d.updatedAtProperty.Raw(), firestore.Asc).
StartAt(cp.AsTime().Truncate(time.Second))
snaps := q.Snapshots(ctx)
defer snaps.Stop()
// Helper for interruptible send.
send := func(msg logical.Message) error {
select {
case ch <- msg:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
for {
log.Tracef("getting snapshot for %s", d.sourcePath)
snap, err := snaps.Next()
if err != nil {
// Mask cancellations errors.
if status.Code(err) == codes.Canceled || errors.Is(err, iterator.Done) {
return nil
}
return errors.WithStack(err)
}
log.Tracef("collection %s: %d events", d.sourcePath, len(snap.Changes))
if err := send(batchStart{streamPoint(snap.ReadTime)}); err != nil {
return err
}
for _, change := range snap.Changes {
switch change.Kind {
case firestore.DocumentAdded,
firestore.DocumentModified:
// Ignore documents that we already know have been deleted.
if d.tombstones.IsDeleted(change.Doc.Ref) {
continue
}
if err := send(batchDoc{change.Doc}); err != nil {
return err
}
case firestore.DocumentRemoved:
d.tombstones.NotifyDeleted(change.Doc.Ref)
if err := send(batchDelete{change.Doc.Ref, change.Doc.ReadTime}); err != nil {
return err
}
}
}
if err := send(batchEnd{}); err != nil {
return err
}
}
}
// Process implements logical.Dialect.
func (d *Dialect) Process(
ctx context.Context, ch <-chan logical.Message, events logical.Events,
) error {
// Only write idempotency mark when we've committed a db transaction.
type mark struct {
ref *firestore.DocumentRef
time time.Time
}
var toMark []mark
for msg := range ch {
if logical.IsRollback(msg) {
if err := events.OnRollback(ctx, msg); err != nil {
return err
}
continue
}
switch t := msg.(type) {
case backfillEnd:
// Just advance the consistent point.
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
if err := events.OnCommit(ctx); err != nil {
return err
}
case batchStart:
toMark = toMark[:0]
if err := events.OnBegin(ctx, t.cp); err != nil {
return err
}
case batchDoc:
doc := t.doc
if ok, err := d.shouldProcess(ctx, doc.Ref, doc.UpdateTime); err != nil {
return err
} else if !ok {
continue
}
docUpdatedAt, err := d.docUpdatedAt(doc)
if err != nil {
return err
}
mut, err := d.marshalMutation(doc, docUpdatedAt)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.recurse {
if err := d.doRecurse(ctx, doc.Ref, events); err != nil {
return err
}
}
if d.idempotent {
toMark = append(toMark, mark{doc.Ref, doc.UpdateTime})
}
case batchDelete:
if ok, err := d.shouldProcess(ctx, t.ref, t.ts); err != nil {
return err
} else if !ok {
continue
}
mut, err := marshalDeletion(t.ref, t.ts)
if err != nil {
return err
}
// Pass an empty destination table, because we know that
// this is configured via a user-script.
if err := events.OnData(ctx,
d.sourceCollection, ident.Table{}, []types.Mutation{mut}); err != nil {
return err
}
if d.idempotent {
toMark = append(toMark, mark{t.ref, t.ts})
}
case batchEnd:
if err := events.OnCommit(ctx); err != nil {
return err
}
for _, mark := range toMark {
if err := d.markProcessed(ctx, mark.ref, mark.time); err != nil {
return err
}
}
default:
panic(fmt.Sprintf("unimplemented type %T", msg))
}
}
return nil
}
// ZeroStamp implements logical.Dialect.
func (d *Dialect) ZeroStamp() stamp.Stamp {
return &consistentPoint{}
}
// Compute the query-relative document start id. We need to do this so
// that sub-collections can be accessed in a consistent way.
//
// 2022-08-29: One way that does not work is to call Query.StartAfter()
// and then use Query.Serialize to hand the status over to the next
// backfill cycle.
func (d *Dialect) backfillPoint(doc *firestore.DocumentSnapshot) (*consistentPoint, error) {
topCollection := doc.Ref.Parent
for topCollection.Parent != nil {
// collection -> parent doc -> parent collection
topCollection = topCollection.Parent.Parent
}
relativePath := fmt.Sprintf("documents/%s/%s",
topCollection.ID, doc.Ref.Path[len(topCollection.Path)+1:])
updateTime, err := d.docUpdatedAt(doc)
if err != nil {
return nil, err
}
return &consistentPoint{
BackfillID: relativePath,
Time: updateTime,
}, nil
}
// docUpdatedAt extracts a timestamp from the document.
func (d *Dialect) docUpdatedAt(doc *firestore.DocumentSnapshot) (time.Time, error) {
val, err := doc.DataAt(d.updatedAtProperty.Raw())
if err != nil {
return time.Time{}, errors.WithStack(err)
}
if t, ok := val.(time.Time); ok {
return t, nil
}
return time.Time{}, errors.Errorf("document missing %q property", d.updatedAtProperty.Raw())
}
// marshalDeletion creates a mutation to represent the deletion of the | key, err := json.Marshal([]string{id.ID})
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
return types.Mutation{
Key: key,
Time: hlc.New(updatedAt.UnixNano(), 0),
}, nil
}
func (d *Dialect) marshalMutation(
doc *firestore.DocumentSnapshot, updatedAt time.Time,
) (types.Mutation, error) {
dataMap := doc.Data()
// Allow the doc id to be baked into the mutation.
if d.docIDProperty != "" {
dataMap[d.docIDProperty] = doc.Ref.ID
}
data, err := json.Marshal(dataMap)
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
key, err := json.Marshal([]string{doc.Ref.ID})
if err != nil {
return types.Mutation{}, errors.WithStack(err)
}
// Create empty slices so that we never pass a null value into JS.
parentCollections := make([]string, 0)
parentDocIds := make([]string, 0)
for parentCollection := doc.Ref.Parent; parentCollection != nil; {
parentCollections = append(parentCollections, parentCollection.ID)
if parentCollection.Parent != nil {
parentDocIds = append(parentDocIds, parentCollection.Parent.ID)
parentCollection = parentCollection.Parent.Parent
} else {
break
}
}
// The timestamps are converted to values that are easy to wrap
// a JS Date around in the user script.
// https://pkg.go.dev/github.com/dop251/goja#hdr-Handling_of_time_Time
meta := map[string]any{
"createTime": doc.CreateTime.UnixNano() / 1e6,
"id": doc.Ref.ID,
"parentCollections": parentCollections,
"parentDocIds": parentDocIds,
"path": doc.Ref.Path,
"readTime": doc.ReadTime.UnixNano() / 1e6,
"updateTime": doc.UpdateTime.UnixNano() / 1e6,
}
return types.Mutation{
Data: data,
Key: key,
Time: hlc.New(updatedAt.UnixNano(), 0),
Meta: meta,
}, nil
}
// doRecurse, if configured, will load dynamic sub-collections of
// the given document.
func (d *Dialect) doRecurse(
ctx context.Context, doc *firestore.DocumentRef, events logical.Events,
) error {
it := doc.Collections(ctx)
for {
coll, err := it.Next()
if err == iterator.Done {
return nil
}
if err != nil {
return errors.Wrapf(err, "loading dynamic collections of %s", doc.Path)
}
if _, skip := d.recurseFilter.Get(ident.New(coll.ID)); skip {
continue
}
fork := *d
fork.query = coll.Query
fork.sourcePath = coll.Path
if err := events.Backfill(ctx, coll.Path, &fork); err != nil {
return errors.WithMessage(err, coll.Path)
}
}
}
// markProcessed records an incoming document as having been processed.
func (d *Dialect) markProcessed(
ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) error {
payload := processedPayload{UpdatedAt: ts}
data, err := json.Marshal(&payload)
if err != nil {
return errors.WithStack(err)
}
return d.memo.Put(ctx, d.pool, processedKey(doc), data)
}
// shouldProcess implements idempotent processing of document snapshots.
// It ensures that the update-time of any given document always
// advances.
func (d *Dialect) shouldProcess(
ctx context.Context, doc *firestore.DocumentRef, ts time.Time,
) (bool, error) {
if !d.idempotent {
return true, nil
}
data, err := d.memo.Get(ctx, d.pool, processedKey(doc))
if err != nil {
return false, err
}
// No data means we're seeing the document for the first time.
if data == nil {
log.Tracef("accepting document %s at %s", doc.ID, ts)
return true, nil
}
var payload processedPayload
if err := json.Unmarshal(data, &payload); err != nil {
return false, errors.WithStack(err)
}
if ts.After(payload.UpdatedAt) {
log.Tracef("accepting document %s at %s > %s", doc.ID, ts, payload.UpdatedAt)
return true, nil
}
log.Tracef("ignoring document %s at %s <= %s", doc.ID, ts, payload.UpdatedAt)
return false, nil
}
// processedPayload is used by markProcessed and shouldProcess.
type processedPayload struct {
UpdatedAt time.Time `json:"u,omitempty"`
}
// processedKey returns the memo key used by markProcessed and
// shouldProcess.
func processedKey(ref *firestore.DocumentRef) string {
return fmt.Sprintf("fs-doc-%s", ref.Path)
} | // specified document.
func marshalDeletion(id *firestore.DocumentRef, updatedAt time.Time) (types.Mutation, error) { | random_line_split |
test_query_performance.py | """
test jianbo's queries performance
"""
import os
import json
import sys
import re
import argparse
import codecs
from string import Template
# Indri parameter-file templates. The triple-quoted bodies are emitted
# verbatim (with $-placeholders substituted), so their content must not
# be reformatted.

# A single <query> element: topic id plus the Indri query string.
query_template = Template("""
<query>
\t<number>$qid</number>
\t<text>$q_string</text>
</query>
""")
# A complete IndriRunQuery parameter file: index path, TREC output
# format, run id, result count, the query elements, and the optional
# retrieval rule, stopword list, and feedback (psr) elements.
structure_template = Template("""
<parameters>
<index>$index</index>
<trecFormat>true</trecFormat>
<runID>$run_id</runID>
<count>$count</count>
$query_body
$rule
$stopper
$psr
</parameters>""")
# An IndriBuildIndex parameter file.
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
# A single <corpus> element of class trectext.
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
# One TREC-text document.
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
    """Base query class: a topic id plus its raw query text.

    NOTE(review): ``Text`` is not defined or imported in this file;
    confirm which module it is expected to come from.
    """
    def __init__(self,qid,query_text):
        self._qid = qid
        self._text = query_text
        # Parsed representation of the query text (external Text type).
        self._text_struct = Text(query_text)
    @property
    def original_model(self):
        """Return the raw model of the parsed query text."""
        return self._text_struct.raw_model()
    @property
    def text(self):
        """Return the query text coerced to a plain string."""
        return "%s" %self._text
class ExpandedQuery(Query):
    """Query carrying expansion terms to be interpolated with the
    original text (weights lambda and 1 - lambda in _gene_query).

    NOTE(review): ``Model`` is not defined or imported in this file;
    confirm which module it is expected to come from.
    """
    def __init__(self,qid,query_text,para_lambda):
        # para_lambda: interpolation weight given to the original query.
        self._para_lambda = para_lambda
        super(ExpandedQuery,self).__init__(qid,query_text)
        self._expanding_model = None
    def expand(self,expanding_term_weights):
        """Install the expansion model from a {term: weight} dict."""
        self._expanding_model = Model(False,text_dict=expanding_term_weights)
    @property
    def expanding_model(self):
        """Return the expansion model; raises RuntimeError if expand()
        has not been called yet."""
        if not self._expanding_model:
            raise RuntimeError("Not expanded yet!")
        return self._expanding_model.model
    @property
    def para_lambda(self):
        """Interpolation weight of the original query."""
        return self._para_lambda
class IndriQueryFactory(object):
    """Take in query related parameters for indri and
    generate indri query file.
    """
    def __init__(self,count,rule=None,
                 use_stopper=False,date_when=None,
                 numeric_compare=None, psr=False):
        # count: results per query; rule: Indri retrieval rule string;
        # psr: emit pseudo-relevance-feedback elements.
        self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
        if date_when:
            if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
                raise ValueError("When value %s is not supported" %(date_when))
        if numeric_compare is not None:
            if numeric_compare not in ["less","greater","between","equals"]:
                raise ValueError("Compare value %s is not supported" %(numeric_compare))
        self._date_when,self._numeric_compare = date_when,numeric_compare
    def _gene_query(self,file_path,queries,index,run_id,
                    date_value=None,numeric_value=None,
                    numeric_field_name=None,fbDocs=None,
                    fbTerms=None,fbOrigWeight=None):
        """Write an Indri parameter file for *queries* to *file_path*.

        *queries* maps qid -> query data; each value may be a Query /
        ExpandedQuery instance, a (unicode) string, a list of terms,
        or a {term: weight} dict.
        """
        query_body = ""
        if self._rule is None:
            rule = ""
        else:
            rule = "<rule>%s</rule>" %self._rule
        if self._use_stopper:
            # Inline the stopword list into the parameter file.
            # NOTE(review): get_stopwords is not defined in this file.
            stopper = "<stopper>\n"
            stopwords = get_stopwords()
            for stopword in stopwords:
                stopper += "<word>%s</word>\n" %stopword
            stopper += "</stopper>"
        else:
            stopper = ""
        for qid in queries:
            sinlge_query_data = queries[qid]
            if isinstance(sinlge_query_data,Query):
                # Replace non-word characters with spaces.
                original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
                if isinstance(sinlge_query_data,ExpandedQuery):
                    # Interpolate the original query with the expansion
                    # terms: lambda vs (1 - lambda).
                    original_weight = sinlge_query_data.para_lambda
                    expanding_weight = 1-sinlge_query_data.para_lambda
                    expanding_string = ""
                    for term in sinlge_query_data.expanding_model:
                        term_weight = sinlge_query_data.expanding_model[term]
                        expanding_string += "%f %s " %(term_weight,term)
                    if len(expanding_string) == 0:
                        q_string = "#combine( %s )" %(original_text)
                    else:
                        q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
                            %(original_weight,original_text,
                              expanding_weight,expanding_string)
                else:
                    q_string = "#combine( %s )" %(original_text)
            elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
                q_string = sinlge_query_data.lower()
                q_string = re.sub("[^\w]"," ",q_string)
                q_string = "#combine( %s )" %(q_string)
            elif isinstance(sinlge_query_data,list):
                q_string = " ".join(sinlge_query_data)
                q_string = "#combine( %s )" %(q_string)
            elif isinstance(sinlge_query_data,dict):
                # {term: weight} -> "#weight( w1 t1 w2 t2 ... )"
                q_string = ""
                for term in sinlge_query_data:
                    weight = sinlge_query_data[term]
                    q_string += "%f %s " %(weight,term)
                q_string = "#weight( %s )" %(q_string)
            else:
                raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
            if self._date_when:
                # Wrap the query in a date filter.
                q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
                    q_string)
            if self._numeric_compare is not None:
                # Wrap the query in a numeric-field filter.
                q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
                    numeric_field_name,numeric_value,q_string)
            psr = ""
            if self._psr :
                # All three feedback parameters are required together.
                if not (fbDocs and fbTerms and fbOrigWeight):
                    raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
                psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
                psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
                psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
            query_body+=query_template.substitute(
                qid=qid,q_string=q_string)
        with codecs.open(file_path, 'w','utf-8') as f:
            f.write(structure_template.substitute(query_body=query_body,index=index,
                run_id=run_id,count=str(self._count),
                rule=rule,stopper=stopper,psr=psr))
    def gene_query_with_date_filter(self,file_path,queries,index,
                                    date_value,run_id="test",fbDocs=None,
                                    fbTerms=None,fbOrigWeight=None):
        """Generate a query file with a date filter applied."""
        self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
            fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
    def gene_query_with_numeric_filter(self,file_path,queries,index,
                                       numeric_value,numeric_field_name,run_id="test",
                                       fbDocs=None,fbTerms=None,fbOrigWeight=None):
        """Generate a query file with a numeric-field filter applied."""
        self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
            numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
            fbOrigWeight=fbOrigWeight)
    def gene_normal_query(self,file_path,queries,index,run_id="test"):
        """Generate a plain (unfiltered) query file."""
        self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
    """Parse <eval_dir>/qrels.txt into {qid: {docid: judgement}}.

    Each qrels line is "<qid> <iter> <docid> <judgement>"; negative
    judgements are clamped to 0.
    """
    qrels = {}
    qrel_file = os.path.join(eval_dir, "qrels.txt")
    with open(qrel_file) as qrel_in:
        for raw_line in qrel_in:
            fields = raw_line.rstrip().split()
            qid, docid = fields[0], fields[2]
            judgement = int(fields[3])
            if judgement < 0:
                judgement = 0
            qrels.setdefault(qid, {})[docid] = judgement
    return qrels
def read_query_file(query_file, qrels):
    """Load topic titles from a JSON query file.

    Only topics whose ``topid`` appears in *qrels* are kept; returns
    {qid: title}.
    """
    with open(query_file) as query_in:
        topics = json.load(query_in)
    return dict(
        (topic["topid"], topic["title"])
        for topic in topics
        if topic["topid"] in qrels
    )
def build_temp_query(queries,temp_query_para_file,index_dir):
    """Write an Indri parameter file for *queries* against *index_dir*.

    Uses the F2EXP retrieval rule (s=0.1) and requests the top 100
    documents per query.
    """
    retrieval_method = "method:f2exp,s:0.1"
    temp_query_builder = IndriQueryFactory(count=100,
                                           rule=retrieval_method)
    temp_query_builder.gene_normal_query(temp_query_para_file,
                                         queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
    """Run IndriRunQuery on the parameter file, redirecting its TREC
    output to *temp_result_file*.

    NOTE(review): the command is built by string interpolation and run
    through the shell, so paths containing shell metacharacters would be
    interpreted. If the paths can ever be attacker-controlled, prefer
    subprocess with an argument list and an explicit stdout file.
    """
    os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
    """Score the TREC-format result file against *qrels* and print it.

    For each judged query, sums the judgement values of its retrieved
    documents scaled by 1/100, then averages that per-query sum over
    all judged queries (including queries with no results).
    """
    performance = {}
    with open(temp_result_file) as f:
        for line in f:
            # TREC result line: qid Q0 docid rank score runid
            line = line.rstrip()
            parts = line.split()
            qid = parts[0]
            docid = parts[2]
            if qid not in qrels:
                # print "query %s does not have judgement" %(qid)
                continue
            else:
                if qid not in performance:
                    performance[qid] = .0
                if docid in qrels[qid]:
                    performance[qid] += qrels[qid][docid]*1.0/100
    # Average over ALL judged queries, not just those with results.
    final_performance = sum(performance.values())*1.0/len(qrels)
    print "the number of queries evaluated %d" %(len(qrels))
    print "the final performance is %f" %(final_performance)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("query_file")
parser.add_argument("--index_dir","-ir",default="/infolab/headnode2/lukuang/2016-rts/data/incremental_index")
parser.add_argument("--eval_dir","-er",default="/infolab/node4/lukuang/2015-RTS/src/2016/eval")
args=parser.parse_args()
temp_dir = "/tmp"
prefix = "jianbo_mb_test_"
temp_query_para_file = os.path.join(temp_dir,prefix+"temp_query_para")
temp_result_file = os.path.join(temp_dir,prefix+"temp_result")
qrels = read_qrels(args.eval_dir)
"Got qrels"
queries = read_query_file(args.query_file,qrels)
print "Got queries"
build_temp_query(queries,temp_query_para_file,args.index_dir)
print "Built Indri queries"
run_query(temp_query_para_file,temp_result_file)
print "Ran query and got results"
evaluate_temp_result(temp_result_file,qrels)
if __name__=="__main__":
main()
| original_model | identifier_name |
test_query_performance.py | """
test jianbo's queries performance
"""
import os
import json
import sys
import re
import argparse
import codecs
from string import Template
query_template = Template("""
<query>
\t<number>$qid</number>
\t<text>$q_string</text>
</query>
""")
structure_template = Template("""
<parameters>
<index>$index</index>
<trecFormat>true</trecFormat>
<runID>$run_id</runID>
<count>$count</count>
$query_body
$rule
$stopper
$psr
</parameters>""")
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def original_model(self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
stopper += "<word>%s</word>\n" %stopword
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
|
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if qid not in qrels:
# print "query %s does not have judgement" %(qid)
continue
else:
if qid not in performance:
performance[qid] = .0
if docid in qrels[qid]:
performance[qid] += qrels[qid][docid]*1.0/100
final_performance = sum(performance.values())*1.0/len(qrels)
print "the number of queries evaluated %d" %(len(qrels))
print "the final performance is %f" %(final_performance)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("query_file")
parser.add_argument("--index_dir","-ir",default="/infolab/headnode2/lukuang/2016-rts/data/incremental_index")
parser.add_argument("--eval_dir","-er",default="/infolab/node4/lukuang/2015-RTS/src/2016/eval")
args=parser.parse_args()
temp_dir = "/tmp"
prefix = "jianbo_mb_test_"
temp_query_para_file = os.path.join(temp_dir,prefix+"temp_query_para")
temp_result_file = os.path.join(temp_dir,prefix+"temp_result")
qrels = read_qrels(args.eval_dir)
"Got qrels"
queries = read_query_file(args.query_file,qrels)
print "Got queries"
build_temp_query(queries,temp_query_para_file,args.index_dir)
print "Built Indri queries"
run_query(temp_query_para_file,temp_result_file)
print "Ran query and got results"
evaluate_temp_result(temp_result_file,qrels)
if __name__=="__main__":
main()
| qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels | identifier_body |
test_query_performance.py | """
test jianbo's queries performance
"""
import os
import json
import sys
import re | query_template = Template("""
<query>
\t<number>$qid</number>
\t<text>$q_string</text>
</query>
""")
structure_template = Template("""
<parameters>
<index>$index</index>
<trecFormat>true</trecFormat>
<runID>$run_id</runID>
<count>$count</count>
$query_body
$rule
$stopper
$psr
</parameters>""")
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def original_model(self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
stopper += "<word>%s</word>\n" %stopword
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if qid not in qrels:
# print "query %s does not have judgement" %(qid)
continue
else:
if qid not in performance:
performance[qid] = .0
if docid in qrels[qid]:
performance[qid] += qrels[qid][docid]*1.0/100
final_performance = sum(performance.values())*1.0/len(qrels)
print "the number of queries evaluated %d" %(len(qrels))
print "the final performance is %f" %(final_performance)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("query_file")
parser.add_argument("--index_dir","-ir",default="/infolab/headnode2/lukuang/2016-rts/data/incremental_index")
parser.add_argument("--eval_dir","-er",default="/infolab/node4/lukuang/2015-RTS/src/2016/eval")
args=parser.parse_args()
temp_dir = "/tmp"
prefix = "jianbo_mb_test_"
temp_query_para_file = os.path.join(temp_dir,prefix+"temp_query_para")
temp_result_file = os.path.join(temp_dir,prefix+"temp_result")
qrels = read_qrels(args.eval_dir)
"Got qrels"
queries = read_query_file(args.query_file,qrels)
print "Got queries"
build_temp_query(queries,temp_query_para_file,args.index_dir)
print "Built Indri queries"
run_query(temp_query_para_file,temp_result_file)
print "Ran query and got results"
evaluate_temp_result(temp_result_file,qrels)
if __name__=="__main__":
main() | import argparse
import codecs
from string import Template
| random_line_split |
test_query_performance.py | """
test jianbo's queries performance
"""
import os
import json
import sys
import re
import argparse
import codecs
from string import Template
query_template = Template("""
<query>
\t<number>$qid</number>
\t<text>$q_string</text>
</query>
""")
structure_template = Template("""
<parameters>
<index>$index</index>
<trecFormat>true</trecFormat>
<runID>$run_id</runID>
<count>$count</count>
$query_body
$rule
$stopper
$psr
</parameters>""")
index_para_template = Template("""
<parameters>
<index>$index_path</index>
<memory>$memory</memory>
$corpora
<stemmer><name>$stemmer</name></stemmer>
$fields
$stopper
</parameters>""")
corpus_template = Template("""
<corpus>
\t<path>$path</path>
\t<class>trectext</class>
</corpus>
""")
text_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<TEXT>$text</TEXT>$fields
</DOC>""")
class Query(object):
"""Base query class
"""
def __init__(self,qid,query_text):
self._qid = qid
self._text = query_text
self._text_struct = Text(query_text)
@property
def original_model(self):
return self._text_struct.raw_model()
@property
def text(self):
return "%s" %self._text
class ExpandedQuery(Query):
"""Queries with expansion
"""
def __init__(self,qid,query_text,para_lambda):
self._para_lambda = para_lambda
super(ExpandedQuery,self).__init__(qid,query_text)
self._expanding_model = None
def expand(self,expanding_term_weights):
self._expanding_model = Model(False,text_dict=expanding_term_weights)
@property
def expanding_model(self):
if not self._expanding_model:
raise RuntimeError("Not expanded yet!")
return self._expanding_model.model
@property
def para_lambda(self):
return self._para_lambda
class IndriQueryFactory(object):
"""Take in query related parameters for indri and
generate indri query file
"""
def __init__(self,count,rule=None,
use_stopper=False,date_when=None,
numeric_compare=None, psr=False):
self._count,self._rule,self._use_stopper,self._psr = count,rule,use_stopper,psr
if date_when:
if date_when not in ["dateafter","datebefore", "datebetween","dateequals"]:
raise ValueError("When value %s is not supported" %(date_when))
if numeric_compare is not None:
if numeric_compare not in ["less","greater","between","equals"]:
raise ValueError("Compare value %s is not supported" %(numeric_compare))
self._date_when,self._numeric_compare = date_when,numeric_compare
def _gene_query(self,file_path,queries,index,run_id,
date_value=None,numeric_value=None,
numeric_field_name=None,fbDocs=None,
fbTerms=None,fbOrigWeight=None):
query_body = ""
if self._rule is None:
rule = ""
else:
rule = "<rule>%s</rule>" %self._rule
if self._use_stopper:
stopper = "<stopper>\n"
stopwords = get_stopwords()
for stopword in stopwords:
|
stopper += "</stopper>"
else:
stopper = ""
for qid in queries:
sinlge_query_data = queries[qid]
if isinstance(sinlge_query_data,Query):
original_text = re.sub("[^\w]"," ",sinlge_query_data.text)
if isinstance(sinlge_query_data,ExpandedQuery):
original_weight = sinlge_query_data.para_lambda
expanding_weight = 1-sinlge_query_data.para_lambda
expanding_string = ""
for term in sinlge_query_data.expanding_model:
term_weight = sinlge_query_data.expanding_model[term]
expanding_string += "%f %s " %(term_weight,term)
if len(expanding_string) == 0:
q_string = "#combine( %s )" %(original_text)
else:
q_string = "#weight( %f #combine( %s) %f #weight( %s ) )" \
%(original_weight,original_text,
expanding_weight,expanding_string)
else:
q_string = "#combine( %s )" %(original_text)
elif isinstance(sinlge_query_data,str) or isinstance(sinlge_query_data,unicode):
q_string = sinlge_query_data.lower()
q_string = re.sub("[^\w]"," ",q_string)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,list):
q_string = " ".join(sinlge_query_data)
q_string = "#combine( %s )" %(q_string)
elif isinstance(sinlge_query_data,dict):
q_string = ""
for term in sinlge_query_data:
weight = sinlge_query_data[term]
q_string += "%f %s " %(weight,term)
q_string = "#weight( %s )" %(q_string)
else:
raise TypeError("unsupported value type %s for query data" %type(sinlge_query_data))
if self._date_when:
q_string = "#filreq( #%s( %s ) %s)" %(self._date_when,date_value,
q_string)
if self._numeric_compare is not None:
q_string = "#filreq( #%s( %s %d ) %s)" %(self._numeric_compare,
numeric_field_name,numeric_value,q_string)
psr = ""
if self._psr :
if not (fbDocs and fbTerms and fbOrigWeight):
raise ValueError("need valid fbDocs and fbTerms and fbOrigWeight!")
psr += "<fbDocs>%d</fbDocs>" %(fbDocs)
psr += "<fbTerms>%d</fbTerms>" %(fbTerms)
psr += "<fbOrigWeight>%f</fbOrigWeight>" %(fbOrigWeight)
query_body+=query_template.substitute(
qid=qid,q_string=q_string)
with codecs.open(file_path, 'w','utf-8') as f:
f.write(structure_template.substitute(query_body=query_body,index=index,
run_id=run_id,count=str(self._count),
rule=rule,stopper=stopper,psr=psr))
def gene_query_with_date_filter(self,file_path,queries,index,
date_value,run_id="test",fbDocs=None,
fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id=run_id,date_value=date_value,
fbDocs=fbDocs,fbTerms=fbTerms,fbOrigWeight=fbOrigWeight)
def gene_query_with_numeric_filter(self,file_path,queries,index,
numeric_value,numeric_field_name,run_id="test",
fbDocs=None,fbTerms=None,fbOrigWeight=None):
self._gene_query(file_path,queries,index,run_id,numeric_value=numeric_value,
numeric_field_name=numeric_field_name,fbDocs=fbDocs,fbTerms=fbTerms,
fbOrigWeight=fbOrigWeight)
def gene_normal_query(self,file_path,queries,index,run_id="test"):
self._gene_query(file_path,queries,index,run_id=run_id)
#
#-------------------before are utility code----------------------------
#-------------------below are the code that SHOULD be modified---------
#
def read_qrels(eval_dir):
qrel_file = os.path.join(eval_dir,"qrels.txt")
qrels = {}
with open(qrel_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
jud = max(0,int(parts[3]) )
if qid not in qrels:
qrels[qid] = {}
qrels[qid][docid] = jud
return qrels
def read_query_file(query_file,qrels):
queries = {}
data = json.load(open(query_file))
for single_query in data:
qid = single_query["topid"]
if qid not in qrels:
continue
# text = re.sub("[^\w ]+"," ",single_query["title"])
# queries[qid] = text
queries[qid] = single_query["title"]
return queries
def build_temp_query(queries,temp_query_para_file,index_dir):
retrieval_method = "method:f2exp,s:0.1"
temp_query_builder = IndriQueryFactory(count=100,
rule=retrieval_method)
temp_query_builder.gene_normal_query(temp_query_para_file,
queries,index_dir)
def run_query(temp_query_para_file,temp_result_file):
os.system("IndriRunQuery %s > %s" %(temp_query_para_file,temp_result_file))
def evaluate_temp_result(temp_result_file,qrels):
performance = {}
with open(temp_result_file) as f:
for line in f:
line = line.rstrip()
parts = line.split()
qid = parts[0]
docid = parts[2]
if qid not in qrels:
# print "query %s does not have judgement" %(qid)
continue
else:
if qid not in performance:
performance[qid] = .0
if docid in qrels[qid]:
performance[qid] += qrels[qid][docid]*1.0/100
final_performance = sum(performance.values())*1.0/len(qrels)
print "the number of queries evaluated %d" %(len(qrels))
print "the final performance is %f" %(final_performance)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("query_file")
parser.add_argument("--index_dir","-ir",default="/infolab/headnode2/lukuang/2016-rts/data/incremental_index")
parser.add_argument("--eval_dir","-er",default="/infolab/node4/lukuang/2015-RTS/src/2016/eval")
args=parser.parse_args()
temp_dir = "/tmp"
prefix = "jianbo_mb_test_"
temp_query_para_file = os.path.join(temp_dir,prefix+"temp_query_para")
temp_result_file = os.path.join(temp_dir,prefix+"temp_result")
qrels = read_qrels(args.eval_dir)
"Got qrels"
queries = read_query_file(args.query_file,qrels)
print "Got queries"
build_temp_query(queries,temp_query_para_file,args.index_dir)
print "Built Indri queries"
run_query(temp_query_para_file,temp_result_file)
print "Ran query and got results"
evaluate_temp_result(temp_result_file,qrels)
if __name__=="__main__":
main()
| stopper += "<word>%s</word>\n" %stopword | conditional_block |
translator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import icu
import locale
import os
import re
import shutil
import subprocess
import sys
import tex_math
import tzlocal
import unittest
RE_PO_FILE = re.compile(r'.*\.(.*)\.po$')
DEFAULT_PLURAL = 'nplurals=2; plural=n != 1'
class | :
class Argument:
def __init__(self, content, begin_pos, end_pos):
self.content = content
self.begin_pos = begin_pos
self.end_pos = end_pos
def __hash__(self):
return hash(self.content)
def __eq__(self, other):
return isinstance(other, Tag.Argument) and self.content == other.content
def __str__(self):
return self.content
def __init__(self, name, args, begin_pos, end_pos):
self.name = name
self.args = args
self.begin_pos = begin_pos
self.end_pos = end_pos
def __eq__(self, other):
return isinstance(other, Tag) and self.name == other.name and self.args == other.args
def __hash__(self):
return hash(self.name)+sum([hash(i) for i in self.args])
def __str__(self):
return self.name+''.join(['{'+str(i)+'}' for i in self.args])
class Document:
@staticmethod
def load(file):
return Document(file)
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def generate(self):
root, _ = os.path.splitext(self.name)
output = root+'.pdf'
subprocess.check_call(['xelatex', self.name])
return output
def find_tags(self, tag, nargs=1):
with open(self.name) as file:
doc = file.read()
line_number = [ 0 for i in range(len(doc)) ]
line = 1
for i in range(len(doc)):
line_number[i] = line
if doc[i] == '\n':
line += 1
texts = list()
pos = 0
def _find_matching_closing(i):
depth = 0
while True:
pc = doc[i-1] if i-1 > 0 else None
c = doc[i]
if c == '{' and pc != '\\':
depth += 1
elif c == '}' and pc != '\\':
depth -= 1
if depth == 0:
break
i += 1
return i
while True:
i = doc.find(tag, pos)
if i < 0:
break
args = []
start_tag = i
end = start = pos = start_tag+len(tag)
for n in range(nargs):
try:
end = _find_matching_closing(start)
except Exception as e:
raise Exception(
'Could not find end for tag that starts at line '+
'{line} ({text})'.format(
line=line_number[start],
text=(
doc[max(start-20, 0):start]+' --> '+
doc[start:min(start+20, len(doc))])
))
start += 1 #skip initial '{'
args.append(Tag.Argument(doc[start:end], start, end))
start = doc.find('{', end)
texts.append(Tag(tag, args, start_tag, end))
return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update
template_name = self.generate_template(document)
sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\npgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[2].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
template.write('\n')
return template_name
def translate_tag(self, tag):
if tag.name == '\\gettext':
if not self.file:
return tag.args[0].content
else:
return self[(tag.args[0].content, None)][self.TAG_MSGSTR]
elif tag.name == '\\ngettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[0].content, tag.args[1].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[0].content, None)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[2].content, variants)
elif tag.name == '\\pgettext':
if not self.file:
return tag.args[1].content
return self[(tag.args[1].content, tag.args[0].content)][self.TAG_MSGSTR]
elif tag.name == '\\npgettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[1].content, tag.args[2].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[1].content, tag.args[0].content)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[3].content, variants)
elif tag.name == '\\today':
return self._icu_date_full.format(float(datetime.datetime.now().timestamp()))
elif tag.name == '\\formatdate':
return self._icu_date_full.format(float(datetime.datetime(*[int(i.content) for i in tag.args][::-1]).timestamp()))
else:
raise Exception('Unknown tag: '+tag.name)
def _ensure_parsed(self):
    """Parse the .po catalogue into ``self._parsed`` on first use.

    ``self._parsed`` maps ``(msgid, msgctxt-or-None)`` to a dict of PO
    keywords (msgid/msgstr/...) with unescaped string values.  The
    header entry (empty msgid) is split out into ``self._header``.
    """
    if not self.file:
        raise Exception('Translation instance has no associated file')
    # NOTE(review): an empty parse result ({}) is falsy and would trigger
    # a re-parse on every call — confirm catalogues are never empty.
    if self._parsed:
        return
    sys.stderr.write('Parsing {}\n'.format(self.file))
    with open(self.file) as f:
        self._parsed = {}
        def add_tr(tag):
            # Store one finished entry, keyed by (msgid, msgctxt).
            key = (tag[self.TAG_MSGID], tag.get(self.TAG_MSGCTXT, None))
            if key in self._parsed:
                raise Exception('Key already exists: '+repr(key))
            self._parsed[key] = tag
        tag = {}
        def add_tag(key, value):
            # Join continuation lines and undo PO quoting/escaping.
            value = value.replace('\n', '').replace('""', '').strip('"').replace('\\\\', '\\')
            tag[key] = value
        next_tag = None
        for line in f:
            if line.startswith('#'):
                continue
            if not line.startswith('"'):
                # Keyword line; a new msgid closes the previous entry.
                if tag and line.startswith(self.TAG_MSGID+' '):
                    add_tr(tag)
                    tag = {}
                if next_tag is not None:
                    add_tag(next_tag, next_tag_content)
                sep = line.find(' ')
                next_tag = line[:sep].strip()
                next_tag_content = line[sep:].strip()
            else:
                # Continuation of the previous keyword's quoted string.
                next_tag_content += line
        # Flush the last pending keyword and entry.
        # NOTE(review): fails (next_tag is None / tag empty) on an empty
        # catalogue — confirm files always contain at least one entry.
        add_tag(next_tag, next_tag_content)
        add_tr(tag)
    if ('',None) in self._parsed:
        # The empty-msgid entry is the catalogue header; split its
        # "Key: value" lines (separated by literal \n) into _header.
        self._header = {}
        headers = self._parsed.pop(('',None))[self.TAG_MSGSTR].split('\\n')
        for i in headers:
            sep = i.find(':')
            key = i[:sep].strip()
            value = i[sep+1:].strip()
            if key:
                self._header[key] = value
def get_header(self, key):
    """Return the value of a .po header field, e.g. 'Plural-Forms'."""
    self._ensure_parsed()
    # NOTE(review): _header is only created when the catalogue has a
    # header entry; raises AttributeError otherwise — confirm intended.
    return self._header[key]
def __getitem__(self, key):
    """Look up a parsed catalogue entry by a ``(msgid, msgctxt)`` pair."""
    self._ensure_parsed()
    # Normalize to a tuple so list keys also work.
    lookup = (key[0], key[1])
    return self._parsed[lookup]
def find_translations(input_file, directory=None, languages=None):
    """Locate .po translation files for *input_file*.

    With *languages* given, build the expected '<base>.<lang>.po' path
    for each language (missing files are allowed); otherwise scan
    *directory* (default: current working directory) for any file
    matching the .po naming pattern.
    """
    directory = directory or os.getcwd()
    if languages:
        base_name, _ = os.path.splitext(input_file)
        return [
            Translation.load(
                input_file,
                os.path.join(directory, '{}.{}.po'.format(base_name, lang)),
                Translation.ALLOW_NOT_EXISTING)
            for lang in languages
        ]
    return [
        Translation.load(input_file, os.path.join(directory, entry))
        for entry in os.listdir(directory)
        if RE_PO_FILE.match(entry)
    ]
def convert_plurals(description, n, variants):
    """Expand a gettext plural rule into LaTeX selection code.

    Args:
        description: a Plural-Forms header value, e.g.
            'nplurals=2; plural=n != 1'.
        n: the TeX expression yielding the count at run time.
        variants: the translated strings, one per plural form.

    Returns:
        LaTeX code that sets the counter ``_gettext_n`` from the plural
        rule and selects the matching variant via nested \\ifthenelse,
        with the last variant as the fallback.

    Raises:
        Exception: if the description cannot be parsed, or the number of
            variants does not match ``nplurals``.
    """
    NPLURALS = 'nplurals'
    PLURAL = 'plural'
    try:
        desc = description.split(';')
        nplurals = desc[0].strip()
        if not nplurals.startswith(NPLURALS):
            raise Exception('First element "{}" does not start with "{}"'.format(
                nplurals, NPLURALS))
        nplurals = int(nplurals[len(NPLURALS):].strip('='))
        plural = desc[1].strip()
        if not plural.startswith(PLURAL):
            raise Exception('Second element "{}" does not start with "{}"'.format(
                plural, PLURAL))
        plural = plural[len(PLURAL):].strip('=')
        # Translate the C-like plural expression into TeX, substituting
        # the caller-supplied expression for 'n'.
        parser = tex_math.Parser(plural)
        parser.override_identifier('n', n)
        plural = tex_math.Generator(parser.parse()).generate()
    except Exception as e:
        # Bug fixes: chain the original error so the real cause is kept,
        # and describe the actual expected format ('nplurals=', not
        # 'nplurals:').  The old unreachable trailing 'return' that
        # referenced undefined msgid1/msgid2 has been removed.
        raise Exception(
            'Plurals definition must be formed as "nplurals=<n>; plural=<rule>"') from e
    if len(variants) != nplurals:
        raise Exception('Invalid number of variants found (expected {}, but {} found)'.format(nplurals, len(variants)))
    # Emit: set the counter, then a chain of \ifthenelse selecting
    # variants[i] when the counter equals i.
    parts = ['\\setcounter{_gettext_n}{', plural, '}']
    for i in range(nplurals-1):
        parts.append('\\ifthenelse{\\equal{\\value{_gettext_n}}{'+str(i)+'}}{')
        parts.append(variants[i])
        parts.append('}{')
    parts.append(variants[-1])
    parts.append('}'*(nplurals-1))
    return ''.join(parts)
if __name__ == '__main__':
    # Script entry point: discover and run this module's unit tests.
    import unittest
    unittest.main()
| Tag | identifier_name |
translator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import icu
import locale
import os
import re
import shutil
import subprocess
import sys
import tex_math
import tzlocal
import unittest
RE_PO_FILE = re.compile(r'.*\.(.*)\.po$')
DEFAULT_PLURAL = 'nplurals=2; plural=n != 1'
class Tag:
    """A LaTeX command occurrence: its name, arguments and source span."""

    class Argument:
        """One brace-delimited argument and where it sits in the text.

        Equality and hashing depend only on the content, so identical
        arguments at different positions compare equal.
        """

        def __init__(self, content, begin_pos, end_pos):
            self.content = content
            self.begin_pos = begin_pos
            self.end_pos = end_pos

        def __eq__(self, other):
            if not isinstance(other, Tag.Argument):
                return False
            return self.content == other.content

        def __hash__(self):
            return hash(self.content)

        def __str__(self):
            return self.content

    def __init__(self, name, args, begin_pos, end_pos):
        self.name = name
        self.args = args
        self.begin_pos = begin_pos
        self.end_pos = end_pos

    def __eq__(self, other):
        if not isinstance(other, Tag):
            return False
        return (self.name, self.args) == (other.name, other.args)

    def __hash__(self):
        # Position-independent, consistent with __eq__.
        result = hash(self.name)
        for arg in self.args:
            result += hash(arg)
        return result

    def __str__(self):
        rendered = ['{' + str(arg) + '}' for arg in self.args]
        return self.name + ''.join(rendered)
class Document:
@staticmethod
def load(file):
return Document(file)
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def generate(self):
root, _ = os.path.splitext(self.name)
output = root+'.pdf'
subprocess.check_call(['xelatex', self.name])
return output
def find_tags(self, tag, nargs=1):
with open(self.name) as file:
doc = file.read()
line_number = [ 0 for i in range(len(doc)) ]
line = 1
for i in range(len(doc)):
line_number[i] = line
if doc[i] == '\n':
line += 1
texts = list()
pos = 0
def _find_matching_closing(i):
depth = 0
while True:
pc = doc[i-1] if i-1 > 0 else None
c = doc[i]
if c == '{' and pc != '\\':
depth += 1
elif c == '}' and pc != '\\':
depth -= 1
if depth == 0:
break
i += 1
return i
while True:
i = doc.find(tag, pos)
if i < 0:
break
args = []
start_tag = i
end = start = pos = start_tag+len(tag)
for n in range(nargs):
|
texts.append(Tag(tag, args, start_tag, end))
return texts
class Translation:
    """One translation target: the source document plus a locale and its
    .po catalogue.  Handles .pot template generation, catalogue updates
    via msginit/msgmerge, catalogue parsing and tag-by-tag substitution.
    """
    # Flag for load(): accept a catalogue path that does not exist yet.
    ALLOW_NOT_EXISTING = 1
    # Keyword strings of the PO file format.
    TAG_MSGID = 'msgid'
    TAG_MSGID_PLURAL = 'msgid_plural'
    TAG_MSGSTR = 'msgstr'
    TAG_MSGCTXT = 'msgctxt'
    @staticmethod
    def load(input_file, file, flags=0):
        """Build a Translation from a '<base>.<locale>.po' path.

        The locale is taken from the file name; unless ALLOW_NOT_EXISTING
        is set in *flags*, the catalogue file must already exist.
        """
        _, name = os.path.split(file)
        name = RE_PO_FILE.match(name)
        if not flags & Translation.ALLOW_NOT_EXISTING:
            if not os.path.exists(file):
                raise Exception('File "{}" does not exists'.format(file))
        return Translation(input_file, name.group(1), file)
    def __init__(self, input, locale, file=None):
        # input: path of the source document; file: the .po catalogue
        # (None means identity translation — msgids are used verbatim).
        self.input = input
        self.locale = locale
        self.file = file
        self._parsed = None  # lazy cache filled by _ensure_parsed()
        self._icu_locale = icu.Locale.createFromName(self.locale)
        self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
    def __repr__(self):
        return 'Translation(input={input}, locale={locale}, file={file})'.format(
            input=self.input, locale=self.locale, file=self.file
        )
    def update(self, document):
        """Refresh the catalogue from *document*.

        Generates a fresh .pot template, then either creates the
        catalogue with msginit or merges the template in with msgmerge.
        Returns True when the catalogue changed.
        """
        if not self.file:
            return False #nothing to update
        template_name = self.generate_template(document)
        sys.stderr.write('Updating translation {}...\n'.format(self))
        if not os.path.exists(self.file):
            sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
            subprocess.check_call(['msginit', '-i', template_name,
                '-l', self.locale, '-o', self.file])
            return True
        with open(self.file, 'rb') as f:
            old = f.read()
        sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
        new = subprocess.check_output(['msgmerge', self.file, template_name])
        with open(self.file, 'wb') as f:
            f.write(new)
        return old != new
    def translate(self, document):
        """Write a translated copy of *document*; return it as a Document.

        All supported tags are collected, sorted by position, and
        replaced by their translations in one pass over the source text.
        """
        sys.stderr.write('Translating {} to {}...\n'.format(document, self))
        tags = self.find_all_tags(document)
        tags += document.find_tags('\\today', 0)
        tags += document.find_tags('\\formatdate', 3)
        tags = sorted(tags, key=lambda x: x.begin_pos)
        # Output name: '<base>.<locale><ext>' next to the input file.
        translated, ext = os.path.splitext(self.input)
        translated += '.' + self.locale + ext
        with open(document.name) as input_file:
            doc = input_file.read()
        sys.stderr.write('Generating file {}...\n'.format(translated))
        with open(translated, 'w') as output:
            elems = []
            prev = 0
            for i in tags:
                elems.append(doc[prev:i.begin_pos])
                elems.append(self.translate_tag(i))
                prev = i.end_pos+1  # skip the tag's closing brace
            elems.append(doc[prev:])
            output.write(''.join(elems))
        return Document.load(translated)
    def find_all_tags(self, document):
        """Collect all gettext-style tags appearing in *document*."""
        tags = []
        tags += document.find_tags('\\gettext')
        tags += document.find_tags('\\pgettext', 2)
        tags += document.find_tags('\\ngettext', 3)
        tags += document.find_tags('\\npgettext', 4)
        return tags
    def generate_template(self, document):
        """Write a .pot template listing every translatable string and
        return the template file name."""
        # NOTE(review): 'doc' is read here but never used — confirm
        # whether this is leftover code.
        with open(document.name) as doc:
            doc = doc.read()
        tags = self.find_all_tags(document)
        tags = set(tags)  # deduplicate identical msgids
        tags = sorted(tags, key=lambda x: x.begin_pos)
        template_name, _ = os.path.splitext(document.name)
        template_name = template_name+'.pot'
        sys.stderr.write('Generating template "{}"...\n'.format(template_name))
        with open(template_name, 'w') as template:
            # Header entry (empty msgid) carrying catalogue metadata.
            template.write('msgid ""\n')
            template.write('msgstr ""\n')
            #template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
            #template.write('"Report-Msgid-Bugs-To: \\n"\n')
            ##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
            #time = datetime.datetime.now(tz=tzlocal.get_localzone())
            #time = time.strftime('%Y-%m-%d %H:%M%z')
            #template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
            #template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
            template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
            #template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
            template.write('"Language: \\n"\n')
            template.write('"MIME-Version: 1.0\\n"\n')
            template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
            template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
            template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
            template.write('\n')
            for tag in tags:
                def escape(s):
                    # PO strings escape backslashes; embedded newlines
                    # become adjacent quoted lines.
                    return s.replace('\\', '\\\\').replace('\n', '"\n"')
                if tag.name == '\\gettext':
                    template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
                    template.write('{} ""\n'.format(self.TAG_MSGSTR))
                elif tag.name == '\\ngettext':
                    template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
                    template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
                    template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
                    template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
                elif tag.name == '\\pgettext':
                    template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
                    template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
                    template.write('{} ""\n'.format(self.TAG_MSGSTR))
                elif tag.name == '\\npgettext':
                    template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
                    template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
                    template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[2].content)))
                    template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
                    template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
                template.write('\n')
        return template_name
    def translate_tag(self, tag):
        r"""Return the replacement text for one parsed tag.

        Supported: \gettext, \ngettext, \pgettext, \npgettext, \today,
        \formatdate.  Without a catalogue the msgid is returned as-is.
        """
        if tag.name == '\\gettext':
            if not self.file:
                return tag.args[0].content
            else:
                return self[(tag.args[0].content, None)][self.TAG_MSGSTR]
        elif tag.name == '\\ngettext':
            if not self.file:
                rule = DEFAULT_PLURAL
                variants = (tag.args[0].content, tag.args[1].content)
            else:
                rule = self.get_header('Plural-Forms')
                variants = self[(tag.args[0].content, None)]
                # Keep only msgstr[N] entries, ordered by key.
                variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
                variants = sorted(variants, key=lambda x: x[0])
                variants = [ i[1] for i in variants ]
            return convert_plurals(rule, tag.args[2].content, variants)
        elif tag.name == '\\pgettext':
            if not self.file:
                return tag.args[1].content
            return self[(tag.args[1].content, tag.args[0].content)][self.TAG_MSGSTR]
        elif tag.name == '\\npgettext':
            if not self.file:
                rule = DEFAULT_PLURAL
                variants = (tag.args[1].content, tag.args[2].content)
            else:
                rule = self.get_header('Plural-Forms')
                variants = self[(tag.args[1].content, tag.args[0].content)]
                variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
                variants = sorted(variants, key=lambda x: x[0])
                variants = [ i[1] for i in variants ]
            return convert_plurals(rule, tag.args[3].content, variants)
        elif tag.name == '\\today':
            # Locale-aware current date via ICU.
            return self._icu_date_full.format(float(datetime.datetime.now().timestamp()))
        elif tag.name == '\\formatdate':
            # NOTE(review): assumes {day}{month}{year} argument order
            # (reversed for datetime()) — confirm.
            return self._icu_date_full.format(float(datetime.datetime(*[int(i.content) for i in tag.args][::-1]).timestamp()))
        else:
            raise Exception('Unknown tag: '+tag.name)
    def _ensure_parsed(self):
        """Parse the .po catalogue into self._parsed on first use."""
        if not self.file:
            raise Exception('Translation instance has no associated file')
        if self._parsed:
            return
        sys.stderr.write('Parsing {}\n'.format(self.file))
        with open(self.file) as f:
            self._parsed = {}
            def add_tr(tag):
                # Store one finished entry, keyed by (msgid, msgctxt).
                key = (tag[self.TAG_MSGID], tag.get(self.TAG_MSGCTXT, None))
                if key in self._parsed:
                    raise Exception('Key already exists: '+repr(key))
                self._parsed[key] = tag
            tag = {}
            def add_tag(key, value):
                # Join continuation lines and undo PO quoting/escaping.
                value = value.replace('\n', '').replace('""', '').strip('"').replace('\\\\', '\\')
                tag[key] = value
            next_tag = None
            for line in f:
                if line.startswith('#'):
                    continue
                if not line.startswith('"'):
                    # Keyword line; a new msgid closes the previous entry.
                    if tag and line.startswith(self.TAG_MSGID+' '):
                        add_tr(tag)
                        tag = {}
                    if next_tag is not None:
                        add_tag(next_tag, next_tag_content)
                    sep = line.find(' ')
                    next_tag = line[:sep].strip()
                    next_tag_content = line[sep:].strip()
                else:
                    # Continuation of the previous keyword's string.
                    next_tag_content += line
            # Flush the last pending keyword and entry.
            add_tag(next_tag, next_tag_content)
            add_tr(tag)
        if ('',None) in self._parsed:
            # The empty-msgid entry is the catalogue header; split its
            # "Key: value" lines into self._header.
            self._header = {}
            headers = self._parsed.pop(('',None))[self.TAG_MSGSTR].split('\\n')
            for i in headers:
                sep = i.find(':')
                key = i[:sep].strip()
                value = i[sep+1:].strip()
                if key:
                    self._header[key] = value
    def get_header(self, key):
        """Return the value of a .po header field, e.g. 'Plural-Forms'."""
        self._ensure_parsed()
        return self._header[key]
    def __getitem__(self, key):
        """Look up a parsed entry by a ``(msgid, msgctxt)`` pair."""
        self._ensure_parsed()
        key = (key[0], key[1])
        return self._parsed[key]
def find_translations(input_file, directory=None, languages=None):
    """Return Translation objects for *input_file*.

    With *languages* given, construct '<base>.<lang>.po' paths (files
    may not exist yet); otherwise pick up every file matching the .po
    naming pattern in *directory* (default: current working directory).
    """
    directory = directory or os.getcwd()
    result = []
    if languages:
        base_name, _ = os.path.splitext(input_file)
        for i in languages:
            filename = os.path.join(directory, base_name+'.'+i+'.po')
            result.append(Translation.load(input_file, filename, Translation.ALLOW_NOT_EXISTING))
    else:
        for i in os.listdir(directory):
            if RE_PO_FILE.match(i):
                result.append(Translation.load(input_file, os.path.join(directory, i)))
    return result
def convert_plurals(description, n, variants):
    """Expand a gettext plural rule into LaTeX selection code.

    Args:
        description: a Plural-Forms header value, e.g.
            'nplurals=2; plural=n != 1'.
        n: the TeX expression yielding the count at run time.
        variants: the translated strings, one per plural form.

    Returns:
        LaTeX code that sets the counter ``_gettext_n`` from the plural
        rule and selects the matching variant via nested \\ifthenelse,
        with the last variant as the fallback.

    Raises:
        Exception: if the description cannot be parsed, or the number of
            variants does not match ``nplurals``.
    """
    NPLURALS = 'nplurals'
    PLURAL = 'plural'
    try:
        desc = description.split(';')
        nplurals = desc[0].strip()
        if not nplurals.startswith(NPLURALS):
            raise Exception('First element "{}" does not start with "{}"'.format(
                nplurals, NPLURALS))
        nplurals = int(nplurals[len(NPLURALS):].strip('='))
        plural = desc[1].strip()
        if not plural.startswith(PLURAL):
            raise Exception('Second element "{}" does not start with "{}"'.format(
                plural, PLURAL))
        plural = plural[len(PLURAL):].strip('=')
        # Translate the C-like plural expression into TeX, substituting
        # the caller-supplied expression for 'n'.
        parser = tex_math.Parser(plural)
        parser.override_identifier('n', n)
        plural = tex_math.Generator(parser.parse()).generate()
    except Exception as e:
        # Bug fixes: chain the original error so the real cause is kept,
        # and describe the actual expected format ('nplurals=', not
        # 'nplurals:').  The old unreachable trailing 'return' that
        # referenced undefined msgid1/msgid2 has been removed.
        raise Exception(
            'Plurals definition must be formed as "nplurals=<n>; plural=<rule>"') from e
    if len(variants) != nplurals:
        raise Exception('Invalid number of variants found (expected {}, but {} found)'.format(nplurals, len(variants)))
    # Emit: set the counter, then a chain of \ifthenelse selecting
    # variants[i] when the counter equals i.
    parts = ['\\setcounter{_gettext_n}{', plural, '}']
    for i in range(nplurals-1):
        parts.append('\\ifthenelse{\\equal{\\value{_gettext_n}}{'+str(i)+'}}{')
        parts.append(variants[i])
        parts.append('}{')
    parts.append(variants[-1])
    parts.append('}'*(nplurals-1))
    return ''.join(parts)
if __name__ == '__main__':
    # Script entry point: discover and run this module's unit tests.
    import unittest
    unittest.main()
| try:
end = _find_matching_closing(start)
except Exception as e:
raise Exception(
'Could not find end for tag that starts at line '+
'{line} ({text})'.format(
line=line_number[start],
text=(
doc[max(start-20, 0):start]+' --> '+
doc[start:min(start+20, len(doc))])
))
start += 1 #skip initial '{'
args.append(Tag.Argument(doc[start:end], start, end))
start = doc.find('{', end) | conditional_block |
translator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import icu
import locale
import os
import re
import shutil
import subprocess
import sys
import tex_math
import tzlocal
import unittest
RE_PO_FILE = re.compile(r'.*\.(.*)\.po$')
DEFAULT_PLURAL = 'nplurals=2; plural=n != 1'
class Tag:
    """A parsed LaTeX command: name, brace arguments and source span."""
    class Argument:
        """One brace-delimited argument with its character span.

        Equality and hashing consider only the content, so identical
        arguments found at different positions compare equal (this is
        what lets template generation deduplicate msgids).
        """
        def __init__(self, content, begin_pos, end_pos):
            self.content = content
            self.begin_pos = begin_pos
            self.end_pos = end_pos
        def __hash__(self):
            return hash(self.content)
        def __eq__(self, other):
            return isinstance(other, Tag.Argument) and self.content == other.content
        def __str__(self):
            return self.content
    def __init__(self, name, args, begin_pos, end_pos):
        # name includes the backslash, e.g. '\\gettext'.
        self.name = name
        self.args = args
        self.begin_pos = begin_pos
        self.end_pos = end_pos
    def __eq__(self, other):
        return isinstance(other, Tag) and self.name == other.name and self.args == other.args
    def __hash__(self):
        # Position-independent, consistent with __eq__.
        return hash(self.name)+sum([hash(i) for i in self.args])
    def __str__(self):
        return self.name+''.join(['{'+str(i)+'}' for i in self.args])
class Document:
    """A LaTeX source file: can be compiled to PDF and scanned for tags."""
    @staticmethod
    def load(file):
        """Alternate constructor, kept for symmetry with Translation.load."""
        return Document(file)
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
    def generate(self):
        """Compile the document with xelatex; return the PDF file name."""
        root, _ = os.path.splitext(self.name)
        output = root+'.pdf'
        subprocess.check_call(['xelatex', self.name])
        return output
    def find_tags(self, tag, nargs=1):
        """Find every occurrence of *tag* followed by *nargs* brace groups.

        Returns a list of Tag objects carrying the argument contents and
        the character span of each occurrence in the file.
        """
        with open(self.name) as file:
            doc = file.read()
        # Precompute a char-offset -> 1-based line number map for errors.
        line_number = [0 for i in range(len(doc))]
        line = 1
        for i in range(len(doc)):
            line_number[i] = line
            if doc[i] == '\n':
                line += 1
        texts = list()
        pos = 0
        def _find_matching_closing(i):
            # Starting at an opening '{', walk forward balancing
            # unescaped braces; return the index of the matching '}'.
            depth = 0
            while True:
                # Bug fix: the previous-character guard was 'i-1 > 0',
                # which wrongly ignored doc[0] and so missed an escaping
                # backslash at the very start of the file.
                pc = doc[i-1] if i > 0 else None
                c = doc[i]
                if c == '{' and pc != '\\':
                    depth += 1
                elif c == '}' and pc != '\\':
                    depth -= 1
                if depth == 0:
                    break
                i += 1
            return i
        while True:
            i = doc.find(tag, pos)
            if i < 0:
                break
            args = []
            start_tag = i
            end = start = pos = start_tag+len(tag)
            for n in range(nargs):
                try:
                    end = _find_matching_closing(start)
                except Exception as e:
                    # Chain the cause (typically IndexError at EOF).
                    raise Exception(
                        'Could not find end for tag that starts at line '+
                        '{line} ({text})'.format(
                            line=line_number[start],
                            text=(
                                doc[max(start-20, 0):start]+' --> '+
                                doc[start:min(start+20, len(doc))])
                        )) from e
                start += 1 #skip initial '{'
                args.append(Tag.Argument(doc[start:end], start, end))
                start = doc.find('{', end)
            # NOTE(review): for nargs == 0, end_pos points just past the
            # tag name, and callers that skip end_pos+1 eat one extra
            # character — confirm whether that is intended.
            texts.append(Tag(tag, args, start_tag, end))
        return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update | sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\npgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[2].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
template.write('\n')
return template_name
def translate_tag(self, tag):
if tag.name == '\\gettext':
if not self.file:
return tag.args[0].content
else:
return self[(tag.args[0].content, None)][self.TAG_MSGSTR]
elif tag.name == '\\ngettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[0].content, tag.args[1].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[0].content, None)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[2].content, variants)
elif tag.name == '\\pgettext':
if not self.file:
return tag.args[1].content
return self[(tag.args[1].content, tag.args[0].content)][self.TAG_MSGSTR]
elif tag.name == '\\npgettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[1].content, tag.args[2].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[1].content, tag.args[0].content)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[3].content, variants)
elif tag.name == '\\today':
return self._icu_date_full.format(float(datetime.datetime.now().timestamp()))
elif tag.name == '\\formatdate':
return self._icu_date_full.format(float(datetime.datetime(*[int(i.content) for i in tag.args][::-1]).timestamp()))
else:
raise Exception('Unknown tag: '+tag.name)
def _ensure_parsed(self):
if not self.file:
raise Exception('Translation instance has no associated file')
if self._parsed:
return
sys.stderr.write('Parsing {}\n'.format(self.file))
with open(self.file) as f:
self._parsed = {}
def add_tr(tag):
key = (tag[self.TAG_MSGID], tag.get(self.TAG_MSGCTXT, None))
if key in self._parsed:
raise Exception('Key already exists: '+repr(key))
self._parsed[key] = tag
tag = {}
def add_tag(key, value):
value = value.replace('\n', '').replace('""', '').strip('"').replace('\\\\', '\\')
tag[key] = value
next_tag = None
for line in f:
if line.startswith('#'):
continue
if not line.startswith('"'):
if tag and line.startswith(self.TAG_MSGID+' '):
add_tr(tag)
tag = {}
if next_tag is not None:
add_tag(next_tag, next_tag_content)
sep = line.find(' ')
next_tag = line[:sep].strip()
next_tag_content = line[sep:].strip()
else:
next_tag_content += line
add_tag(next_tag, next_tag_content)
add_tr(tag)
if ('',None) in self._parsed:
self._header = {}
headers = self._parsed.pop(('',None))[self.TAG_MSGSTR].split('\\n')
for i in headers:
sep = i.find(':')
key = i[:sep].strip()
value = i[sep+1:].strip()
if key:
self._header[key] = value
def get_header(self, key):
self._ensure_parsed()
return self._header[key]
def __getitem__(self, key):
self._ensure_parsed()
key = (key[0], key[1])
return self._parsed[key]
def find_translations(input_file, directory=None, languages=None):
directory = directory or os.getcwd()
result = []
if languages:
base_name, _ = os.path.splitext(input_file)
for i in languages:
filename = os.path.join(directory, base_name+'.'+i+'.po')
result.append(Translation.load(input_file, filename, Translation.ALLOW_NOT_EXISTING))
else:
for i in os.listdir(directory):
if RE_PO_FILE.match(i):
result.append(Translation.load(input_file, os.path.join(directory, i)))
return result
def convert_plurals(description, n, variants):
try:
NPLURALS='nplurals'
PLURAL='plural'
desc = description.split(';')
nplurals = desc[0].strip()
if not nplurals.startswith(NPLURALS):
raise Exception('First element "{}" does not start with "{}"'.format(
nplurals, NPLURALS))
nplurals = nplurals[len(NPLURALS):]
nplurals = int(nplurals.strip('='))
plural = desc[1].strip()
if not plural.startswith(PLURAL):
raise Exception('Second element "{}" does not start with "{}"'.format(
plural, PLURAL))
plural = plural[len(PLURAL):]
plural = plural.strip('=')
plural = tex_math.Parser(plural)
plural.override_identifier('n', n)
plural = tex_math.Generator(plural.parse()).generate()
except Exception as e:
raise Exception('Plurals definition must be formed as "nplurals: <n>; plural=<rule>"')
if len(variants) != nplurals:
raise Exception('Invalid number of variants found (expected {}, but {} found)'.format(nplurals, len(variants)))
s = ''
ending = ''
s += '\\setcounter{_gettext_n}{'
s += plural
s += '}'
for i in range(nplurals-1):
s += '\\ifthenelse{\\equal{\\value{_gettext_n}}{'+str(i)+'}}{'
s += variants[i]
s += '}{'
ending += '}'
s += variants[-1]
s += ending
return s
return 'convert\_plurals('+description+','+msgid1+','+msgid2+','+n+')'
if __name__ == '__main__':
import unittest
unittest.main() | template_name = self.generate_template(document) | random_line_split |
translator.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import icu
import locale
import os
import re
import shutil
import subprocess
import sys
import tex_math
import tzlocal
import unittest
RE_PO_FILE = re.compile(r'.*\.(.*)\.po$')
DEFAULT_PLURAL = 'nplurals=2; plural=n != 1'
class Tag:
class Argument:
def __init__(self, content, begin_pos, end_pos):
self.content = content
self.begin_pos = begin_pos
self.end_pos = end_pos
def __hash__(self):
return hash(self.content)
def __eq__(self, other):
|
def __str__(self):
return self.content
def __init__(self, name, args, begin_pos, end_pos):
self.name = name
self.args = args
self.begin_pos = begin_pos
self.end_pos = end_pos
def __eq__(self, other):
return isinstance(other, Tag) and self.name == other.name and self.args == other.args
def __hash__(self):
return hash(self.name)+sum([hash(i) for i in self.args])
def __str__(self):
return self.name+''.join(['{'+str(i)+'}' for i in self.args])
class Document:
    """A LaTeX source file that can be compiled and scanned for tags."""
    @staticmethod
    def load(file):
        return Document(file)
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
    def generate(self):
        # Compile with xelatex; the resulting PDF sits next to the source.
        root, _ = os.path.splitext(self.name)
        output = root+'.pdf'
        subprocess.check_call(['xelatex', self.name])
        return output
    def find_tags(self, tag, nargs=1):
        """Return Tag objects for each occurrence of *tag* followed by
        *nargs* brace-delimited arguments."""
        with open(self.name) as file:
            doc = file.read()
            # Map each character offset to its 1-based line number
            # (used only for error messages).
            line_number = [ 0 for i in range(len(doc)) ]
            line = 1
            for i in range(len(doc)):
                line_number[i] = line
                if doc[i] == '\n':
                    line += 1
            texts = list()
            pos = 0
            def _find_matching_closing(i):
                # Balance unescaped braces until depth returns to zero;
                # returns the index of the matching closing brace.
                depth = 0
                while True:
                    # NOTE(review): the guard 'i-1 > 0' never inspects
                    # doc[0]; looks like it was meant to be 'i > 0' —
                    # confirm.
                    pc = doc[i-1] if i-1 > 0 else None
                    c = doc[i]
                    if c == '{' and pc != '\\':
                        depth += 1
                    elif c == '}' and pc != '\\':
                        depth -= 1
                    if depth == 0:
                        break
                    i += 1
                return i
            while True:
                i = doc.find(tag, pos)
                if i < 0:
                    break
                args = []
                start_tag = i
                end = start = pos = start_tag+len(tag)
                for n in range(nargs):
                    try:
                        end = _find_matching_closing(start)
                    except Exception as e:
                        raise Exception(
                            'Could not find end for tag that starts at line '+
                            '{line} ({text})'.format(
                                line=line_number[start],
                                text=(
                                    doc[max(start-20, 0):start]+' --> '+
                                    doc[start:min(start+20, len(doc))])
                            ))
                    start += 1 #skip initial '{'
                    args.append(Tag.Argument(doc[start:end], start, end))
                    start = doc.find('{', end)
                texts.append(Tag(tag, args, start_tag, end))
            return texts
class Translation:
ALLOW_NOT_EXISTING = 1
TAG_MSGID = 'msgid'
TAG_MSGID_PLURAL = 'msgid_plural'
TAG_MSGSTR = 'msgstr'
TAG_MSGCTXT = 'msgctxt'
@staticmethod
def load(input_file, file, flags=0):
_, name = os.path.split(file)
name = RE_PO_FILE.match(name)
if not flags & Translation.ALLOW_NOT_EXISTING:
if not os.path.exists(file):
raise Exception('File "{}" does not exists'.format(file))
return Translation(input_file, name.group(1), file)
def __init__(self, input, locale, file=None):
self.input = input
self.locale = locale
self.file = file
self._parsed = None
self._icu_locale = icu.Locale.createFromName(self.locale)
self._icu_date_full = icu.DateFormat.createDateInstance(icu.DateFormat.FULL, self._icu_locale)
def __repr__(self):
return 'Translation(input={input}, locale={locale}, file={file})'.format(
input=self.input, locale=self.locale, file=self.file
)
def update(self, document):
if not self.file:
return False #nothing to update
template_name = self.generate_template(document)
sys.stderr.write('Updating translation {}...\n'.format(self))
if not os.path.exists(self.file):
sys.stderr.write('Generating new translation file: {}...\n'.format(self.file))
subprocess.check_call(['msginit', '-i', template_name,
'-l', self.locale, '-o', self.file])
return True
with open(self.file, 'rb') as f:
old = f.read()
sys.stderr.write('Merging template into translation file: {}...\n'.format(self.file))
new = subprocess.check_output(['msgmerge', self.file, template_name])
with open(self.file, 'wb') as f:
f.write(new)
return old != new
def translate(self, document):
sys.stderr.write('Translating {} to {}...\n'.format(document, self))
tags = self.find_all_tags(document)
tags += document.find_tags('\\today', 0)
tags += document.find_tags('\\formatdate', 3)
tags = sorted(tags, key=lambda x: x.begin_pos)
translated, ext = os.path.splitext(self.input)
translated += '.' + self.locale + ext
with open(document.name) as input_file:
doc = input_file.read()
sys.stderr.write('Generating file {}...\n'.format(translated))
with open(translated, 'w') as output:
elems = []
prev = 0
for i in tags:
elems.append(doc[prev:i.begin_pos])
elems.append(self.translate_tag(i))
prev = i.end_pos+1
elems.append(doc[prev:])
output.write(''.join(elems))
return Document.load(translated)
def find_all_tags(self, document):
tags = []
tags += document.find_tags('\\gettext')
tags += document.find_tags('\\pgettext', 2)
tags += document.find_tags('\\ngettext', 3)
tags += document.find_tags('\\npgettext', 4)
return tags
def generate_template(self, document):
with open(document.name) as doc:
doc = doc.read()
tags = self.find_all_tags(document)
tags = set(tags)
tags = sorted(tags, key=lambda x: x.begin_pos)
template_name, _ = os.path.splitext(document.name)
template_name = template_name+'.pot'
sys.stderr.write('Generating template "{}"...\n'.format(template_name))
with open(template_name, 'w') as template:
template.write('msgid ""\n')
template.write('msgstr ""\n')
#template.write('"Project-Id-Version: PACKAGE VERSION\\n"\n')
#template.write('"Report-Msgid-Bugs-To: \\n"\n')
##template.write('"POT-Creation-Date: 2014-05-03 22:18+0200\\n"\n')
#time = datetime.datetime.now(tz=tzlocal.get_localzone())
#time = time.strftime('%Y-%m-%d %H:%M%z')
#template.write('"POT-Creation-Date: {}\\n"\n'.format(time))
#template.write('"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"\n')
template.write('"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"\n')
#template.write('"Language-Team: LANGUAGE <LL@li.org>\\n"\n')
template.write('"Language: \\n"\n')
template.write('"MIME-Version: 1.0\\n"\n')
template.write('"Content-Type: text/plain; charset=UTF-8\\n"\n')
template.write('"Content-Transfer-Encoding: 8bit\\n"\n')
template.write('"Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\\n"\n')
template.write('\n')
for tag in tags:
def escape(s):
return s.replace('\\', '\\\\').replace('\n', '"\n"')
if tag.name == '\\gettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\ngettext':
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[1].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\pgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} ""\n'.format(self.TAG_MSGSTR))
elif tag.name == '\\npgettext':
template.write('{} "{}"\n'.format(self.TAG_MSGCTXT, escape(tag.args[0].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID, escape(tag.args[1].content)))
template.write('{} "{}"\n'.format(self.TAG_MSGID_PLURAL, escape(tag.args[2].content)))
template.write('{}[0] ""\n'.format(self.TAG_MSGSTR))
template.write('{}[1] ""\n'.format(self.TAG_MSGSTR))
template.write('\n')
return template_name
def translate_tag(self, tag):
if tag.name == '\\gettext':
if not self.file:
return tag.args[0].content
else:
return self[(tag.args[0].content, None)][self.TAG_MSGSTR]
elif tag.name == '\\ngettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[0].content, tag.args[1].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[0].content, None)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[2].content, variants)
elif tag.name == '\\pgettext':
if not self.file:
return tag.args[1].content
return self[(tag.args[1].content, tag.args[0].content)][self.TAG_MSGSTR]
elif tag.name == '\\npgettext':
if not self.file:
rule = DEFAULT_PLURAL
variants = (tag.args[1].content, tag.args[2].content)
else:
rule = self.get_header('Plural-Forms')
variants = self[(tag.args[1].content, tag.args[0].content)]
variants = [ (k, v) for k,v in variants.items() if k.startswith(self.TAG_MSGSTR+'[')]
variants = sorted(variants, key=lambda x: x[0])
variants = [ i[1] for i in variants ]
return convert_plurals(rule, tag.args[3].content, variants)
elif tag.name == '\\today':
return self._icu_date_full.format(float(datetime.datetime.now().timestamp()))
elif tag.name == '\\formatdate':
return self._icu_date_full.format(float(datetime.datetime(*[int(i.content) for i in tag.args][::-1]).timestamp()))
else:
raise Exception('Unknown tag: '+tag.name)
def _ensure_parsed(self):
if not self.file:
raise Exception('Translation instance has no associated file')
if self._parsed:
return
sys.stderr.write('Parsing {}\n'.format(self.file))
with open(self.file) as f:
self._parsed = {}
def add_tr(tag):
key = (tag[self.TAG_MSGID], tag.get(self.TAG_MSGCTXT, None))
if key in self._parsed:
raise Exception('Key already exists: '+repr(key))
self._parsed[key] = tag
tag = {}
def add_tag(key, value):
value = value.replace('\n', '').replace('""', '').strip('"').replace('\\\\', '\\')
tag[key] = value
next_tag = None
for line in f:
if line.startswith('#'):
continue
if not line.startswith('"'):
if tag and line.startswith(self.TAG_MSGID+' '):
add_tr(tag)
tag = {}
if next_tag is not None:
add_tag(next_tag, next_tag_content)
sep = line.find(' ')
next_tag = line[:sep].strip()
next_tag_content = line[sep:].strip()
else:
next_tag_content += line
add_tag(next_tag, next_tag_content)
add_tr(tag)
if ('',None) in self._parsed:
self._header = {}
headers = self._parsed.pop(('',None))[self.TAG_MSGSTR].split('\\n')
for i in headers:
sep = i.find(':')
key = i[:sep].strip()
value = i[sep+1:].strip()
if key:
self._header[key] = value
def get_header(self, key):
self._ensure_parsed()
return self._header[key]
def __getitem__(self, key):
self._ensure_parsed()
key = (key[0], key[1])
return self._parsed[key]
def find_translations(input_file, directory=None, languages=None):
directory = directory or os.getcwd()
result = []
if languages:
base_name, _ = os.path.splitext(input_file)
for i in languages:
filename = os.path.join(directory, base_name+'.'+i+'.po')
result.append(Translation.load(input_file, filename, Translation.ALLOW_NOT_EXISTING))
else:
for i in os.listdir(directory):
if RE_PO_FILE.match(i):
result.append(Translation.load(input_file, os.path.join(directory, i)))
return result
def convert_plurals(description, n, variants):
try:
NPLURALS='nplurals'
PLURAL='plural'
desc = description.split(';')
nplurals = desc[0].strip()
if not nplurals.startswith(NPLURALS):
raise Exception('First element "{}" does not start with "{}"'.format(
nplurals, NPLURALS))
nplurals = nplurals[len(NPLURALS):]
nplurals = int(nplurals.strip('='))
plural = desc[1].strip()
if not plural.startswith(PLURAL):
raise Exception('Second element "{}" does not start with "{}"'.format(
plural, PLURAL))
plural = plural[len(PLURAL):]
plural = plural.strip('=')
plural = tex_math.Parser(plural)
plural.override_identifier('n', n)
plural = tex_math.Generator(plural.parse()).generate()
except Exception as e:
raise Exception('Plurals definition must be formed as "nplurals: <n>; plural=<rule>"')
if len(variants) != nplurals:
raise Exception('Invalid number of variants found (expected {}, but {} found)'.format(nplurals, len(variants)))
s = ''
ending = ''
s += '\\setcounter{_gettext_n}{'
s += plural
s += '}'
for i in range(nplurals-1):
s += '\\ifthenelse{\\equal{\\value{_gettext_n}}{'+str(i)+'}}{'
s += variants[i]
s += '}{'
ending += '}'
s += variants[-1]
s += ending
return s
return 'convert\_plurals('+description+','+msgid1+','+msgid2+','+n+')'
if __name__ == '__main__':
import unittest
unittest.main()
| return isinstance(other, Tag.Argument) and self.content == other.content | identifier_body |
obj.rs | use std::error::Error;
use std::f32::consts::PI;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use gleam::gl;
use gleam::gl::types::{GLint, GLsizei};
use image::GenericImageView;
use super::Context;
use error::io_error;
use matrix::{identity, matmul, rotate_x, rotate_y, scale, translate, vec2, vec3, Vec2, Vec3};
use render::{get_tex_const, Color, Drawable};
#[derive(Debug)]
pub struct Face<T> {
indices: Vec<FaceIndex<T>>,
}
fn face<T>(indices: Vec<FaceIndex<T>>) -> Face<T> {
Face { indices }
}
#[derive(Debug)]
pub struct FaceIndex<T> {
vertex_index: T,
texture_index: Option<T>,
normal_index: Option<T>,
}
impl<T> FromStr for FaceIndex<T>
where
T: FromStr + Default,
<T as FromStr>::Err: 'static + Error + Send + Sync,
{
type Err = io::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn new(name: &str) -> Self {
Group {
name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
texture_coords.push(vec2(x, y));
}
"f" => |
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = gl.gen_textures(1)[0];
// Get the texture index as a glenum
let tex_enum = get_tex_const(self.cur_texture);
gl.active_texture(tex_enum);
gl.bind_texture(gl::TEXTURE_2D, texture);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
gl.tex_image_2d(
gl::TEXTURE_2D,
0,
gl::RGB as i32,
width as i32,
height as i32,
0,
gl::RGB,
gl::UNSIGNED_BYTE,
Some(&tex_image),
);
gl.generate_mipmap(gl::TEXTURE_2D);
gl.tex_parameter_i(
gl::TEXTURE_2D,
gl::TEXTURE_MIN_FILTER,
gl::LINEAR_MIPMAP_LINEAR as i32,
);
}
/// Draws the object
// Return groups
fn draw(&self, ctx: &Context) {
let gl = &ctx.gl;
let mv_location = gl.get_uniform_location(ctx.program, "uMVMatrix");
let m_matrix = identity();
let v_matrix = matmul(
rotate_y(PI),
matmul(
scale(self.scale.x, self.scale.y, self.scale.z),
matmul(
translate(self.translate.x, self.translate.y, self.translate.z),
ctx.camera,
),
),
);
let mv_matrix = matmul(v_matrix, m_matrix);
gl.uniform_matrix_4fv(mv_location, false, &mv_matrix);
let sampler_location = gl.get_uniform_location(ctx.program, "uSampler");
gl.uniform_1i(sampler_location, self.cur_texture as i32);
// Lighting properties
let ambient_location = gl.get_uniform_location(ctx.program, "uAmbientProduct");
let diffuse_location = gl.get_uniform_location(ctx.program, "uDiffuseProduct");
let specular_location = gl.get_uniform_location(ctx.program, "uSpecularProduct");
// Light position
let shininess_location = gl.get_uniform_location(ctx.program, "uShininess");
gl.uniform_4f(ambient_location, 0.8, 0.8, 0.8, 1.0);
gl.uniform_4f(diffuse_location, 0.75164, 0.60648, 0.22648, 1.0);
gl.uniform_4f(specular_location, 0.628281, 0.555802, 0.366065, 1.0);
gl.uniform_1f(shininess_location, 0.4 * 128.0);
gl.draw_arrays(gl::TRIANGLES, self.vert_start / 8, self.num_verts);
}
}
| {
let face_indices = tokens.map(FaceIndex::from_str).flatten().collect();
cur_group.faces.push(face(face_indices));
} | conditional_block |
obj.rs | use std::error::Error;
use std::f32::consts::PI;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use gleam::gl;
use gleam::gl::types::{GLint, GLsizei};
use image::GenericImageView;
use super::Context;
use error::io_error;
use matrix::{identity, matmul, rotate_x, rotate_y, scale, translate, vec2, vec3, Vec2, Vec3};
use render::{get_tex_const, Color, Drawable};
#[derive(Debug)]
pub struct Face<T> {
indices: Vec<FaceIndex<T>>,
}
fn face<T>(indices: Vec<FaceIndex<T>>) -> Face<T> {
Face { indices }
}
#[derive(Debug)]
pub struct FaceIndex<T> {
vertex_index: T,
texture_index: Option<T>,
normal_index: Option<T>,
}
impl<T> FromStr for FaceIndex<T>
where
T: FromStr + Default,
<T as FromStr>::Err: 'static + Error + Send + Sync,
{
type Err = io::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn | (name: &str) -> Self {
Group {
name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
texture_coords.push(vec2(x, y));
}
"f" => {
let face_indices = tokens.map(FaceIndex::from_str).flatten().collect();
cur_group.faces.push(face(face_indices));
}
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = gl.gen_textures(1)[0];
// Get the texture index as a glenum
let tex_enum = get_tex_const(self.cur_texture);
gl.active_texture(tex_enum);
gl.bind_texture(gl::TEXTURE_2D, texture);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
gl.tex_image_2d(
gl::TEXTURE_2D,
0,
gl::RGB as i32,
width as i32,
height as i32,
0,
gl::RGB,
gl::UNSIGNED_BYTE,
Some(&tex_image),
);
gl.generate_mipmap(gl::TEXTURE_2D);
gl.tex_parameter_i(
gl::TEXTURE_2D,
gl::TEXTURE_MIN_FILTER,
gl::LINEAR_MIPMAP_LINEAR as i32,
);
}
/// Draws the object
// Return groups
fn draw(&self, ctx: &Context) {
let gl = &ctx.gl;
let mv_location = gl.get_uniform_location(ctx.program, "uMVMatrix");
let m_matrix = identity();
let v_matrix = matmul(
rotate_y(PI),
matmul(
scale(self.scale.x, self.scale.y, self.scale.z),
matmul(
translate(self.translate.x, self.translate.y, self.translate.z),
ctx.camera,
),
),
);
let mv_matrix = matmul(v_matrix, m_matrix);
gl.uniform_matrix_4fv(mv_location, false, &mv_matrix);
let sampler_location = gl.get_uniform_location(ctx.program, "uSampler");
gl.uniform_1i(sampler_location, self.cur_texture as i32);
// Lighting properties
let ambient_location = gl.get_uniform_location(ctx.program, "uAmbientProduct");
let diffuse_location = gl.get_uniform_location(ctx.program, "uDiffuseProduct");
let specular_location = gl.get_uniform_location(ctx.program, "uSpecularProduct");
// Light position
let shininess_location = gl.get_uniform_location(ctx.program, "uShininess");
gl.uniform_4f(ambient_location, 0.8, 0.8, 0.8, 1.0);
gl.uniform_4f(diffuse_location, 0.75164, 0.60648, 0.22648, 1.0);
gl.uniform_4f(specular_location, 0.628281, 0.555802, 0.366065, 1.0);
gl.uniform_1f(shininess_location, 0.4 * 128.0);
gl.draw_arrays(gl::TRIANGLES, self.vert_start / 8, self.num_verts);
}
}
| new | identifier_name |
obj.rs | use std::error::Error;
use std::f32::consts::PI;
use std::ffi::OsStr;
use std::fs::File;
use std::io::{self, BufRead, BufReader};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use gleam::gl;
use gleam::gl::types::{GLint, GLsizei};
use image::GenericImageView;
use super::Context;
use error::io_error;
use matrix::{identity, matmul, rotate_x, rotate_y, scale, translate, vec2, vec3, Vec2, Vec3};
use render::{get_tex_const, Color, Drawable};
#[derive(Debug)]
pub struct Face<T> {
indices: Vec<FaceIndex<T>>,
}
fn face<T>(indices: Vec<FaceIndex<T>>) -> Face<T> {
Face { indices }
}
#[derive(Debug)]
pub struct FaceIndex<T> {
vertex_index: T,
texture_index: Option<T>,
normal_index: Option<T>,
}
impl<T> FromStr for FaceIndex<T>
where
T: FromStr + Default,
<T as FromStr>::Err: 'static + Error + Send + Sync,
{
type Err = io::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut tokens = s.split('/');
// Get vertex index
let vertex_index: T = tokens
.next()
.ok_or_else(|| io_error("Missing vertex index"))?
.parse()
.map_err(io_error)?;
let texture_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
let normal_index: Option<T> = tokens
.next()
.map(|token| token.parse::<T>().ok())
.unwrap_or(None);
Ok(FaceIndex {
vertex_index,
texture_index,
normal_index,
})
}
}
#[derive(Debug)]
pub struct Group {
pub name: String,
pub faces: Vec<Face<u32>>,
}
impl Group {
pub fn new(name: &str) -> Self {
Group { | name: name.into(),
faces: Vec::new(),
}
}
}
struct Material {
/// Ka
ambient_color: Color,
/// Kd
diffuse_color: Color,
/// Ks
specular_color: Color,
/// Ns
specular_exponent: f32,
/// Ni
optical_density: f32,
/// d or Tr
transparency: f32,
// TODO: illum
// TODO: maps
}
pub struct Obj {
groups: Vec<Group>,
vert_start: GLint,
num_verts: GLsizei,
pub vertices: Vec<Vec3>,
pub normals: Vec<Vec3>,
pub texture_coords: Vec<Vec2>,
center: Vec3,
scale: Vec3,
translate: Vec3,
texture_path: PathBuf,
cur_texture: u8,
}
impl Obj {
/// Loads a render object from a path
pub fn load<P, PP>(
obj_path: P,
texture_path: PP,
cur_texture: &mut u8,
scale: Vec3,
translate: Vec3,
) -> Result<Self, io::Error>
where
P: AsRef<Path> + std::fmt::Display,
PP: AsRef<OsStr> + Sized,
{
// Get the path as string for later
let path_str = obj_path.to_string();
// Read the obj file
let obj_file = File::open(obj_path)?;
// Create reader for the file
let obj_file = BufReader::new(obj_file);
// Buffers for data
let mut vertices: Vec<Vec3> = Vec::new();
let mut normals: Vec<Vec3> = Vec::new();
let mut texture_coords: Vec<Vec2> = Vec::new();
// Create list of groups
let mut groups: Vec<Group> = Vec::new();
// current group
let mut cur_group: Group = Group::new("");
// Keep track of center
let mut center: Vec3 = Vec3::origin();
// Keep track of vertices for averaging center
// Float is used here for division
let mut num_vertices: f32 = 0.0;
for line in obj_file.lines() {
// Unwrap the line
let line = line?;
// Ignore comments
if line.starts_with('#') {
continue;
}
// Split line into tokens
let mut tokens = line.split_whitespace();
// Read the first token
let ty = match tokens.next() {
Some(token) => token,
// Skip empty lines
None => {
continue;
}
};
// Handle it
match ty {
"g" => {
// Read group name
let name = tokens.next().unwrap_or("unnamed");
// Insert old group into groups
if !cur_group.faces.is_empty() {
groups.push(cur_group);
}
// Create new group
cur_group = Group::new(name);
}
"v" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
// Collect into a vector
let v = vec3(x, y, z);
// Factor vertex into the center
center = ¢er + v;
// Add to number of vertices
num_vertices += 1.0;
// Add vector into the list
vertices.push(v);
}
"vn" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let z: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
normals.push(vec3(x, y, z));
}
"vt" => {
// Read coordinates
let x: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
let y: f32 = tokens
.next()
.unwrap_or_else(|| "0")
.parse()
.unwrap_or_else(|_| 0.0);
texture_coords.push(vec2(x, y));
}
"f" => {
let face_indices = tokens.map(FaceIndex::from_str).flatten().collect();
cur_group.faces.push(face(face_indices));
}
other => {
eprintln!("Unhandled line type: {}", other);
}
}
}
// Push the last group
groups.push(cur_group);
// Average out the center
let center = center * (1.0 / (num_vertices as f32));
println!("Center for {} is {:?}", path_str, center);
// Iterate texture counter forward
*cur_texture += 1;
// Generate the render object
Ok(Obj {
groups,
vert_start: 0,
num_verts: 0,
vertices,
normals,
texture_coords,
center,
scale,
translate,
texture_path: Path::new(&texture_path).to_path_buf(),
cur_texture: *cur_texture,
})
}
pub fn to_vertices(&self, group: &Group) -> Vec<f32> {
// Generate vertex list from face list
group
.faces
.iter()
// For each face, get the vertex, normal, and texture coordinates
// of all its components
.flat_map(|face| {
face.indices.iter().map(|index| {
(
// Get the vertex for this
/*(&(&self.vertices[(index.vertex_index - 1) as usize] - self.center)
+ self.translate)
.scale(self.scale.x, self.scale.y, self.scale.z),*/
// Get the vertex for this
&self.vertices[(index.vertex_index - 1) as usize] - self.center,
index
.normal_index
.map(|normal_index| self.normals[(normal_index - 1) as usize])
.unwrap_or_else(Vec3::origin),
index
.texture_index
.map(|texture_index| self.texture_coords[(texture_index - 1) as usize])
.unwrap_or_else(Vec2::origin),
)
})
})
// Flatten out everything
.flat_map(|(vertex, normal, texture)| {
#[cfg_attr(rustfmt, rustfmt_skip)]
vec![
vertex.x, vertex.y, vertex.z,
normal.x, normal.y, normal.z,
texture.x, texture.y,
]
})
.collect()
}
}
impl Drawable for Obj {
/// Returns buffer data
fn buffer_data(&mut self, vertex_start: GLint) -> Vec<f32> {
// Store element start
self.vert_start = vertex_start;
// Store vertex data
let mut vertices: Vec<f32> = Vec::new();
// Iterate over groups
for group in &self.groups {
// Extract data for the current group
let cur_vertices = self.to_vertices(group);
// Add existing data
vertices.extend_from_slice(&cur_vertices);
}
// Store the number of vertices
self.num_verts = (vertices.len() / 8) as GLsizei;
// Return vertices
vertices
}
/// Loads textures
fn load_texture(&self, ctx: &Context) {
let gl = &ctx.gl;
// Read texture
let tex_image = image::open(self.texture_path.clone()).unwrap();
// Extract dimensions
let (width, height) = tex_image.dimensions();
// Get image as raw bytes
let tex_image = tex_image.as_rgb8().unwrap().clone();
// Create a texture
let texture = gl.gen_textures(1)[0];
// Get the texture index as a glenum
let tex_enum = get_tex_const(self.cur_texture);
gl.active_texture(tex_enum);
gl.bind_texture(gl::TEXTURE_2D, texture);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
gl.tex_image_2d(
gl::TEXTURE_2D,
0,
gl::RGB as i32,
width as i32,
height as i32,
0,
gl::RGB,
gl::UNSIGNED_BYTE,
Some(&tex_image),
);
gl.generate_mipmap(gl::TEXTURE_2D);
gl.tex_parameter_i(
gl::TEXTURE_2D,
gl::TEXTURE_MIN_FILTER,
gl::LINEAR_MIPMAP_LINEAR as i32,
);
}
/// Draws the object
// Return groups
fn draw(&self, ctx: &Context) {
let gl = &ctx.gl;
let mv_location = gl.get_uniform_location(ctx.program, "uMVMatrix");
let m_matrix = identity();
let v_matrix = matmul(
rotate_y(PI),
matmul(
scale(self.scale.x, self.scale.y, self.scale.z),
matmul(
translate(self.translate.x, self.translate.y, self.translate.z),
ctx.camera,
),
),
);
let mv_matrix = matmul(v_matrix, m_matrix);
gl.uniform_matrix_4fv(mv_location, false, &mv_matrix);
let sampler_location = gl.get_uniform_location(ctx.program, "uSampler");
gl.uniform_1i(sampler_location, self.cur_texture as i32);
// Lighting properties
let ambient_location = gl.get_uniform_location(ctx.program, "uAmbientProduct");
let diffuse_location = gl.get_uniform_location(ctx.program, "uDiffuseProduct");
let specular_location = gl.get_uniform_location(ctx.program, "uSpecularProduct");
// Light position
let shininess_location = gl.get_uniform_location(ctx.program, "uShininess");
gl.uniform_4f(ambient_location, 0.8, 0.8, 0.8, 1.0);
gl.uniform_4f(diffuse_location, 0.75164, 0.60648, 0.22648, 1.0);
gl.uniform_4f(specular_location, 0.628281, 0.555802, 0.366065, 1.0);
gl.uniform_1f(shininess_location, 0.4 * 128.0);
gl.draw_arrays(gl::TRIANGLES, self.vert_start / 8, self.num_verts);
}
} | random_line_split | |
cortex.pb.go | // Code generated by protoc-gen-go.
// source: cortex.proto
// DO NOT EDIT!
/*
Package cortex is a generated protocol buffer package.
It is generated from these files:
cortex.proto
It has these top-level messages:
Sample
LabelPair
TimeSeries
LabelMatcher
ReadRequest
ReadResponse
LabelValuesRequest
LabelValuesResponse
UserStatsResponse
*/
package cortex
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatchType int32
const (
MatchType_EQUAL MatchType = 0
MatchType_NOT_EQUAL MatchType = 1
MatchType_REGEX_MATCH MatchType = 2
MatchType_REGEX_NO_MATCH MatchType = 3
)
var MatchType_name = map[int32]string{
0: "EQUAL",
1: "NOT_EQUAL",
2: "REGEX_MATCH",
3: "REGEX_NO_MATCH",
}
var MatchType_value = map[string]int32{
"EQUAL": 0,
"NOT_EQUAL": 1,
"REGEX_MATCH": 2,
"REGEX_NO_MATCH": 3,
}
func (x MatchType) String() string {
return proto.EnumName(MatchType_name, int32(x))
}
func (MatchType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type LabelPair struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type TimeSeries struct {
Labels []*LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"`
// Sorted by time, oldest sample first.
Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) | () string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, 0x5d, 0x25, 0xda, 0xe0,
0x0b, 0x98, 0xa6, 0xf5, 0x10, 0x43, 0x9d, 0xe3, 0xc5, 0xec, 0xfc, 0xe8, 0xac, 0xa1, 0xdb, 0x8d,
0x16, 0x4d, 0x01, 0x2e, 0xe0, 0xa1, 0x71, 0x94, 0x6b, 0x36, 0x75, 0x6d, 0xd8, 0xd6, 0x7a, 0x25,
0xa2, 0x4d, 0x73, 0x09, 0x73, 0xd7, 0xbe, 0x56, 0xf6, 0xf6, 0xa7, 0xae, 0xf0, 0x14, 0x26, 0xf6,
0x6f, 0xe9, 0xc9, 0x85, 0xfd, 0x0a, 0x97, 0x8e, 0x29, 0x21, 0x5c, 0xba, 0xd3, 0x30, 0xba, 0x4f,
0xc3, 0x78, 0xa8, 0xe1, 0x5f, 0x00, 0x33, 0xa1, 0xd5, 0x9d, 0xd0, 0xbf, 0x76, 0xe4, 0x07, 0xbe,
0x02, 0x24, 0x57, 0x2a, 0x2b, 0xf7, 0x3c, 0x0b, 0x9c, 0x67, 0x91, 0xcb, 0xc4, 0xbd, 0x71, 0x24,
0x24, 0xd2, 0xf9, 0x9d, 0xbc, 0xc7, 0xdf, 0x90, 0xf0, 0x61, 0xe5, 0x6b, 0x78, 0x94, 0x79, 0x0d,
0x86, 0x08, 0xd4, 0x9a, 0x8f, 0xf7, 0xfc, 0x69, 0x04, 0x8a, 0xae, 0x8a, 0xbf, 0x83, 0xb9, 0x27,
0x66, 0xca, 0x22, 0x37, 0x1a, 0xcf, 0x01, 0xdc, 0x1e, 0xe7, 0x76, 0xe3, 0x31, 0xb6, 0x33, 0xfa,
0x3b, 0x88, 0x41, 0x15, 0xbf, 0x00, 0x74, 0xd3, 0xbf, 0xd6, 0x5a, 0x4d, 0xab, 0xf1, 0x29, 0x80,
0x3b, 0x84, 0x1c, 0xdc, 0x99, 0x39, 0x64, 0x43, 0x00, 0x7f, 0x0b, 0x4f, 0xf6, 0x9a, 0x9a, 0xfd,
0xf4, 0x8e, 0x7c, 0x97, 0x33, 0xce, 0x33, 0x60, 0x62, 0x96, 0xf6, 0xa5, 0xfc, 0x3b, 0x1c, 0x5d,
0xd3, 0xe6, 0xad, 0x55, 0xb6, 0xef, 0x3b, 0x85, 0x30, 0xc9, 0x7f, 0xd0, 0xde, 0xa4, 0xc8, 0x65,
0xa5, 0x6c, 0xfb, 0x3c, 0x0f, 0x3b, 0x54, 0x10, 0x58, 0x93, 0xca, 0x77, 0x99, 0x6c, 0xe4, 0xd5,
0x26, 0x4e, 0x04, 0x23, 0xc4, 0xab, 0x7a, 0xf9, 0x09, 0x58, 0x77, 0x64, 0x64, 0x70, 0xb0, 0xfa,
0x72, 0xbd, 0xbc, 0x8c, 0x1e, 0xe0, 0x21, 0xb0, 0xcd, 0x55, 0x2c, 0x7d, 0x18, 0xe0, 0x63, 0xba,
0xe6, 0xea, 0xc3, 0xea, 0x9b, 0x5c, 0x2f, 0xe3, 0xf7, 0x1f, 0xa3, 0x11, 0xbd, 0x84, 0xd0, 0x03,
0x9b, 0xab, 0x06, 0x1b, 0xdf, 0x4c, 0xdd, 0x0f, 0x74, 0xf1, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x72,
0xd8, 0x59, 0x34, 0x50, 0x03, 0x00, 0x00,
}
| String | identifier_name |
cortex.pb.go | // Code generated by protoc-gen-go.
// source: cortex.proto
// DO NOT EDIT!
/*
Package cortex is a generated protocol buffer package.
It is generated from these files:
cortex.proto
It has these top-level messages:
Sample
LabelPair
TimeSeries
LabelMatcher
ReadRequest
ReadResponse
LabelValuesRequest
LabelValuesResponse
UserStatsResponse
*/
package cortex
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatchType int32
const (
MatchType_EQUAL MatchType = 0
MatchType_NOT_EQUAL MatchType = 1
MatchType_REGEX_MATCH MatchType = 2
MatchType_REGEX_NO_MATCH MatchType = 3
)
var MatchType_name = map[int32]string{
0: "EQUAL",
1: "NOT_EQUAL",
2: "REGEX_MATCH",
3: "REGEX_NO_MATCH",
}
var MatchType_value = map[string]int32{
"EQUAL": 0,
"NOT_EQUAL": 1,
"REGEX_MATCH": 2,
"REGEX_NO_MATCH": 3,
}
func (x MatchType) String() string {
return proto.EnumName(MatchType_name, int32(x))
}
func (MatchType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type LabelPair struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type TimeSeries struct {
Labels []*LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"`
// Sorted by time, oldest sample first.
Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {} | }
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, 0x5d, 0x25, 0xda, 0xe0,
0x0b, 0x98, 0xa6, 0xf5, 0x10, 0x43, 0x9d, 0xe3, 0xc5, 0xec, 0xfc, 0xe8, 0xac, 0xa1, 0xdb, 0x8d,
0x16, 0x4d, 0x01, 0x2e, 0xe0, 0xa1, 0x71, 0x94, 0x6b, 0x36, 0x75, 0x6d, 0xd8, 0xd6, 0x7a, 0x25,
0xa2, 0x4d, 0x73, 0x09, 0x73, 0xd7, 0xbe, 0x56, 0xf6, 0xf6, 0xa7, 0xae, 0xf0, 0x14, 0x26, 0xf6,
0x6f, 0xe9, 0xc9, 0x85, 0xfd, 0x0a, 0x97, 0x8e, 0x29, 0x21, 0x5c, 0xba, 0xd3, 0x30, 0xba, 0x4f,
0xc3, 0x78, 0xa8, 0xe1, 0x5f, 0x00, 0x33, 0xa1, 0xd5, 0x9d, 0xd0, 0xbf, 0x76, 0xe4, 0x07, 0xbe,
0x02, 0x24, 0x57, 0x2a, 0x2b, 0xf7, 0x3c, 0x0b, 0x9c, 0x67, 0x91, 0xcb, 0xc4, 0xbd, 0x71, 0x24,
0x24, 0xd2, 0xf9, 0x9d, 0xbc, 0xc7, 0xdf, 0x90, 0xf0, 0x61, 0xe5, 0x6b, 0x78, 0x94, 0x79, 0x0d,
0x86, 0x08, 0xd4, 0x9a, 0x8f, 0xf7, 0xfc, 0x69, 0x04, 0x8a, 0xae, 0x8a, 0xbf, 0x83, 0xb9, 0x27,
0x66, 0xca, 0x22, 0x37, 0x1a, 0xcf, 0x01, 0xdc, 0x1e, 0xe7, 0x76, 0xe3, 0x31, 0xb6, 0x33, 0xfa,
0x3b, 0x88, 0x41, 0x15, 0xbf, 0x00, 0x74, 0xd3, 0xbf, 0xd6, 0x5a, 0x4d, 0xab, 0xf1, 0x29, 0x80,
0x3b, 0x84, 0x1c, 0xdc, 0x99, 0x39, 0x64, 0x43, 0x00, 0x7f, 0x0b, 0x4f, 0xf6, 0x9a, 0x9a, 0xfd,
0xf4, 0x8e, 0x7c, 0x97, 0x33, 0xce, 0x33, 0x60, 0x62, 0x96, 0xf6, 0xa5, 0xfc, 0x3b, 0x1c, 0x5d,
0xd3, 0xe6, 0xad, 0x55, 0xb6, 0xef, 0x3b, 0x85, 0x30, 0xc9, 0x7f, 0xd0, 0xde, 0xa4, 0xc8, 0x65,
0xa5, 0x6c, 0xfb, 0x3c, 0x0f, 0x3b, 0x54, 0x10, 0x58, 0x93, 0xca, 0x77, 0x99, 0x6c, 0xe4, 0xd5,
0x26, 0x4e, 0x04, 0x23, 0xc4, 0xab, 0x7a, 0xf9, 0x09, 0x58, 0x77, 0x64, 0x64, 0x70, 0xb0, 0xfa,
0x72, 0xbd, 0xbc, 0x8c, 0x1e, 0xe0, 0x21, 0xb0, 0xcd, 0x55, 0x2c, 0x7d, 0x18, 0xe0, 0x63, 0xba,
0xe6, 0xea, 0xc3, 0xea, 0x9b, 0x5c, 0x2f, 0xe3, 0xf7, 0x1f, 0xa3, 0x11, 0xbd, 0x84, 0xd0, 0x03,
0x9b, 0xab, 0x06, 0x1b, 0xdf, 0x4c, 0xdd, 0x0f, 0x74, 0xf1, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x72,
0xd8, 0x59, 0x34, 0x50, 0x03, 0x00, 0x00,
} | func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries | random_line_split |
cortex.pb.go | // Code generated by protoc-gen-go.
// source: cortex.proto
// DO NOT EDIT!
/*
Package cortex is a generated protocol buffer package.
It is generated from these files:
cortex.proto
It has these top-level messages:
Sample
LabelPair
TimeSeries
LabelMatcher
ReadRequest
ReadResponse
LabelValuesRequest
LabelValuesResponse
UserStatsResponse
*/
package cortex
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatchType int32
const (
MatchType_EQUAL MatchType = 0
MatchType_NOT_EQUAL MatchType = 1
MatchType_REGEX_MATCH MatchType = 2
MatchType_REGEX_NO_MATCH MatchType = 3
)
var MatchType_name = map[int32]string{
0: "EQUAL",
1: "NOT_EQUAL",
2: "REGEX_MATCH",
3: "REGEX_NO_MATCH",
}
var MatchType_value = map[string]int32{
"EQUAL": 0,
"NOT_EQUAL": 1,
"REGEX_MATCH": 2,
"REGEX_NO_MATCH": 3,
}
func (x MatchType) String() string {
return proto.EnumName(MatchType_name, int32(x))
}
func (MatchType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type LabelPair struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type TimeSeries struct {
Labels []*LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"`
// Sorted by time, oldest sample first.
Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil {
return m.Timeseries
}
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string |
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, 0x5d, 0x25, 0xda, 0xe0,
0x0b, 0x98, 0xa6, 0xf5, 0x10, 0x43, 0x9d, 0xe3, 0xc5, 0xec, 0xfc, 0xe8, 0xac, 0xa1, 0xdb, 0x8d,
0x16, 0x4d, 0x01, 0x2e, 0xe0, 0xa1, 0x71, 0x94, 0x6b, 0x36, 0x75, 0x6d, 0xd8, 0xd6, 0x7a, 0x25,
0xa2, 0x4d, 0x73, 0x09, 0x73, 0xd7, 0xbe, 0x56, 0xf6, 0xf6, 0xa7, 0xae, 0xf0, 0x14, 0x26, 0xf6,
0x6f, 0xe9, 0xc9, 0x85, 0xfd, 0x0a, 0x97, 0x8e, 0x29, 0x21, 0x5c, 0xba, 0xd3, 0x30, 0xba, 0x4f,
0xc3, 0x78, 0xa8, 0xe1, 0x5f, 0x00, 0x33, 0xa1, 0xd5, 0x9d, 0xd0, 0xbf, 0x76, 0xe4, 0x07, 0xbe,
0x02, 0x24, 0x57, 0x2a, 0x2b, 0xf7, 0x3c, 0x0b, 0x9c, 0x67, 0x91, 0xcb, 0xc4, 0xbd, 0x71, 0x24,
0x24, 0xd2, 0xf9, 0x9d, 0xbc, 0xc7, 0xdf, 0x90, 0xf0, 0x61, 0xe5, 0x6b, 0x78, 0x94, 0x79, 0x0d,
0x86, 0x08, 0xd4, 0x9a, 0x8f, 0xf7, 0xfc, 0x69, 0x04, 0x8a, 0xae, 0x8a, 0xbf, 0x83, 0xb9, 0x27,
0x66, 0xca, 0x22, 0x37, 0x1a, 0xcf, 0x01, 0xdc, 0x1e, 0xe7, 0x76, 0xe3, 0x31, 0xb6, 0x33, 0xfa,
0x3b, 0x88, 0x41, 0x15, 0xbf, 0x00, 0x74, 0xd3, 0xbf, 0xd6, 0x5a, 0x4d, 0xab, 0xf1, 0x29, 0x80,
0x3b, 0x84, 0x1c, 0xdc, 0x99, 0x39, 0x64, 0x43, 0x00, 0x7f, 0x0b, 0x4f, 0xf6, 0x9a, 0x9a, 0xfd,
0xf4, 0x8e, 0x7c, 0x97, 0x33, 0xce, 0x33, 0x60, 0x62, 0x96, 0xf6, 0xa5, 0xfc, 0x3b, 0x1c, 0x5d,
0xd3, 0xe6, 0xad, 0x55, 0xb6, 0xef, 0x3b, 0x85, 0x30, 0xc9, 0x7f, 0xd0, 0xde, 0xa4, 0xc8, 0x65,
0xa5, 0x6c, 0xfb, 0x3c, 0x0f, 0x3b, 0x54, 0x10, 0x58, 0x93, 0xca, 0x77, 0x99, 0x6c, 0xe4, 0xd5,
0x26, 0x4e, 0x04, 0x23, 0xc4, 0xab, 0x7a, 0xf9, 0x09, 0x58, 0x77, 0x64, 0x64, 0x70, 0xb0, 0xfa,
0x72, 0xbd, 0xbc, 0x8c, 0x1e, 0xe0, 0x21, 0xb0, 0xcd, 0x55, 0x2c, 0x7d, 0x18, 0xe0, 0x63, 0xba,
0xe6, 0xea, 0xc3, 0xea, 0x9b, 0x5c, 0x2f, 0xe3, 0xf7, 0x1f, 0xa3, 0x11, 0xbd, 0x84, 0xd0, 0x03,
0x9b, 0xab, 0x06, 0x1b, 0xdf, 0x4c, 0xdd, 0x0f, 0x74, 0xf1, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x72,
0xd8, 0x59, 0x34, 0x50, 0x03, 0x00, 0x00,
}
| { return proto.CompactTextString(m) } | identifier_body |
cortex.pb.go | // Code generated by protoc-gen-go.
// source: cortex.proto
// DO NOT EDIT!
/*
Package cortex is a generated protocol buffer package.
It is generated from these files:
cortex.proto
It has these top-level messages:
Sample
LabelPair
TimeSeries
LabelMatcher
ReadRequest
ReadResponse
LabelValuesRequest
LabelValuesResponse
UserStatsResponse
*/
package cortex
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type MatchType int32
const (
MatchType_EQUAL MatchType = 0
MatchType_NOT_EQUAL MatchType = 1
MatchType_REGEX_MATCH MatchType = 2
MatchType_REGEX_NO_MATCH MatchType = 3
)
var MatchType_name = map[int32]string{
0: "EQUAL",
1: "NOT_EQUAL",
2: "REGEX_MATCH",
3: "REGEX_NO_MATCH",
}
var MatchType_value = map[string]int32{
"EQUAL": 0,
"NOT_EQUAL": 1,
"REGEX_MATCH": 2,
"REGEX_NO_MATCH": 3,
}
func (x MatchType) String() string {
return proto.EnumName(MatchType_name, int32(x))
}
func (MatchType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
}
func (m *Sample) Reset() { *m = Sample{} }
func (m *Sample) String() string { return proto.CompactTextString(m) }
func (*Sample) ProtoMessage() {}
func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type LabelPair struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
func (*LabelPair) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type TimeSeries struct {
Labels []*LabelPair `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"`
// Sorted by time, oldest sample first.
Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
func (*TimeSeries) ProtoMessage() {}
func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *TimeSeries) GetLabels() []*LabelPair {
if m != nil {
return m.Labels
}
return nil
}
func (m *TimeSeries) GetSamples() []*Sample {
if m != nil {
return m.Samples
}
return nil
}
type LabelMatcher struct {
Type MatchType `protobuf:"varint,1,opt,name=type,enum=cortex.MatchType" json:"type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
Value string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
}
func (m *LabelMatcher) Reset() { *m = LabelMatcher{} }
func (m *LabelMatcher) String() string { return proto.CompactTextString(m) }
func (*LabelMatcher) ProtoMessage() {}
func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type ReadRequest struct {
StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs" json:"start_timestamp_ms,omitempty"`
EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs" json:"end_timestamp_ms,omitempty"`
Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers,omitempty"`
}
func (m *ReadRequest) Reset() { *m = ReadRequest{} }
func (m *ReadRequest) String() string { return proto.CompactTextString(m) }
func (*ReadRequest) ProtoMessage() {}
func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ReadRequest) GetMatchers() []*LabelMatcher {
if m != nil {
return m.Matchers
}
return nil
}
type ReadResponse struct {
Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"`
}
func (m *ReadResponse) Reset() { *m = ReadResponse{} }
func (m *ReadResponse) String() string { return proto.CompactTextString(m) }
func (*ReadResponse) ProtoMessage() {}
func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ReadResponse) GetTimeseries() []*TimeSeries {
if m != nil |
return nil
}
type LabelValuesRequest struct {
LabelName string `protobuf:"bytes,1,opt,name=label_name,json=labelName" json:"label_name,omitempty"`
}
func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} }
func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) }
func (*LabelValuesRequest) ProtoMessage() {}
func (*LabelValuesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
type LabelValuesResponse struct {
LabelValues []string `protobuf:"bytes,1,rep,name=label_values,json=labelValues" json:"label_values,omitempty"`
}
func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} }
func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) }
func (*LabelValuesResponse) ProtoMessage() {}
func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
type UserStatsResponse struct {
IngestionRate float64 `protobuf:"fixed64,1,opt,name=ingestion_rate,json=ingestionRate" json:"ingestion_rate,omitempty"`
NumSeries uint64 `protobuf:"varint,2,opt,name=num_series,json=numSeries" json:"num_series,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
func (m *UserStatsResponse) String() string { return proto.CompactTextString(m) }
func (*UserStatsResponse) ProtoMessage() {}
func (*UserStatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func init() {
proto.RegisterType((*Sample)(nil), "cortex.Sample")
proto.RegisterType((*LabelPair)(nil), "cortex.LabelPair")
proto.RegisterType((*TimeSeries)(nil), "cortex.TimeSeries")
proto.RegisterType((*LabelMatcher)(nil), "cortex.LabelMatcher")
proto.RegisterType((*ReadRequest)(nil), "cortex.ReadRequest")
proto.RegisterType((*ReadResponse)(nil), "cortex.ReadResponse")
proto.RegisterType((*LabelValuesRequest)(nil), "cortex.LabelValuesRequest")
proto.RegisterType((*LabelValuesResponse)(nil), "cortex.LabelValuesResponse")
proto.RegisterType((*UserStatsResponse)(nil), "cortex.UserStatsResponse")
proto.RegisterEnum("cortex.MatchType", MatchType_name, MatchType_value)
}
func init() { proto.RegisterFile("cortex.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 455 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x6b, 0x13, 0x41,
0x14, 0x75, 0x93, 0x34, 0x3a, 0x37, 0xe9, 0xba, 0xbd, 0xf6, 0xa1, 0x2f, 0x82, 0x0e, 0x14, 0xa2,
0x48, 0x91, 0x16, 0xc1, 0xd7, 0x28, 0x41, 0x91, 0x26, 0xd5, 0xc9, 0x56, 0xf4, 0x69, 0x98, 0xb6,
0x83, 0x2e, 0xec, 0x97, 0x3b, 0x13, 0xd1, 0x5f, 0xe2, 0xdf, 0xf5, 0xee, 0xcc, 0x7e, 0x05, 0xfa,
0xb6, 0xf7, 0xdc, 0xaf, 0x73, 0xce, 0x9d, 0x85, 0xf9, 0x6d, 0x51, 0x59, 0xfd, 0xe7, 0xac, 0xac,
0x0a, 0x5b, 0xe0, 0xd4, 0x47, 0x7c, 0x09, 0xd3, 0xad, 0xca, 0xca, 0x54, 0xe3, 0x31, 0x1c, 0xfc,
0x56, 0xe9, 0x4e, 0x9f, 0x04, 0xcf, 0x82, 0x45, 0x20, 0x7c, 0x80, 0xcf, 0x61, 0x6e, 0x93, 0x4c,
0x1b, 0x4b, 0x45, 0x32, 0x33, 0x27, 0x23, 0x4a, 0x8e, 0xc5, 0xac, 0xc3, 0xd6, 0x86, 0xbf, 0x01,
0x76, 0xa9, 0x6e, 0x74, 0xfa, 0x59, 0x25, 0x15, 0x22, 0x4c, 0x72, 0x95, 0xf9, 0x21, 0x4c, 0xb8,
0xef, 0x7e, 0xf2, 0xc8, 0x81, 0x3e, 0xe0, 0x0a, 0x20, 0xa6, 0x29, 0x5b, 0x5d, 0x25, 0xda, 0xe0,
0x0b, 0x98, 0xa6, 0xf5, 0x10, 0x43, 0x9d, 0xe3, 0xc5, 0xec, 0xfc, 0xe8, 0xac, 0xa1, 0xdb, 0x8d,
0x16, 0x4d, 0x01, 0x2e, 0xe0, 0xa1, 0x71, 0x94, 0x6b, 0x36, 0x75, 0x6d, 0xd8, 0xd6, 0x7a, 0x25,
0xa2, 0x4d, 0x73, 0x09, 0x73, 0xd7, 0xbe, 0x56, 0xf6, 0xf6, 0xa7, 0xae, 0xf0, 0x14, 0x26, 0xf6,
0x6f, 0xe9, 0xc9, 0x85, 0xfd, 0x0a, 0x97, 0x8e, 0x29, 0x21, 0x5c, 0xba, 0xd3, 0x30, 0xba, 0x4f,
0xc3, 0x78, 0xa8, 0xe1, 0x5f, 0x00, 0x33, 0xa1, 0xd5, 0x9d, 0xd0, 0xbf, 0x76, 0xe4, 0x07, 0xbe,
0x02, 0x24, 0x57, 0x2a, 0x2b, 0xf7, 0x3c, 0x0b, 0x9c, 0x67, 0x91, 0xcb, 0xc4, 0xbd, 0x71, 0x24,
0x24, 0xd2, 0xf9, 0x9d, 0xbc, 0xc7, 0xdf, 0x90, 0xf0, 0x61, 0xe5, 0x6b, 0x78, 0x94, 0x79, 0x0d,
0x86, 0x08, 0xd4, 0x9a, 0x8f, 0xf7, 0xfc, 0x69, 0x04, 0x8a, 0xae, 0x8a, 0xbf, 0x83, 0xb9, 0x27,
0x66, 0xca, 0x22, 0x37, 0x1a, 0xcf, 0x01, 0xdc, 0x1e, 0xe7, 0x76, 0xe3, 0x31, 0xb6, 0x33, 0xfa,
0x3b, 0x88, 0x41, 0x15, 0xbf, 0x00, 0x74, 0xd3, 0xbf, 0xd6, 0x5a, 0x4d, 0xab, 0xf1, 0x29, 0x80,
0x3b, 0x84, 0x1c, 0xdc, 0x99, 0x39, 0x64, 0x43, 0x00, 0x7f, 0x0b, 0x4f, 0xf6, 0x9a, 0x9a, 0xfd,
0xf4, 0x8e, 0x7c, 0x97, 0x33, 0xce, 0x33, 0x60, 0x62, 0x96, 0xf6, 0xa5, 0xfc, 0x3b, 0x1c, 0x5d,
0xd3, 0xe6, 0xad, 0x55, 0xb6, 0xef, 0x3b, 0x85, 0x30, 0xc9, 0x7f, 0xd0, 0xde, 0xa4, 0xc8, 0x65,
0xa5, 0x6c, 0xfb, 0x3c, 0x0f, 0x3b, 0x54, 0x10, 0x58, 0x93, 0xca, 0x77, 0x99, 0x6c, 0xe4, 0xd5,
0x26, 0x4e, 0x04, 0x23, 0xc4, 0xab, 0x7a, 0xf9, 0x09, 0x58, 0x77, 0x64, 0x64, 0x70, 0xb0, 0xfa,
0x72, 0xbd, 0xbc, 0x8c, 0x1e, 0xe0, 0x21, 0xb0, 0xcd, 0x55, 0x2c, 0x7d, 0x18, 0xe0, 0x63, 0xba,
0xe6, 0xea, 0xc3, 0xea, 0x9b, 0x5c, 0x2f, 0xe3, 0xf7, 0x1f, 0xa3, 0x11, 0xbd, 0x84, 0xd0, 0x03,
0x9b, 0xab, 0x06, 0x1b, 0xdf, 0x4c, 0xdd, 0x0f, 0x74, 0xf1, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x72,
0xd8, 0x59, 0x34, 0x50, 0x03, 0x00, 0x00,
}
| {
return m.Timeseries
} | conditional_block |
sss.py | # sss.py
# Commonly used routines to analyse small patterns in isotropic 2-state rules
# Includes giveRLE.py, originally by Nathaniel Johnston
# Includes code from get_all_iso_rules.py, originally by Nathaniel Johnston and Peter Naszvadi
# by Arie Paap, Oct 2017
import itertools
import math
import golly as g
try:
# Avoid xrange argument overflowing type C long on Python2
if xrange(1):
xrange = lambda stop: iter(itertools.count().next, stop)
except NameError:
xrange = range
# Interpret a pattern in sss format
# Return a tuple with corresponding fields
# Format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
def parseshipstr(shipstr):
    """Parse one ship record in sss format.

    Expects a comma-space separated line:
    'minpop, rulestr, dx, dy, period, shiprle'.
    Returns the tuple (minpop, rulestr, dx, dy, period, shiprle) with the
    numeric fields converted to int, or None when the line does not start
    with a nonzero digit or does not have exactly six fields.
    """
    if not shipstr or shipstr[0] not in '123456789':
        return None
    fields = shipstr.split(', ')
    if len(fields) != 6:
        return None
    minpop, rulestr, dx, dy, period, shiprle = fields
    return (int(minpop), rulestr.strip(), int(dx), int(dy),
            int(period), shiprle.strip())
# Determine the minimum population, displacement and period of a spaceship
# Input ship is given by an rle string and a separate rule string. If either
# string is empty then use the current pattern / rule (respectively).
# Clears the current layer and leaves the ship in the layer, in a minimum
# population phase which has minimum bounding box area.
# XXX True displacement returned - consider returning 5S canonical displacement.
# XXX Might be better to shift choice of phase to canon5Sship() which also sets
# the minimum isotropic rule and adjusts orientation to 5S project standard.
# XXX Only works in rules with 2 states.
# --------------------------------------------------------------------
def testShip(rlepatt, rule, maxgen = 2000):
    """Measure a candidate spaceship in the current Golly layer.

    Parameters:
        rlepatt (str): RLE of the pattern to test; '' uses the current layer.
        rule (str): rule to test under; '' keeps the current rule.
        maxgen (int): generations to simulate before giving up.

    Returns (minpop, speed, area) where speed is (dx, dy, period) when the
    pattern reappears within maxgen generations (empty tuple otherwise) and
    area is the bounding-box area of the whole evolution.  Leaves the ship
    in the layer in a minimum-population, minimum-bounding-box phase.
    NOTE(review): the early-exit paths return a 2-tuple (minpop, speed)
    instead of a 3-tuple -- callers should not rely on the result length.
    """
    # Clear the layer and place the ship
    r = g.getrect()
    if rlepatt:
        patt = g.parse(rlepatt)
        # If rlepatt is in a multistate representation then patt will be
        # a multistate cell list. testShip() only works for ships in two
        # state rules, so convert to two state cell list.
        if (len(patt)%2):
            # This assumes all cells have non-zero state - which is reasonable
            # for the results of g.parse()
            patt = [ patt[i] for j, i in enumerate(patt[:-1]) if (j+1)%3 ]
    else:
        # Use the current pattern
        if not r:
            return (0, tuple())
        patt = g.getcells(r)
        patt = g.transform(patt, -r[0], -r[1])
    # g.note(str((rlepatt, rule)))
    if r:
        g.select(r)
        g.clear(0)
    g.putcells(patt)
    # g.note(str(len(patt)) + ", " + str(patt))
    # rlepatt might be different to the rle representation determined by
    # giveRLE(), so ensure we have the correct representation
    testrle = giveRLE(patt)
    if rule:
        g.setrule(rule)
    speed = ()
    startpop = int(g.getpop())
    bbox = g.getrect()
    minpop = startpop
    minbboxarea = bbox[2]*bbox[3]
    # mingen: generation index (1-based) of the best phase found so far
    mingen = 0
    # Keep track of the total bbox
    maxx = bbox[2]
    maxy = bbox[3]
    maxpop = startpop
    # Ignore ship if rule is not a 2-state rule
    if not g.numstates()==2:
        return (minpop, speed)
    for ii in xrange(maxgen):
        g.run(1)
        r = g.getrect()
        if not r:
            # Pattern has died out and is therefore not a ship
            mingen = 0
            break
        pop = int(g.getpop())
        bboxarea = r[2]*r[3]
        if pop < minpop:
            # Find phase with minimimum population
            minpop = pop
            minbboxarea = r[2]*r[3]
            mingen = ii+1
        elif pop == minpop:
            # Amongst phases with min pop, find one with minimum bbox area
            # bboxarea = r[2]*r[3]
            if bboxarea < minbboxarea:
                minbboxarea = bboxarea
                mingen = ii+1
        # Track the bounding box of the pattern's evolution
        maxx = max(maxx, r[2])
        maxy = max(maxy, r[3])
        maxpop = max(maxpop, pop)
        # Same population and same bbox size: candidate for a recurrence
        if (pop == startpop and r[2:4] == bbox[2:4]):
            if (giveRLE(g.getcells(r)) == testrle):
                # Starting ship has reappeared
                speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
                break
        # Check for rotated pattern
        elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
            # For 2-cell oscillators this is sufficient
            if minpop == 2:
                speed = (0, 0, 2*(ii+1))
                mingen = ii+1
                break
    g.run(mingen) # Evolve ship to generation with minimum population
    # return (minpop, speed)
    # return (minpop, speed, maxpop)
    return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
    """Return (smallest, largest) absolute value found in the sequence v."""
    magnitudes = sorted(abs(n) for n in v)
    return magnitudes[0], magnitudes[-1]
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
    """Return the canonical 5S form of `ship` (an sss tuple).

    Reorients the ship so it travels E, SE or ESE, reduces the rule to the
    minimal isotropic rule supporting it, and rewrites the RLE from the
    phase left in the layer.  Returns (minpop, minrulestr, dx, dy, period,
    shiprle).  NOTE(review): the maxgen parameter is currently unused.
    """
    minpop, rulestr, dx, dy, period, shiprle = ship
    shipPatt = g.parse(shiprle)
    # Transform ship to canonical direction
    if abs(dx) >= abs(dy):
        a, b, c, d = sign(dx), 0, 0, sign(dy)
    else:
        a, b, c, d = 0, sign(dy), sign(dx), 0
    # Canonical displacement: dx is the larger magnitude, both non-negative
    dy, dx = minmaxofabs((dx, dy))
    shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
    # Clear the layer and place the ship
    r = g.getrect()
    if r:
        g.select(r)
        g.clear(0)
    g.putcells(shipPatt)
    shiprle = giveRLE(g.getcells(g.getrect()))
    g.setrule(rulestr)
    # Determine the minimal isotropic rule
    setminisorule(period)
    return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def chunks(l, n):
    """Yield successive n-element slices of sequence l (last may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
| else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: rle_strB = ""
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
# Isotropic transition letters grouped by neighbour count (Hensel notation).
Hensel = [
    ['0'],
    ['1c', '1e'],
    ['2a', '2c', '2e', '2i', '2k', '2n'],
    ['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
    ['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
    ['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
    ['6a', '6c', '6e', '6i', '6k', '6n'],
    ['7c', '7e'],
    ['8']
]
def parseTransitions(ruleTrans):
    """Expand one side of an isotropic rulestring into explicit transitions.

    ruleTrans is e.g. '2ak3-ny'; the result lists every Hensel transition it
    denotes.  A bare digit expands to all of its transitions, letters after
    a digit select individual transitions, and a '-' after the digit selects
    all transitions except the listed letters.
    """
    transitions = []
    if not ruleTrans:
        return transitions
    digit = ruleTrans[0]
    sawLetter = False
    negated = False
    # A trailing '9' sentinel flushes the final totalistic digit.
    for ch in ruleTrans[1:] + '9':
        if ch in '0123456789':
            if not sawLetter:
                transitions += Hensel[int(digit)]
            digit, sawLetter, negated = ch, False, False
        elif ch == '-':
            # Negated group: start from the full set, then remove letters.
            negated = True
            transitions += Hensel[int(digit)]
        else:
            sawLetter = True
            if negated:
                transitions.remove(digit + ch)
            else:
                transitions.append(digit + ch)
    return transitions
def rulestringopt(a):
    """Canonicalise a rulestring for display.

    Collapses duplicated digit headers within the same B/S part
    (e.g. 'B3a3c' -> 'B3ac') and rewrites complete transition sets as the
    plain totalistic digit (e.g. '2aceikn' -> '2').
    """
    pieces = []
    part = ''           # which half ('B' or 'S') we are inside
    currentDigit = ''   # digit header most recently emitted in this half
    digitPart = ''      # half that header was emitted in
    for ch in a:
        if ch in 'BS':
            part = ch
            pieces.append(ch)
        elif ch in '012345678':
            if ch == currentDigit and digitPart == part:
                continue  # duplicate header: merge its letter group
            digitPart = part
            currentDigit = ch
            pieces.append(ch)
        else:
            pieces.append(ch)
    result = ''.join(pieces)
    # A complete letter set is equivalent to the bare totalistic digit.
    for full, short in (
        ('4aceijknqrtwyz', '4'),
        ('3aceijknqry', '3'),
        ('5aceijknqry', '5'),
        ('2aceikn', '2'),
        ('6aceikn', '6'),
        ('1ce', '1'),
        ('7ce', '7'),
    ):
        result = result.replace(full, short)
    return result
def getRuleRangeElems(period, ruleRange = 'minmax'):
    """Find the isotropic rule range preserving the current pattern's evolution.

    Records the pattern's evolution for `period` generations in the current
    (isotropic, 2-state, 'B.../S...') rule, then tests Hensel transitions
    one at a time: removing each present transition to see if it is required
    ('min' half) and adding each absent transition to see if it is allowed
    ('max' half).  ruleRange selects which halves to compute: 'min', 'max'
    or 'minmax'.

    Returns (b_need, s_need, b_OK, s_OK): required and allowed Birth and
    Survival transition lists.  Returns None when the layer is empty or
    period < 1.  Restores the original pattern and rule before returning.
    """
    if g.empty():
        return
    if period < 1:
        return
    # Strip any ':T...' topology suffix from the rulestring
    rule = g.getrule().split(':')[0]
    if not (rule[0] == 'B' and '/S' in rule):
        g.exit('Please set Golly to an isotropic 2-state rule.')
    # Parse rule string to list of transitions for Birth and Survival
    oldrule = rule
    Bstr, Sstr = rule.split('/')
    Bstr = Bstr.lstrip('B')
    Sstr = Sstr.lstrip('S')
    b_need = parseTransitions(Bstr)
    b_OK = list(b_need)
    s_need = parseTransitions(Sstr)
    s_OK = list(s_need)
    patt = g.getcells(g.getrect())
    # Record behavior of pattern in current rule
    clist = []
    poplist = []
    for i in range(0,period):
        g.run(1)
        clist.append(g.getcells(g.getrect()))
        poplist.append(g.getpop())
    # NOTE(review): poplist and finalpop are recorded but never used below.
    finalpop = g.getpop()
    if 'min' in ruleRange:
        # Test all rule transitions to determine if they are required
        # (iterate over the b_OK copy while mutating b_need)
        for t in b_OK:
            b_need.remove(t)
            g.setrule('B' + ''.join(b_need) + '/S' + Sstr)
            r = g.getrect()
            if r:
                g.select(r)
                g.clear(0)
            g.putcells(patt)
            for j in range(0, period):
                g.run(1)
                try:
                    # Any deviation from the recorded evolution means the
                    # transition is required, so put it back.
                    if not(clist[j] == g.getcells(g.getrect())):
                        b_need.append(t)
                        break
                except:
                    b_need.append(t)
                    break
            b_need.sort()
        for t in s_OK:
            s_need.remove(t)
            g.setrule('B' + Bstr + '/S' + ''.join(s_need))
            r = g.getrect()
            if r:
                g.select(r)
                g.clear(0)
            g.putcells(patt)
            for j in range(0, period):
                g.run(1)
                try:
                    if not(clist[j] == g.getcells(g.getrect())):
                        s_need.append(t)
                        break
                except:
                    s_need.append(t)
                    break
            s_need.sort()
    if 'max' in ruleRange:
        # Test unused rule transitions to determine if they are allowed
        allRuleElem = [t for l in Hensel for t in l]
        for t in allRuleElem:
            if t in b_OK:
                continue
            b_OK.append(t)
            g.setrule('B' + ''.join(b_OK) + '/S' + Sstr)
            r = g.getrect()
            if r:
                g.select(r)
                g.clear(0)
            g.putcells(patt)
            for j in range(0, period):
                g.run(1)
                try:
                    # Any deviation means the extra transition is not allowed.
                    if not(clist[j] == g.getcells(g.getrect())):
                        b_OK.remove(t)
                        break
                except:
                    b_OK.remove(t)
                    break
            b_OK.sort()
        for t in allRuleElem:
            if t in s_OK:
                continue
            s_OK.append(t)
            g.setrule('B' + Bstr + '/S' + ''.join(s_OK))
            r = g.getrect()
            if r:
                g.select(r)
                g.clear(0)
            g.putcells(patt)
            for j in range(0, period):
                g.run(1)
                try:
                    if not(clist[j] == g.getcells(g.getrect())):
                        s_OK.remove(t)
                        break
                except:
                    s_OK.remove(t)
                    break
            s_OK.sort()
    # Restore the original pattern and rule
    r = g.getrect()
    if r:
        g.select(r)
        g.clear(0)
    g.putcells(patt)
    g.setrule(oldrule)
    return b_need, s_need, b_OK, s_OK
def setminisorule(period):
    """Set Golly's rule to the minimal isotropic rule that supports the
    current pattern's evolution over `period` generations.

    Returns the minimal rulestring, or None when the layer is empty or
    period is invalid.
    """
    if g.empty():
        return
    if period < 1:
        return
    # Only the 'min' half of the rule range is needed here.
    b_need, s_need, b_OK, s_OK = getRuleRangeElems(period, ruleRange = 'min')
    minrulestr = 'B' + ''.join(sorted(b_need)) + '/S' + ''.join(sorted(s_need))
    g.setrule(minrulestr)
    return minrulestr
# --------------------------------------------------------------------
# Generator for random order rule iterator over a given rulespace
# Uses a linear congruential generator to iterate over all the rules
# in the given rulespace in a pseudo random order
# The rule space is specified by four lists:
# B_need - the required Birth transitions
# S_need - the required Survival transitions
# B_OK - the optional Birth transitions
# S_OK - the optional Survival transitions
# Provide a value to seed to specify the starting point of the generator
# seed < 2^(len(B_OK) + len(S_OK))
# --------------------------------------------------------------------
def iterRuleStr(B_OK, S_OK, B_need=[], S_need=[], seed=1):
    """Yield every rulestring in the rulespace in a pseudo-random order.

    The rulespace consists of the mandatory transitions B_need/S_need plus
    every subset of the optional transitions B_OK/S_OK.  A linear
    congruential generator over 2**(len(B_OK)+len(S_OK)) states enumerates
    each combination exactly once; `seed` picks the starting point.
    """
    nB, nS = len(B_OK), len(S_OK)
    modulus = 2 ** (nB + nS)
    mult, inc = 5, 7

    def pickTransitions(options, bits):
        # Select the options whose corresponding bit in `bits` is set.
        chosen = []
        for opt in options:
            if bits & 1:
                chosen.append(opt)
            bits >>= 1
        return ''.join(chosen)

    # Warm the LCG up so small seed values diverge quickly.
    state = seed
    for _ in range(3):
        state = (mult * state + inc) % modulus
    # Low nS bits select survival transitions; the rest select births.
    maskS = (1 << nS) - 1
    birthBase = 'B' + ''.join(B_need)
    survivalBase = '/S' + ''.join(S_need)
    emitted = 0
    # while-loop keeps the enumeration lazy even for huge rulespaces
    while emitted < modulus:
        state = (mult * state + inc) % modulus
        sBits = state & maskS
        bBits = state >> nS
        yield (birthBase + pickTransitions(B_OK, bBits)
               + survivalBase + pickTransitions(S_OK, sBits))
        emitted += 1
# -------------------------------------------------------------------- | rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
| random_line_split |
sss.py | # sss.py
# Commonly used routines to analyse small patterns in isotropic 2-state rules
# Includes giveRLE.py, originally by Nathaniel Johnston
# Includes code from get_all_iso_rules.py, originally by Nathaniel Johnston and Peter Naszvadi
# by Arie Paap, Oct 2017
import itertools
import math
import golly as g
try:
# Avoid xrange argument overflowing type C long on Python2
if xrange(1):
xrange = lambda stop: iter(itertools.count().next, stop)
except NameError:
xrange = range
# Interpret a pattern in sss format
# Return a tuple with corresponding fields
# Format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
def parseshipstr(shipstr):
    """Parse one ship record in sss format.

    Expects a comma-space separated line:
    'minpop, rulestr, dx, dy, period, shiprle'.
    Returns the tuple (minpop, rulestr, dx, dy, period, shiprle) with the
    numeric fields converted to int, or None when the line does not start
    with a nonzero digit or does not have exactly six fields.
    """
    if not shipstr or shipstr[0] not in '123456789':
        return None
    fields = shipstr.split(', ')
    if len(fields) != 6:
        return None
    minpop, rulestr, dx, dy, period, shiprle = fields
    return (int(minpop), rulestr.strip(), int(dx), int(dy),
            int(period), shiprle.strip())
# Determine the minimum population, displacement and period of a spaceship
# Input ship is given by an rle string and a separate rule string. If either
# string is empty then use the current pattern / rule (respectively).
# Clears the current layer and leaves the ship in the layer, in a minimum
# population phase which has minimum bounding box area.
# XXX True displacement returned - consider returning 5S canonical displacement.
# XXX Might be better to shift choice of phase to canon5Sship() which also sets
# the minimum isotropic rule and adjusts orientation to 5S project standard.
# XXX Only works in rules with 2 states.
# --------------------------------------------------------------------
def testShip(rlepatt, rule, maxgen = 2000):
    """Measure a candidate spaceship in the current Golly layer.

    Parameters:
        rlepatt (str): RLE of the pattern to test; '' uses the current layer.
        rule (str): rule to test under; '' keeps the current rule.
        maxgen (int): generations to simulate before giving up.

    Returns (minpop, speed, area) where speed is (dx, dy, period) when the
    pattern reappears within maxgen generations (empty tuple otherwise) and
    area is the bounding-box area of the whole evolution.  Leaves the ship
    in the layer in a minimum-population, minimum-bounding-box phase.
    NOTE(review): the early-exit paths return a 2-tuple (minpop, speed)
    instead of a 3-tuple -- callers should not rely on the result length.
    """
    # Clear the layer and place the ship
    r = g.getrect()
    if rlepatt:
        patt = g.parse(rlepatt)
        # If rlepatt is in a multistate representation then patt will be
        # a multistate cell list. testShip() only works for ships in two
        # state rules, so convert to two state cell list.
        if (len(patt)%2):
            # This assumes all cells have non-zero state - which is reasonable
            # for the results of g.parse()
            patt = [ patt[i] for j, i in enumerate(patt[:-1]) if (j+1)%3 ]
    else:
        # Use the current pattern
        if not r:
            return (0, tuple())
        patt = g.getcells(r)
        patt = g.transform(patt, -r[0], -r[1])
    # g.note(str((rlepatt, rule)))
    if r:
        g.select(r)
        g.clear(0)
    g.putcells(patt)
    # g.note(str(len(patt)) + ", " + str(patt))
    # rlepatt might be different to the rle representation determined by
    # giveRLE(), so ensure we have the correct representation
    testrle = giveRLE(patt)
    if rule:
        g.setrule(rule)
    speed = ()
    startpop = int(g.getpop())
    bbox = g.getrect()
    minpop = startpop
    minbboxarea = bbox[2]*bbox[3]
    # mingen: generation index (1-based) of the best phase found so far
    mingen = 0
    # Keep track of the total bbox
    maxx = bbox[2]
    maxy = bbox[3]
    maxpop = startpop
    # Ignore ship if rule is not a 2-state rule
    if not g.numstates()==2:
        return (minpop, speed)
    for ii in xrange(maxgen):
        g.run(1)
        r = g.getrect()
        if not r:
            # Pattern has died out and is therefore not a ship
            mingen = 0
            break
        pop = int(g.getpop())
        bboxarea = r[2]*r[3]
        if pop < minpop:
            # Find phase with minimimum population
            minpop = pop
            minbboxarea = r[2]*r[3]
            mingen = ii+1
        elif pop == minpop:
            # Amongst phases with min pop, find one with minimum bbox area
            # bboxarea = r[2]*r[3]
            if bboxarea < minbboxarea:
                minbboxarea = bboxarea
                mingen = ii+1
        # Track the bounding box of the pattern's evolution
        maxx = max(maxx, r[2])
        maxy = max(maxy, r[3])
        maxpop = max(maxpop, pop)
        # Same population and same bbox size: candidate for a recurrence
        if (pop == startpop and r[2:4] == bbox[2:4]):
            if (giveRLE(g.getcells(r)) == testrle):
                # Starting ship has reappeared
                speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
                break
        # Check for rotated pattern
        elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
            # For 2-cell oscillators this is sufficient
            if minpop == 2:
                speed = (0, 0, 2*(ii+1))
                mingen = ii+1
                break
    g.run(mingen) # Evolve ship to generation with minimum population
    # return (minpop, speed)
    # return (minpop, speed, maxpop)
    return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
    """Return (smallest, largest) absolute value found in the sequence v."""
    magnitudes = sorted(abs(n) for n in v)
    return magnitudes[0], magnitudes[-1]
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
    """Return the canonical 5S form of `ship` (an sss tuple).

    Reorients the ship so it travels E, SE or ESE, reduces the rule to the
    minimal isotropic rule supporting it, and rewrites the RLE from the
    phase left in the layer.  Returns (minpop, minrulestr, dx, dy, period,
    shiprle).  NOTE(review): the maxgen parameter is currently unused.
    """
    minpop, rulestr, dx, dy, period, shiprle = ship
    shipPatt = g.parse(shiprle)
    # Transform ship to canonical direction
    if abs(dx) >= abs(dy):
        a, b, c, d = sign(dx), 0, 0, sign(dy)
    else:
        a, b, c, d = 0, sign(dy), sign(dx), 0
    # Canonical displacement: dx is the larger magnitude, both non-negative
    dy, dx = minmaxofabs((dx, dy))
    shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
    # Clear the layer and place the ship
    r = g.getrect()
    if r:
        g.select(r)
        g.clear(0)
    g.putcells(shipPatt)
    shiprle = giveRLE(g.getcells(g.getrect()))
    g.setrule(rulestr)
    # Determine the minimal isotropic rule
    setminisorule(period)
    return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def chunks(l, n):
    """Yield successive n-element slices of sequence l (last may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def giveRLE(clist):
    """Encode a flat two-state cell list [x0, y0, x1, y1, ...] as RLE.

    Cells are sorted row-major (by y, then x), x-coordinates are normalised
    so the leftmost cell sits at column 0, and runs of live ('o'), dead
    ('b') and end-of-row ('$') symbols are emitted with the usual RLE
    counts.  The string is terminated with '!'.
    """
    cells = sorted(zip(clist[::2], clist[1::2]), key=lambda c: (c[1], c[0]))
    leftmost = min(c[0] for c in cells)
    def run(length, symbol):
        # RLE convention: a count of 1 is left implicit.
        return symbol if length == 1 else str(length) + symbol
    parts = []
    prev_row = None
    for row, grouped in itertools.groupby(cells, key=lambda c: c[1]):
        if prev_row is not None:
            parts.append(run(row - prev_row, '$'))
        prev_row = row
        xs = [c[0] - leftmost for c in grouped]
        cursor = 0
        i = 0
        while i < len(xs):
            if xs[i] > cursor:
                parts.append(run(xs[i] - cursor, 'b'))
            # Extend the run over consecutive live cells.
            j = i
            while j + 1 < len(xs) and xs[j + 1] == xs[j] + 1:
                j += 1
            parts.append(run(j - i + 1, 'o'))
            cursor = xs[j] + 1
            i = j + 1
    return ''.join(parts) + '!'
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
# Isotropic transition letters grouped by neighbour count (Hensel notation).
Hensel = [
    ['0'],
    ['1c', '1e'],
    ['2a', '2c', '2e', '2i', '2k', '2n'],
    ['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
    ['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
    ['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
    ['6a', '6c', '6e', '6i', '6k', '6n'],
    ['7c', '7e'],
    ['8']
]
def parseTransitions(ruleTrans):
    """Expand one side of an isotropic rulestring into explicit transitions.

    ruleTrans is e.g. '2ak3-ny'; the result lists every Hensel transition it
    denotes.  A bare digit expands to all of its transitions, letters after
    a digit select individual transitions, and a '-' after the digit selects
    all transitions except the listed letters.
    """
    transitions = []
    if not ruleTrans:
        return transitions
    digit = ruleTrans[0]
    sawLetter = False
    negated = False
    # A trailing '9' sentinel flushes the final totalistic digit.
    for ch in ruleTrans[1:] + '9':
        if ch in '0123456789':
            if not sawLetter:
                transitions += Hensel[int(digit)]
            digit, sawLetter, negated = ch, False, False
        elif ch == '-':
            # Negated group: start from the full set, then remove letters.
            negated = True
            transitions += Hensel[int(digit)]
        else:
            sawLetter = True
            if negated:
                transitions.remove(digit + ch)
            else:
                transitions.append(digit + ch)
    return transitions
def rulestringopt(a):
    """Canonicalise a rulestring for display.

    Collapses duplicated digit headers within the same B/S part
    (e.g. 'B3a3c' -> 'B3ac') and rewrites complete transition sets as the
    plain totalistic digit (e.g. '2aceikn' -> '2').
    """
    pieces = []
    part = ''           # which half ('B' or 'S') we are inside
    currentDigit = ''   # digit header most recently emitted in this half
    digitPart = ''      # half that header was emitted in
    for ch in a:
        if ch in 'BS':
            part = ch
            pieces.append(ch)
        elif ch in '012345678':
            if ch == currentDigit and digitPart == part:
                continue  # duplicate header: merge its letter group
            digitPart = part
            currentDigit = ch
            pieces.append(ch)
        else:
            pieces.append(ch)
    result = ''.join(pieces)
    # A complete letter set is equivalent to the bare totalistic digit.
    for full, short in (
        ('4aceijknqrtwyz', '4'),
        ('3aceijknqry', '3'),
        ('5aceijknqry', '5'),
        ('2aceikn', '2'),
        ('6aceikn', '6'),
        ('1ce', '1'),
        ('7ce', '7'),
    ):
        result = result.replace(full, short)
    return result
def getRuleRangeElems(period, ruleRange = 'minmax'):
|
def setminisorule(period):
    """Set Golly's rule to the minimal isotropic rule that supports the
    current pattern's evolution over `period` generations.

    Returns the minimal rulestring, or None when the layer is empty or
    period is invalid.
    """
    if g.empty():
        return
    if period < 1:
        return
    # Only the 'min' half of the rule range is needed here.
    b_need, s_need, b_OK, s_OK = getRuleRangeElems(period, ruleRange = 'min')
    minrulestr = 'B' + ''.join(sorted(b_need)) + '/S' + ''.join(sorted(s_need))
    g.setrule(minrulestr)
    return minrulestr
# --------------------------------------------------------------------
# Generator for random order rule iterator over a given rulespace
# Uses a linear congruential generator to iterate over all the rules
# in the given rulespace in a pseudo random order
# The rule space is specified by four lists:
# B_need - the required Birth transitions
# S_need - the required Survival transitions
# B_OK - the optional Birth transitions
# S_OK - the optional Survival transitions
# Provide a value to seed to specify the starting point of the generator
# seed < 2^(len(B_OK) + len(S_OK))
# --------------------------------------------------------------------
def iterRuleStr(B_OK, S_OK, B_need=[], S_need=[], seed=1):
# Pseudo-random rule index generator using an LCG
def randRuleIdx(nB_OK, nS_OK, seed=1):
# LCG state initialisation
m = 2**(nB_OK + nS_OK)
c = 7
a = 5
# Reduce collisions for small seed values
for _ in range(3):
seed = (a*seed+c) % m
# Masks for birth and survival transitions
maskS = 2**nS_OK - 1
maskB = (2**nB_OK - 1) << nS_OK
for ii in xrange(m):
seed = (a*seed+c) % m
randS = seed & maskS
randB = (seed & maskB) >> nS_OK
yield (randB, randS)
# Transition String retrieval
def getTransStr(tList, idx):
trans = ''
for t in tList:
if (idx & 1):
trans += t
idx = idx >> 1
return trans
Bstr = 'B' + ''.join(B_need)
Sstr = '/S' + ''.join(S_need)
for (Bidx, Sidx) in randRuleIdx(len(B_OK), len(S_OK), seed):
rulestr = Bstr + getTransStr(B_OK, Bidx) + Sstr + getTransStr(S_OK, Sidx)
yield rulestr
# --------------------------------------------------------------------
| if g.empty():
return
if period < 1:
return
rule = g.getrule().split(':')[0]
if not (rule[0] == 'B' and '/S' in rule):
g.exit('Please set Golly to an isotropic 2-state rule.')
# Parse rule string to list of transitions for Birth and Survival
oldrule = rule
Bstr, Sstr = rule.split('/')
Bstr = Bstr.lstrip('B')
Sstr = Sstr.lstrip('S')
b_need = parseTransitions(Bstr)
b_OK = list(b_need)
s_need = parseTransitions(Sstr)
s_OK = list(s_need)
patt = g.getcells(g.getrect())
# Record behavior of pattern in current rule
clist = []
poplist = []
for i in range(0,period):
g.run(1)
clist.append(g.getcells(g.getrect()))
poplist.append(g.getpop())
finalpop = g.getpop()
if 'min' in ruleRange:
# Test all rule transitions to determine if they are required
for t in b_OK:
b_need.remove(t)
g.setrule('B' + ''.join(b_need) + '/S' + Sstr)
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
b_need.append(t)
break
except:
b_need.append(t)
break
b_need.sort()
for t in s_OK:
s_need.remove(t)
g.setrule('B' + Bstr + '/S' + ''.join(s_need))
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
s_need.append(t)
break
except:
s_need.append(t)
break
s_need.sort()
if 'max' in ruleRange:
# Test unused rule transitions to determine if they are allowed
allRuleElem = [t for l in Hensel for t in l]
for t in allRuleElem:
if t in b_OK:
continue
b_OK.append(t)
g.setrule('B' + ''.join(b_OK) + '/S' + Sstr)
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
b_OK.remove(t)
break
except:
b_OK.remove(t)
break
b_OK.sort()
for t in allRuleElem:
if t in s_OK:
continue
s_OK.append(t)
g.setrule('B' + Bstr + '/S' + ''.join(s_OK))
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
s_OK.remove(t)
break
except:
s_OK.remove(t)
break
s_OK.sort()
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
g.setrule(oldrule)
return b_need, s_need, b_OK, s_OK | identifier_body |
sss.py | # sss.py
# Commonly used routines to analyse small patterns in isotropic 2-state rules
# Includes giveRLE.py, originally by Nathaniel Johnston
# Includes code from get_all_iso_rules.py, originally by Nathaniel Johnston and Peter Naszvadi
# by Arie Paap, Oct 2017
import itertools
import math
import golly as g
try:
# Avoid xrange argument overflowing type C long on Python2
if xrange(1):
xrange = lambda stop: iter(itertools.count().next, stop)
except NameError:
xrange = range
# Interpret a pattern in sss format
# Return a tuple with corresponding fields
# Format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
def parseshipstr(shipstr):
if (not shipstr) or (not shipstr[0] in '123456789'):
return
ship = shipstr.split(', ')
if not len(ship) == 6:
return
ship[0] = int(ship[0])
ship[1] = ship[1].strip()
ship[2] = int(ship[2])
ship[3] = int(ship[3])
ship[4] = int(ship[4])
ship[5] = ship[5].strip()
return tuple(ship)
# Determine the minimum population, displacement and period of a spaceship
# Input ship is given by an rle string and a separate rule string. If either
# string is empty then use the current pattern / rule (respectively).
# Clears the current layer and leaves the ship in the layer, in a minimum
# population phase which has minimum bounding box area.
# XXX True displacement returned - consider returning 5S canonical displacement.
# XXX Might be better to shift choice of phase to canon5Sship() which also sets
# the minimum isotropic rule and adjusts orientation to 5S project standard.
# XXX Only works in rules with 2 states.
# --------------------------------------------------------------------
def testShip(rlepatt, rule, maxgen = 2000):
# Clear the layer and place the ship
r = g.getrect()
if rlepatt:
patt = g.parse(rlepatt)
# If rlepatt is in a multistate representation then patt will be
# a multistate cell list. testShip() only works for ships in two
# state rules, so convert to two state cell list.
if (len(patt)%2):
# This assumes all cells have non-zero state - which is reasonable
# for the results of g.parse()
patt = [ patt[i] for j, i in enumerate(patt[:-1]) if (j+1)%3 ]
else:
# Use the current pattern
if not r:
return (0, tuple())
patt = g.getcells(r)
patt = g.transform(patt, -r[0], -r[1])
# g.note(str((rlepatt, rule)))
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
# g.note(str(len(patt)) + ", " + str(patt))
# rlepatt might be different to the rle representation determined by
# giveRLE(), so ensure we have the correct representation
testrle = giveRLE(patt)
if rule:
g.setrule(rule)
speed = ()
startpop = int(g.getpop())
bbox = g.getrect()
minpop = startpop
minbboxarea = bbox[2]*bbox[3]
mingen = 0
# Keep track of the total bbox
maxx = bbox[2]
maxy = bbox[3]
maxpop = startpop
# Ignore ship if rule is not a 2-state rule
if not g.numstates()==2:
return (minpop, speed)
for ii in xrange(maxgen):
g.run(1)
r = g.getrect()
if not r:
# Pattern has died out and is therefore not a ship
mingen = 0
break
pop = int(g.getpop())
bboxarea = r[2]*r[3]
if pop < minpop:
# Find phase with minimimum population
minpop = pop
minbboxarea = r[2]*r[3]
mingen = ii+1
elif pop == minpop:
# Amongst phases with min pop, find one with minimum bbox area
# bboxarea = r[2]*r[3]
if bboxarea < minbboxarea:
minbboxarea = bboxarea
mingen = ii+1
# Track the bounding box of the pattern's evolution
maxx = max(maxx, r[2])
maxy = max(maxy, r[3])
maxpop = max(maxpop, pop)
if (pop == startpop and r[2:4] == bbox[2:4]):
if (giveRLE(g.getcells(r)) == testrle):
# Starting ship has reappeared
speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
break
# Check for rotated pattern
elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
# For 2-cell oscillators this is sufficient
if minpop == 2:
speed = (0, 0, 2*(ii+1))
mingen = ii+1
break
g.run(mingen) # Evolve ship to generation with minimum population
# return (minpop, speed)
# return (minpop, speed, maxpop)
return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
v = [abs(x) for x in v]
return min(v), max(v)
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
minpop, rulestr, dx, dy, period, shiprle = ship
shipPatt = g.parse(shiprle)
# Transform ship to canonical direction
if abs(dx) >= abs(dy):
a, b, c, d = sign(dx), 0, 0, sign(dy)
else:
a, b, c, d = 0, sign(dy), sign(dx), 0
dy, dx = minmaxofabs((dx, dy))
shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
# Clear the layer and place the ship
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(shipPatt)
shiprle = giveRLE(g.getcells(g.getrect()))
g.setrule(rulestr)
# Determine the minimal isotropic rule
setminisorule(period)
return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: |
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
Hensel = [
['0'],
['1c', '1e'],
['2a', '2c', '2e', '2i', '2k', '2n'],
['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
['6a', '6c', '6e', '6i', '6k', '6n'],
['7c', '7e'],
['8']
]
def parseTransitions(ruleTrans):
ruleElem = []
if not ruleTrans:
return ruleElem
context = ruleTrans[0]
bNonTot = False
bNegate = False
for ch in ruleTrans[1:] + '9':
if ch in '0123456789':
if not bNonTot:
ruleElem += Hensel[int(context)]
context = ch
bNonTot = False
bNegate = False
elif ch == '-':
bNegate = True
ruleElem += Hensel[int(context)]
else:
bNonTot = True
if bNegate:
ruleElem.remove(context + ch)
else:
ruleElem.append(context + ch)
return ruleElem
def rulestringopt(a):
result = ''
context = ''
lastnum = ''
lastcontext = ''
for i in a:
if i in 'BS':
context = i
result += i
elif i in '012345678':
if (i == lastnum) and (lastcontext == context):
pass
else:
lastcontext = context
lastnum = i
result += i
else:
result += i
result = result.replace('4aceijknqrtwyz', '4')
result = result.replace('3aceijknqry', '3')
result = result.replace('5aceijknqry', '5')
result = result.replace('2aceikn', '2')
result = result.replace('6aceikn', '6')
result = result.replace('1ce', '1')
result = result.replace('7ce', '7')
return result
def getRuleRangeElems(period, ruleRange = 'minmax'):
if g.empty():
return
if period < 1:
return
rule = g.getrule().split(':')[0]
if not (rule[0] == 'B' and '/S' in rule):
g.exit('Please set Golly to an isotropic 2-state rule.')
# Parse rule string to list of transitions for Birth and Survival
oldrule = rule
Bstr, Sstr = rule.split('/')
Bstr = Bstr.lstrip('B')
Sstr = Sstr.lstrip('S')
b_need = parseTransitions(Bstr)
b_OK = list(b_need)
s_need = parseTransitions(Sstr)
s_OK = list(s_need)
patt = g.getcells(g.getrect())
# Record behavior of pattern in current rule
clist = []
poplist = []
for i in range(0,period):
g.run(1)
clist.append(g.getcells(g.getrect()))
poplist.append(g.getpop())
finalpop = g.getpop()
if 'min' in ruleRange:
# Test all rule transitions to determine if they are required
for t in b_OK:
b_need.remove(t)
g.setrule('B' + ''.join(b_need) + '/S' + Sstr)
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
b_need.append(t)
break
except:
b_need.append(t)
break
b_need.sort()
for t in s_OK:
s_need.remove(t)
g.setrule('B' + Bstr + '/S' + ''.join(s_need))
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
s_need.append(t)
break
except:
s_need.append(t)
break
s_need.sort()
if 'max' in ruleRange:
# Test unused rule transitions to determine if they are allowed
allRuleElem = [t for l in Hensel for t in l]
for t in allRuleElem:
if t in b_OK:
continue
b_OK.append(t)
g.setrule('B' + ''.join(b_OK) + '/S' + Sstr)
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
b_OK.remove(t)
break
except:
b_OK.remove(t)
break
b_OK.sort()
for t in allRuleElem:
if t in s_OK:
continue
s_OK.append(t)
g.setrule('B' + Bstr + '/S' + ''.join(s_OK))
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
s_OK.remove(t)
break
except:
s_OK.remove(t)
break
s_OK.sort()
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
g.setrule(oldrule)
return b_need, s_need, b_OK, s_OK
def setminisorule(period):
if g.empty():
return
if period < 1:
return
b_need, s_need, b_OK, s_OK = getRuleRangeElems(period, ruleRange = 'min')
minrulestr = 'B' + ''.join(sorted(b_need)) + '/S' + ''.join(sorted(s_need))
g.setrule(minrulestr)
return minrulestr
# --------------------------------------------------------------------
# Generator for random order rule iterator over a given rulespace
# Uses a linear congruential generator to iterate over all the rules
# in the given rulespace in a pseudo random order
# The rule space is specified by four lists:
# B_need - the required Birth transitions
# S_need - the required Survival transitions
# B_OK - the optional Birth transitions
# S_OK - the optional Survival transitions
# Provide a value to seed to specify the starting point of the generator
# seed < 2^(len(B_OK) + len(S_OK))
# --------------------------------------------------------------------
def iterRuleStr(B_OK, S_OK, B_need=[], S_need=[], seed=1):
# Pseudo-random rule index generator using an LCG
def randRuleIdx(nB_OK, nS_OK, seed=1):
# LCG state initialisation
m = 2**(nB_OK + nS_OK)
c = 7
a = 5
# Reduce collisions for small seed values
for _ in range(3):
seed = (a*seed+c) % m
# Masks for birth and survival transitions
maskS = 2**nS_OK - 1
maskB = (2**nB_OK - 1) << nS_OK
for ii in xrange(m):
seed = (a*seed+c) % m
randS = seed & maskS
randB = (seed & maskB) >> nS_OK
yield (randB, randS)
# Transition String retrieval
def getTransStr(tList, idx):
trans = ''
for t in tList:
if (idx & 1):
trans += t
idx = idx >> 1
return trans
Bstr = 'B' + ''.join(B_need)
Sstr = '/S' + ''.join(S_need)
for (Bidx, Sidx) in randRuleIdx(len(B_OK), len(S_OK), seed):
rulestr = Bstr + getTransStr(B_OK, Bidx) + Sstr + getTransStr(S_OK, Sidx)
yield rulestr
# --------------------------------------------------------------------
| rle_strB = "" | conditional_block |
sss.py | # sss.py
# Commonly used routines to analyse small patterns in isotropic 2-state rules
# Includes giveRLE.py, originally by Nathaniel Johnston
# Includes code from get_all_iso_rules.py, originally by Nathaniel Johnston and Peter Naszvadi
# by Arie Paap, Oct 2017
import itertools
import math
import golly as g
try:
# Avoid xrange argument overflowing type C long on Python2
if xrange(1):
xrange = lambda stop: iter(itertools.count().next, stop)
except NameError:
xrange = range
# Interpret a pattern in sss format
# Return a tuple with corresponding fields
# Format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
def parseshipstr(shipstr):
if (not shipstr) or (not shipstr[0] in '123456789'):
return
ship = shipstr.split(', ')
if not len(ship) == 6:
return
ship[0] = int(ship[0])
ship[1] = ship[1].strip()
ship[2] = int(ship[2])
ship[3] = int(ship[3])
ship[4] = int(ship[4])
ship[5] = ship[5].strip()
return tuple(ship)
# Determine the minimum population, displacement and period of a spaceship
# Input ship is given by an rle string and a separate rule string. If either
# string is empty then use the current pattern / rule (respectively).
# Clears the current layer and leaves the ship in the layer, in a minimum
# population phase which has minimum bounding box area.
# XXX True displacement returned - consider returning 5S canonical displacement.
# XXX Might be better to shift choice of phase to canon5Sship() which also sets
# the minimum isotropic rule and adjusts orientation to 5S project standard.
# XXX Only works in rules with 2 states.
# --------------------------------------------------------------------
def testShip(rlepatt, rule, maxgen = 2000):
# Clear the layer and place the ship
r = g.getrect()
if rlepatt:
patt = g.parse(rlepatt)
# If rlepatt is in a multistate representation then patt will be
# a multistate cell list. testShip() only works for ships in two
# state rules, so convert to two state cell list.
if (len(patt)%2):
# This assumes all cells have non-zero state - which is reasonable
# for the results of g.parse()
patt = [ patt[i] for j, i in enumerate(patt[:-1]) if (j+1)%3 ]
else:
# Use the current pattern
if not r:
return (0, tuple())
patt = g.getcells(r)
patt = g.transform(patt, -r[0], -r[1])
# g.note(str((rlepatt, rule)))
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
# g.note(str(len(patt)) + ", " + str(patt))
# rlepatt might be different to the rle representation determined by
# giveRLE(), so ensure we have the correct representation
testrle = giveRLE(patt)
if rule:
g.setrule(rule)
speed = ()
startpop = int(g.getpop())
bbox = g.getrect()
minpop = startpop
minbboxarea = bbox[2]*bbox[3]
mingen = 0
# Keep track of the total bbox
maxx = bbox[2]
maxy = bbox[3]
maxpop = startpop
# Ignore ship if rule is not a 2-state rule
if not g.numstates()==2:
return (minpop, speed)
for ii in xrange(maxgen):
g.run(1)
r = g.getrect()
if not r:
# Pattern has died out and is therefore not a ship
mingen = 0
break
pop = int(g.getpop())
bboxarea = r[2]*r[3]
if pop < minpop:
# Find phase with minimimum population
minpop = pop
minbboxarea = r[2]*r[3]
mingen = ii+1
elif pop == minpop:
# Amongst phases with min pop, find one with minimum bbox area
# bboxarea = r[2]*r[3]
if bboxarea < minbboxarea:
minbboxarea = bboxarea
mingen = ii+1
# Track the bounding box of the pattern's evolution
maxx = max(maxx, r[2])
maxy = max(maxy, r[3])
maxpop = max(maxpop, pop)
if (pop == startpop and r[2:4] == bbox[2:4]):
if (giveRLE(g.getcells(r)) == testrle):
# Starting ship has reappeared
speed = (r[0]-bbox[0], r[1]-bbox[1], ii+1) # displacement and period
break
# Check for rotated pattern
elif (pop == startpop and r[2:4] == bbox[3:1:-1]):
# For 2-cell oscillators this is sufficient
if minpop == 2:
speed = (0, 0, 2*(ii+1))
mingen = ii+1
break
g.run(mingen) # Evolve ship to generation with minimum population
# return (minpop, speed)
# return (minpop, speed, maxpop)
return (minpop, speed, maxx*maxy)
# --------------------------------------------------------------------
# Return the minimum and maximum of the absolute value of a list of numbers
def minmaxofabs(v):
v = [abs(x) for x in v]
return min(v), max(v)
# Define a sign function
sign = lambda x: int(math.copysign(1, x))
# Find the canonical pattern for a sss format ship
# This is determined by orienting the ship so that it travels E, SE, or ESE,
# setting the rule to the minimal isotropic rule which supports the ship, and
# choosing a minimal bounding box phase from all phases with minimal population
# Input ship is in sss format: (minpop, 'rulestr', dx, dy, period, 'shiprle')
# XXX Two cases where the resulting pattern is not guaranteed to be canonical:
# - asymmetrical ships travelling orthogonally or diagonally (either one of
# the two orientations in the canonical direction may be returned)
# - multiple phases having the minimal population and bounding box area
def canon5Sship(ship, maxgen=2000):
minpop, rulestr, dx, dy, period, shiprle = ship
shipPatt = g.parse(shiprle)
# Transform ship to canonical direction
if abs(dx) >= abs(dy):
a, b, c, d = sign(dx), 0, 0, sign(dy)
else:
a, b, c, d = 0, sign(dy), sign(dx), 0
dy, dx = minmaxofabs((dx, dy))
shipPatt = g.transform(shipPatt, 0, 0, a, b, c, d)
# Clear the layer and place the ship
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(shipPatt)
shiprle = giveRLE(g.getcells(g.getrect()))
g.setrule(rulestr)
# Determine the minimal isotropic rule
setminisorule(period)
return minpop, g.getrule(), dx, dy, period, shiprle
# Python function to convert a cell list to RLE
# Author: Nathaniel Johnston (nathaniel@nathanieljohnston.com), June 2009.
# DMG: Refactored slightly so that the function input is a simple cell list.
# No error checking added.
# TBD: check for multistate rule, show appropriate warning.
# AJP: Replace g.evolve(clist,0) with Python sort (faster for small patterns)
# --------------------------------------------------------------------
def | (l, n):
for i in range(0, len(l), n):
yield l[i:i+n]
def giveRLE(clist):
# clist_chunks = list (chunks (g.evolve(clist,0), 2))
clist_chunks = list(chunks(clist, 2))
clist_chunks.sort(key=lambda l:(l[1], l[0]))
mcc = min(clist_chunks)
rl_list = [[x[0]-mcc[0],x[1]-mcc[1]] for x in clist_chunks]
rle_res = ""
rle_len = 1
rl_y = rl_list[0][1] - 1
rl_x = 0
for rl_i in rl_list:
if rl_i[1] == rl_y:
if rl_i[0] == rl_x + 1:
rle_len += 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[0] - rl_x - 1 == 1: rle_strB = ""
else: rle_strB = str (rl_i[0] - rl_x - 1)
rle_res = rle_res + rle_strA + "o" + rle_strB + "b"
rle_len = 1
else:
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
if rl_i[1] - rl_y == 1: rle_strB = ""
else: rle_strB = str (rl_i[1] - rl_y)
if rl_i[0] == 1: rle_strC = "b"
elif rl_i[0] == 0: rle_strC = ""
else: rle_strC = str (rl_i[0]) + "b"
rle_res = rle_res + rle_strA + "o" + rle_strB + "$" + rle_strC
rle_len = 1
rl_x = rl_i[0]
rl_y = rl_i[1]
if rle_len == 1: rle_strA = ""
else: rle_strA = str (rle_len)
rle_res = rle_res[2:] + rle_strA + "o"
return rle_res+"!"
# --------------------------------------------------------------------
# Isotropic rule range functions
# Based on the rule computation scripts by Nathaniel Johnston and Peter Naszvadi
# Functions:
# - parseTransitions:
# Interpret the totalistic and isotropic rule elements as a list of isotropic transitions
# - rulestringopt:
# Cleanup a rulestring. Only used when rulestring will be displayed
# - getRuleRangeElems:
# Determines the minimum and maximum isotropic rules in which a pattern's
# evolution remains unchanged for a given number of generations.
# Returns the required and allowed isotropic rule transitions in four lists.
# Optionally compute only the minimum or the maximum rule.
# --------------------------------------------------------------------
Hensel = [
['0'],
['1c', '1e'],
['2a', '2c', '2e', '2i', '2k', '2n'],
['3a', '3c', '3e', '3i', '3j', '3k', '3n', '3q', '3r', '3y'],
['4a', '4c', '4e', '4i', '4j', '4k', '4n', '4q', '4r', '4t', '4w', '4y', '4z'],
['5a', '5c', '5e', '5i', '5j', '5k', '5n', '5q', '5r', '5y'],
['6a', '6c', '6e', '6i', '6k', '6n'],
['7c', '7e'],
['8']
]
def parseTransitions(ruleTrans):
ruleElem = []
if not ruleTrans:
return ruleElem
context = ruleTrans[0]
bNonTot = False
bNegate = False
for ch in ruleTrans[1:] + '9':
if ch in '0123456789':
if not bNonTot:
ruleElem += Hensel[int(context)]
context = ch
bNonTot = False
bNegate = False
elif ch == '-':
bNegate = True
ruleElem += Hensel[int(context)]
else:
bNonTot = True
if bNegate:
ruleElem.remove(context + ch)
else:
ruleElem.append(context + ch)
return ruleElem
def rulestringopt(a):
result = ''
context = ''
lastnum = ''
lastcontext = ''
for i in a:
if i in 'BS':
context = i
result += i
elif i in '012345678':
if (i == lastnum) and (lastcontext == context):
pass
else:
lastcontext = context
lastnum = i
result += i
else:
result += i
result = result.replace('4aceijknqrtwyz', '4')
result = result.replace('3aceijknqry', '3')
result = result.replace('5aceijknqry', '5')
result = result.replace('2aceikn', '2')
result = result.replace('6aceikn', '6')
result = result.replace('1ce', '1')
result = result.replace('7ce', '7')
return result
def getRuleRangeElems(period, ruleRange = 'minmax'):
if g.empty():
return
if period < 1:
return
rule = g.getrule().split(':')[0]
if not (rule[0] == 'B' and '/S' in rule):
g.exit('Please set Golly to an isotropic 2-state rule.')
# Parse rule string to list of transitions for Birth and Survival
oldrule = rule
Bstr, Sstr = rule.split('/')
Bstr = Bstr.lstrip('B')
Sstr = Sstr.lstrip('S')
b_need = parseTransitions(Bstr)
b_OK = list(b_need)
s_need = parseTransitions(Sstr)
s_OK = list(s_need)
patt = g.getcells(g.getrect())
# Record behavior of pattern in current rule
clist = []
poplist = []
for i in range(0,period):
g.run(1)
clist.append(g.getcells(g.getrect()))
poplist.append(g.getpop())
finalpop = g.getpop()
if 'min' in ruleRange:
# Test all rule transitions to determine if they are required
for t in b_OK:
b_need.remove(t)
g.setrule('B' + ''.join(b_need) + '/S' + Sstr)
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
b_need.append(t)
break
except:
b_need.append(t)
break
b_need.sort()
for t in s_OK:
s_need.remove(t)
g.setrule('B' + Bstr + '/S' + ''.join(s_need))
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
s_need.append(t)
break
except:
s_need.append(t)
break
s_need.sort()
if 'max' in ruleRange:
# Test unused rule transitions to determine if they are allowed
allRuleElem = [t for l in Hensel for t in l]
for t in allRuleElem:
if t in b_OK:
continue
b_OK.append(t)
g.setrule('B' + ''.join(b_OK) + '/S' + Sstr)
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
b_OK.remove(t)
break
except:
b_OK.remove(t)
break
b_OK.sort()
for t in allRuleElem:
if t in s_OK:
continue
s_OK.append(t)
g.setrule('B' + Bstr + '/S' + ''.join(s_OK))
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
for j in range(0, period):
g.run(1)
try:
if not(clist[j] == g.getcells(g.getrect())):
s_OK.remove(t)
break
except:
s_OK.remove(t)
break
s_OK.sort()
r = g.getrect()
if r:
g.select(r)
g.clear(0)
g.putcells(patt)
g.setrule(oldrule)
return b_need, s_need, b_OK, s_OK
def setminisorule(period):
if g.empty():
return
if period < 1:
return
b_need, s_need, b_OK, s_OK = getRuleRangeElems(period, ruleRange = 'min')
minrulestr = 'B' + ''.join(sorted(b_need)) + '/S' + ''.join(sorted(s_need))
g.setrule(minrulestr)
return minrulestr
# --------------------------------------------------------------------
# Generator for random order rule iterator over a given rulespace
# Uses a linear congruential generator to iterate over all the rules
# in the given rulespace in a pseudo random order
# The rule space is specified by four lists:
# B_need - the required Birth transitions
# S_need - the required Survival transitions
# B_OK - the optional Birth transitions
# S_OK - the optional Survival transitions
# Provide a value to seed to specify the starting point of the generator
# seed < 2^(len(B_OK) + len(S_OK))
# --------------------------------------------------------------------
def iterRuleStr(B_OK, S_OK, B_need=[], S_need=[], seed=1):
# Pseudo-random rule index generator using an LCG
def randRuleIdx(nB_OK, nS_OK, seed=1):
# LCG state initialisation
m = 2**(nB_OK + nS_OK)
c = 7
a = 5
# Reduce collisions for small seed values
for _ in range(3):
seed = (a*seed+c) % m
# Masks for birth and survival transitions
maskS = 2**nS_OK - 1
maskB = (2**nB_OK - 1) << nS_OK
for ii in xrange(m):
seed = (a*seed+c) % m
randS = seed & maskS
randB = (seed & maskB) >> nS_OK
yield (randB, randS)
# Transition String retrieval
def getTransStr(tList, idx):
trans = ''
for t in tList:
if (idx & 1):
trans += t
idx = idx >> 1
return trans
Bstr = 'B' + ''.join(B_need)
Sstr = '/S' + ''.join(S_need)
for (Bidx, Sidx) in randRuleIdx(len(B_OK), len(S_OK), seed):
rulestr = Bstr + getTransStr(B_OK, Bidx) + Sstr + getTransStr(S_OK, Sidx)
yield rulestr
# --------------------------------------------------------------------
| chunks | identifier_name |
limit.rs | //! Data structures to help perform rate limiting.
use std::collections::{HashMap, VecDeque};
use std::cmp;
use std::fmt::Debug;
use std::io::{self, Read, Write, ErrorKind};
use std::result::Result;
use bytes::{BytesMut, Buf, BufMut};
use crate::util::RorW;
use self::Status::*;
/// Generic buffer for rate-limiting, both reading and writing.
#[derive(Debug)]
pub struct RLBuf {
/// Buffer to help determine demand, for rate-limiting.
buf: BytesMut,
/// Index into `buf`, of the first data not allowed to be used. Everything
/// before it will be used upon request.
///
/// "Used" means `read` by a higher layer, or `write` by a lower layer.
allowance: usize,
/// Amount of data read out since last call to `reset_usage`.
last_used: usize,
}
impl RLBuf {
/** Create a new `RLBuf` with the given lower bound on the initial capacity.
The actual capacity can be got later with `get_demand_cap`.
*/
pub fn new_lb(init: usize) -> RLBuf {
RLBuf {
buf: BytesMut::with_capacity(init),
allowance: 0,
last_used: 0,
}
}
/** Get the current demand.
For higher-level rate-limiting logic, to determine how to rate-limit.
*/
pub fn get_demand(&self) -> usize {
self.buf.len()
}
/** Get the current buffer capacity, i.e. allocated memory.
For higher-level rate-limiting logic, to monitor resource usage, to help it
analyse how efficient it is.
*/
pub fn get_demand_cap(&self) -> usize {
self.buf.capacity()
}
pub fn get_demand_remaining(&self) -> usize {
self.buf.capacity() - self.buf.len()
}
/** Add the allowance, which must not be greater than the demand.
For higher-level rate-limiting logic, as it performs the rate-limiting.
*/
pub fn add_allowance(&mut self, allowance: usize) {
if self.allowance + allowance > self.get_demand() {
panic!("allowance > demand");
}
self.allowance += allowance
}
/** Return the latest usage figures & reset them back to zero.
The first number is the number of allowed bytes that were unused.
The second number is the number of allowed bytes that were used.
For higher-level rate-limiting logic, before rate-limiting is performed, to
detect consumers that consumed even more slowly than the rate limit in the
previous cycle. In response to this, the higher-level logic should give less
allowance for this consumer, to avoid waste.
*/
pub fn reset_usage(&mut self) -> (usize, usize) {
let wasted = self.allowance;
let used = self.last_used;
self.allowance = 0;
self.last_used = 0;
(wasted, used)
}
fn record_demand(&mut self, buf: &[u8]) {
self.buf.extend_from_slice(buf);
}
fn add_demand_cap(&mut self, more: usize) {
self.buf.reserve(more + self.get_demand_remaining());
}
fn take_allowance(&mut self, taken: usize) {
if taken > self.allowance {
panic!("taken > allowance");
}
self.allowance -= taken;
self.last_used += taken;
}
fn consume_read(&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where F: FnMut (&[u8]) -> Result<usize, E> {
let mut used = 0;
let mut err = None;
let to_drain = cmp::min(self.buf.len(), sz);
match write(&self.buf[..to_drain]) {
Ok(n) => used += n,
Err(e) => err = Some(e),
}
self.buf.advance(used);
self.add_demand_cap(used);
self.take_allowance(used);
(used, err)
}
}
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
match r {
Ok(_) => de,
Err(e) => e,
}
}
#[derive(Debug, PartialEq, Eq)]
enum Status {
SOpen,
SOk, // eof
SErr
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf,
pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> |
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
// TODO: actually perform rate-limiting. the current code ought not
// to be (but is) much slower than the async-io version, however
// this only noticeable on localhost-localhost transfers.
demand
}
#[cfg(test)]
mod tests {
use std::fs::*;
use std::fmt::Debug;
use std::io;
use std::io::*;
use std::assert;
use crate::sys::*;
use crate::util::*;
use super::*;
fn assert_would_block<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => assert_eq!(e.kind(), ErrorKind::WouldBlock),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_error<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => match e.kind() {
ErrorKind::WouldBlock => assert!(false),
ErrorKind::Interrupted => assert!(false),
_ => (),
},
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_num_bytes(res: io::Result<usize>, s: usize) {
match res {
Ok(n) => assert_eq!(n, s),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
// TODO: /dev/null etc is not a RawSocket in windows
#[test]
fn read_eof_ok() -> io::Result<()> {
let file = File::open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(RO(file), 1);
let mut buf = [0].repeat(1);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_num_bytes(bf.read(&mut buf), 0); // eof
Ok(())
}
#[test]
fn read_zero_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let unsafe_f = unsafe { File::from_raw_source(file.as_raw_source()) };
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(RO(file), sd);
assert_eq!(sd, bf.rbuf.get_demand_cap());
assert_eq!(0, bf.rbuf.get_demand());
let mut buf = [0].repeat(sx);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_eq!(sd, bf.rbuf.get_demand());
assert_would_block(bf.read(&mut buf));
bf.rbuf.add_allowance(sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx, bf.rbuf.get_demand());
bf.rbuf.add_allowance(sx + sy);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(bf.rbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert_would_block(bf.read(&mut buf));
assert_eq!(bf.rbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SOpen, bf.rstatus);
drop(unsafe_f); // close f, to force an error on the underlying stream
bf.pre_read();
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SErr, bf.rstatus);
bf.rbuf.add_allowance(sd - sx - sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert!(sd - sx - sx - sx <= sx); // otherwise next step fails
assert_num_bytes(bf.read(&mut buf), sd - sx - sx - sx);
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
Ok(())
}
#[test]
fn write_eof_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(WO(file), 1);
let buf = [0].repeat(1);
assert_num_bytes(bf.write(&buf), 1);
bf.post_write();
assert_eq!(bf.wstatus, SOpen);
bf.wbuf.add_allowance(1);
bf.post_write();
assert_eq!(bf.wstatus, SErr);
assert_error(bf.flush());
assert_error(bf.flush());
assert_error(bf.flush());
Ok(())
}
#[test]
fn write_null_ok() -> io::Result<()> {
let file = OpenOptions::new().write(true).open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(WO(file), sd);
assert_eq!(sd, bf.wbuf.get_demand_cap());
assert_eq!(sd, bf.wbuf.get_demand_remaining());
assert_eq!(0, bf.wbuf.get_demand());
let buf = [0].repeat(sd + sx);
bf.flush()?;
assert_num_bytes(bf.write(&buf), sd);
assert_eq!(sd, bf.wbuf.get_demand());
assert_would_block(bf.write(&buf[sd..]));
bf.wbuf.add_allowance(sx);
bf.post_write();
assert_eq!(sd - sx, bf.wbuf.get_demand());
bf.wbuf.add_allowance(sx + sy);
bf.post_write_exact(sx);
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(bf.wbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert!(bf.post_write_exact(0).is_none());
assert_eq!(bf.wbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
assert_num_bytes(bf.write(&buf), sx + sx);
assert_eq!(sd, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
bf.wbuf.add_allowance(sd);
assert_would_block(bf.flush());
assert_would_block(bf.flush());
assert_would_block(bf.flush());
bf.post_write();
assert_eq!(0, bf.wbuf.get_demand());
bf.flush()
}
}
| {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
} | identifier_body |
limit.rs | //! Data structures to help perform rate limiting.
use std::collections::{HashMap, VecDeque};
use std::cmp;
use std::fmt::Debug;
use std::io::{self, Read, Write, ErrorKind};
use std::result::Result;
use bytes::{BytesMut, Buf, BufMut};
use crate::util::RorW;
use self::Status::*;
/// Generic buffer for rate-limiting, both reading and writing.
#[derive(Debug)]
pub struct RLBuf {
/// Buffer to help determine demand, for rate-limiting.
buf: BytesMut,
/// Index into `buf`, of the first data not allowed to be used. Everything
/// before it will be used upon request.
///
/// "Used" means `read` by a higher layer, or `write` by a lower layer.
allowance: usize,
/// Amount of data read out since last call to `reset_usage`.
last_used: usize,
}
impl RLBuf {
/** Create a new `RLBuf` with the given lower bound on the initial capacity.
The actual capacity can be got later with `get_demand_cap`.
*/
pub fn new_lb(init: usize) -> RLBuf {
RLBuf {
buf: BytesMut::with_capacity(init),
allowance: 0,
last_used: 0,
}
}
/** Get the current demand.
For higher-level rate-limiting logic, to determine how to rate-limit.
*/
pub fn get_demand(&self) -> usize {
self.buf.len()
}
/** Get the current buffer capacity, i.e. allocated memory.
For higher-level rate-limiting logic, to monitor resource usage, to help it
analyse how efficient it is.
*/
pub fn get_demand_cap(&self) -> usize {
self.buf.capacity()
}
pub fn get_demand_remaining(&self) -> usize {
self.buf.capacity() - self.buf.len()
}
/** Add the allowance, which must not be greater than the demand.
For higher-level rate-limiting logic, as it performs the rate-limiting.
*/
pub fn add_allowance(&mut self, allowance: usize) {
if self.allowance + allowance > self.get_demand() {
panic!("allowance > demand");
}
self.allowance += allowance
}
/** Return the latest usage figures & reset them back to zero.
The first number is the number of allowed bytes that were unused.
The second number is the number of allowed bytes that were used.
For higher-level rate-limiting logic, before rate-limiting is performed, to
detect consumers that consumed even more slowly than the rate limit in the
previous cycle. In response to this, the higher-level logic should give less
allowance for this consumer, to avoid waste.
*/
pub fn reset_usage(&mut self) -> (usize, usize) {
let wasted = self.allowance;
let used = self.last_used;
self.allowance = 0;
self.last_used = 0;
(wasted, used)
}
fn record_demand(&mut self, buf: &[u8]) {
self.buf.extend_from_slice(buf);
}
fn add_demand_cap(&mut self, more: usize) {
self.buf.reserve(more + self.get_demand_remaining());
}
fn take_allowance(&mut self, taken: usize) {
if taken > self.allowance {
panic!("taken > allowance");
}
self.allowance -= taken;
self.last_used += taken;
}
fn consume_read(&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where F: FnMut (&[u8]) -> Result<usize, E> {
let mut used = 0;
let mut err = None;
let to_drain = cmp::min(self.buf.len(), sz);
match write(&self.buf[..to_drain]) {
Ok(n) => used += n,
Err(e) => err = Some(e),
}
self.buf.advance(used);
self.add_demand_cap(used);
self.take_allowance(used);
(used, err)
}
}
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
match r {
Ok(_) => de,
Err(e) => e,
}
}
#[derive(Debug, PartialEq, Eq)]
enum Status {
SOpen,
SOk, // eof
SErr
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf, | pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
}
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
// TODO: actually perform rate-limiting. the current code ought not
// to be (but is) much slower than the async-io version, however
// this only noticeable on localhost-localhost transfers.
demand
}
#[cfg(test)]
mod tests {
use std::fs::*;
use std::fmt::Debug;
use std::io;
use std::io::*;
use std::assert;
use crate::sys::*;
use crate::util::*;
use super::*;
fn assert_would_block<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => assert_eq!(e.kind(), ErrorKind::WouldBlock),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_error<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => match e.kind() {
ErrorKind::WouldBlock => assert!(false),
ErrorKind::Interrupted => assert!(false),
_ => (),
},
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_num_bytes(res: io::Result<usize>, s: usize) {
match res {
Ok(n) => assert_eq!(n, s),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
// TODO: /dev/null etc is not a RawSocket in windows
#[test]
fn read_eof_ok() -> io::Result<()> {
let file = File::open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(RO(file), 1);
let mut buf = [0].repeat(1);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_num_bytes(bf.read(&mut buf), 0); // eof
Ok(())
}
#[test]
fn read_zero_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let unsafe_f = unsafe { File::from_raw_source(file.as_raw_source()) };
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(RO(file), sd);
assert_eq!(sd, bf.rbuf.get_demand_cap());
assert_eq!(0, bf.rbuf.get_demand());
let mut buf = [0].repeat(sx);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_eq!(sd, bf.rbuf.get_demand());
assert_would_block(bf.read(&mut buf));
bf.rbuf.add_allowance(sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx, bf.rbuf.get_demand());
bf.rbuf.add_allowance(sx + sy);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(bf.rbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert_would_block(bf.read(&mut buf));
assert_eq!(bf.rbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SOpen, bf.rstatus);
drop(unsafe_f); // close f, to force an error on the underlying stream
bf.pre_read();
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SErr, bf.rstatus);
bf.rbuf.add_allowance(sd - sx - sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert!(sd - sx - sx - sx <= sx); // otherwise next step fails
assert_num_bytes(bf.read(&mut buf), sd - sx - sx - sx);
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
Ok(())
}
#[test]
fn write_eof_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(WO(file), 1);
let buf = [0].repeat(1);
assert_num_bytes(bf.write(&buf), 1);
bf.post_write();
assert_eq!(bf.wstatus, SOpen);
bf.wbuf.add_allowance(1);
bf.post_write();
assert_eq!(bf.wstatus, SErr);
assert_error(bf.flush());
assert_error(bf.flush());
assert_error(bf.flush());
Ok(())
}
#[test]
fn write_null_ok() -> io::Result<()> {
let file = OpenOptions::new().write(true).open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(WO(file), sd);
assert_eq!(sd, bf.wbuf.get_demand_cap());
assert_eq!(sd, bf.wbuf.get_demand_remaining());
assert_eq!(0, bf.wbuf.get_demand());
let buf = [0].repeat(sd + sx);
bf.flush()?;
assert_num_bytes(bf.write(&buf), sd);
assert_eq!(sd, bf.wbuf.get_demand());
assert_would_block(bf.write(&buf[sd..]));
bf.wbuf.add_allowance(sx);
bf.post_write();
assert_eq!(sd - sx, bf.wbuf.get_demand());
bf.wbuf.add_allowance(sx + sy);
bf.post_write_exact(sx);
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(bf.wbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert!(bf.post_write_exact(0).is_none());
assert_eq!(bf.wbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
assert_num_bytes(bf.write(&buf), sx + sx);
assert_eq!(sd, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
bf.wbuf.add_allowance(sd);
assert_would_block(bf.flush());
assert_would_block(bf.flush());
assert_would_block(bf.flush());
bf.post_write();
assert_eq!(0, bf.wbuf.get_demand());
bf.flush()
}
} | random_line_split | |
limit.rs | //! Data structures to help perform rate limiting.
use std::collections::{HashMap, VecDeque};
use std::cmp;
use std::fmt::Debug;
use std::io::{self, Read, Write, ErrorKind};
use std::result::Result;
use bytes::{BytesMut, Buf, BufMut};
use crate::util::RorW;
use self::Status::*;
/// Generic buffer for rate-limiting, both reading and writing.
#[derive(Debug)]
pub struct RLBuf {
/// Buffer to help determine demand, for rate-limiting.
buf: BytesMut,
/// Index into `buf`, of the first data not allowed to be used. Everything
/// before it will be used upon request.
///
/// "Used" means `read` by a higher layer, or `write` by a lower layer.
allowance: usize,
/// Amount of data read out since last call to `reset_usage`.
last_used: usize,
}
impl RLBuf {
/** Create a new `RLBuf` with the given lower bound on the initial capacity.
The actual capacity can be got later with `get_demand_cap`.
*/
pub fn new_lb(init: usize) -> RLBuf {
RLBuf {
buf: BytesMut::with_capacity(init),
allowance: 0,
last_used: 0,
}
}
/** Get the current demand.
For higher-level rate-limiting logic, to determine how to rate-limit.
*/
pub fn get_demand(&self) -> usize {
self.buf.len()
}
/** Get the current buffer capacity, i.e. allocated memory.
For higher-level rate-limiting logic, to monitor resource usage, to help it
analyse how efficient it is.
*/
pub fn get_demand_cap(&self) -> usize {
self.buf.capacity()
}
pub fn get_demand_remaining(&self) -> usize {
self.buf.capacity() - self.buf.len()
}
/** Add the allowance, which must not be greater than the demand.
For higher-level rate-limiting logic, as it performs the rate-limiting.
*/
pub fn add_allowance(&mut self, allowance: usize) {
if self.allowance + allowance > self.get_demand() {
panic!("allowance > demand");
}
self.allowance += allowance
}
/** Return the latest usage figures & reset them back to zero.
The first number is the number of allowed bytes that were unused.
The second number is the number of allowed bytes that were used.
For higher-level rate-limiting logic, before rate-limiting is performed, to
detect consumers that consumed even more slowly than the rate limit in the
previous cycle. In response to this, the higher-level logic should give less
allowance for this consumer, to avoid waste.
*/
pub fn reset_usage(&mut self) -> (usize, usize) {
let wasted = self.allowance;
let used = self.last_used;
self.allowance = 0;
self.last_used = 0;
(wasted, used)
}
fn record_demand(&mut self, buf: &[u8]) {
self.buf.extend_from_slice(buf);
}
fn add_demand_cap(&mut self, more: usize) {
self.buf.reserve(more + self.get_demand_remaining());
}
fn take_allowance(&mut self, taken: usize) {
if taken > self.allowance {
panic!("taken > allowance");
}
self.allowance -= taken;
self.last_used += taken;
}
fn | (&mut self, buf: &mut [u8]) -> usize {
let to_drain = cmp::min(buf.len(), self.allowance);
self.buf.copy_to_slice(&mut buf[..to_drain]);
self.buf.reserve(to_drain);
self.take_allowance(to_drain);
to_drain
}
fn consume_write<F, E>(&mut self, sz: usize, mut write: F) -> (usize, Option<E>)
where F: FnMut (&[u8]) -> Result<usize, E> {
let mut used = 0;
let mut err = None;
let to_drain = cmp::min(self.buf.len(), sz);
match write(&self.buf[..to_drain]) {
Ok(n) => used += n,
Err(e) => err = Some(e),
}
self.buf.advance(used);
self.add_demand_cap(used);
self.take_allowance(used);
(used, err)
}
}
fn unwrap_err_or<T, E>(r: Result<T, E>, de: E) -> E {
match r {
Ok(_) => de,
Err(e) => e,
}
}
#[derive(Debug, PartialEq, Eq)]
enum Status {
SOpen,
SOk, // eof
SErr
}
/** Rate-limited asynchronous analogue of `std::io::BufReader` + `std::io::BufWriter`.
You **must** call `flush()` before dropping this (which closes the stream).
This is even more important than doing so on `BufWriter` - if not, you may lose
data. See https://internals.rust-lang.org/t/asynchronous-destructors/11127/49
for an in-depth explanation.
*/
#[derive(Debug)]
pub struct RateLimited<T> where T: ?Sized {
rstatus: Status,
pub(crate) rbuf: RLBuf,
wstatus: Status,
pub(crate) wbuf: RLBuf,
pub(crate) inner: T,
}
impl<T> RateLimited<T> {
/** Create a new `RateLimited` with the given initial capacity.
The inner stream must already be in non-blocking mode.
*/
pub fn new_lb(inner: T, init: usize) -> RateLimited<T> {
RateLimited {
inner: inner,
rstatus: SOpen,
rbuf: RLBuf::new_lb(init),
wstatus: SOpen,
wbuf: RLBuf::new_lb(init),
}
}
}
impl<T> RateLimited<T> where T: RorW + ?Sized {
/** Do a pre-read.
That is, do a non-blocking read from the underlying handle, filling up the
remaining part of `rbuf`.
This is to be used by higher-level code, before it performs the rate-limiting.
*/
pub fn pre_read(&mut self) {
match self.rstatus {
SOpen => {
let remain = self.rbuf.get_demand_remaining();
if remain == 0 {
return;
}
// TODO: replace with https://github.com/rust-lang/rfcs/pull/2930
let mut buf: &mut [u8] = unsafe { std::mem::transmute(self.rbuf.buf.bytes_mut()) };
match self.inner.read(&mut buf) { // TODO: assert non-blocking
Ok(0) => {
self.rstatus = SOk;
},
Ok(n) => {
unsafe {
self.rbuf.buf.advance_mut(n);
}
if n >= remain {
// TODO: automatically grow the buffer capacity
log::debug!("rbuf pre_read filled buffer");
}
},
Err(e) => match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
// println!("pre_read: {:?}", e);
self.rstatus = SErr;
}
},
}
},
_ => (), // already finished
}
}
pub fn is_readable(&self) -> bool {
self.rstatus != SOpen || self.rbuf.allowance > 0
}
/** Do a post-write.
That is, do a non-blocking write to the underlying handle, up to the current
allowance of `wbuf`.
This is to be used by higher-level code, after it performs the rate-limiting.
*/
pub fn post_write(&mut self) {
self.post_write_exact(self.wbuf.allowance);
}
pub fn is_writable(&self) -> bool {
self.wstatus != SOpen || self.wbuf.get_demand_remaining() > 0
}
// extra param is exposed for testing only
fn post_write_exact(&mut self, sz: usize) -> Option<io::Error> {
match self.wbuf.get_demand() {
0 => None,
_ => match self.wbuf.allowance {
0 => None,
_ => {
let w = &mut self.inner;
let (_, err) = self.wbuf.consume_write(sz, |b| w.write(b));
if let Some(e) = err.as_ref() {
match e.kind() {
ErrorKind::WouldBlock => (),
ErrorKind::Interrupted => (),
_ => {
self.wstatus = SErr;
},
}
}
err
}
}
}
}
}
impl<T> Read for RateLimited<T> where T: Read {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self.rbuf.get_demand() {
0 => match self.rstatus {
SOpen => Err(io::Error::new(ErrorKind::WouldBlock, "")),
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.read(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
},
_ => match self.rbuf.allowance {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => Ok(self.rbuf.consume_read(buf)),
}
}
}
}
impl<T> Write for RateLimited<T> where T: Write {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self.wstatus {
SOpen => {
// TODO: figure out when it's appropriate to automatically grow the buffer capacity
let remain = self.wbuf.get_demand_remaining();
match remain {
0 => Err(io::Error::new(ErrorKind::WouldBlock, "")),
_ => {
let n = cmp::min(buf.len(), remain);
self.wbuf.record_demand(&buf[..n]);
Ok(n)
}
}
},
SOk => Ok(0),
SErr => Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
}
}
fn flush(&mut self) -> io::Result<()> {
match self.wstatus {
SErr =>
// if there was an error, wbuf might not have been consumed, so output error even if wbuf is non-empty
Err(unwrap_err_or(self.inner.write(&mut []), io::Error::new(ErrorKind::Other, "Ok after Err"))),
_ => match self.wbuf.get_demand() {
0 => {
//println!("flush OK");
Ok(())
},
_ => {
//println!("flush waiting :( {} {}", self.wbuf.get_demand(), self.wbuf.allowance);
Err(io::Error::new(ErrorKind::WouldBlock, ""))
}, // something else is responsible for calling post_write
}
}
}
}
#[derive(Debug)]
pub struct UsageStats {
samples: VecDeque<(usize, usize)>,
max_samples: usize,
current_usage: (usize, usize), // (waste, used)
}
impl UsageStats {
pub fn new() -> UsageStats {
UsageStats {
samples: VecDeque::new(),
max_samples: 4096, // TODO: make configurable
current_usage: (0, 0),
}
}
pub fn add_current_usage(&mut self, usage: (usize, usize)) {
self.current_usage.0 += usage.0;
self.current_usage.1 += usage.1;
}
pub fn finalise_current_usage(&mut self) -> (usize, usize) {
while self.samples.len() >= self.max_samples {
self.samples.pop_front();
}
let usage = self.current_usage;
self.samples.push_back(usage);
self.current_usage = (0, 0);
usage
}
pub fn estimate_next_usage(&mut self) -> usize {
// TODO: something smarter
// TODO: do something with the waste, e.g. to give more allowance
self.samples.back().unwrap().1
}
}
pub fn derive_allowance<K>(demand: HashMap<K, usize>) -> HashMap<K, usize> {
// TODO: actually perform rate-limiting. the current code ought not
// to be (but is) much slower than the async-io version, however
// this only noticeable on localhost-localhost transfers.
demand
}
#[cfg(test)]
mod tests {
use std::fs::*;
use std::fmt::Debug;
use std::io;
use std::io::*;
use std::assert;
use crate::sys::*;
use crate::util::*;
use super::*;
fn assert_would_block<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => assert_eq!(e.kind(), ErrorKind::WouldBlock),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_error<T>(res: io::Result<T>) where T: Debug {
match res {
Err(e) => match e.kind() {
ErrorKind::WouldBlock => assert!(false),
ErrorKind::Interrupted => assert!(false),
_ => (),
},
x => {
println!("{:?}", x);
assert!(false);
},
}
}
fn assert_num_bytes(res: io::Result<usize>, s: usize) {
match res {
Ok(n) => assert_eq!(n, s),
x => {
println!("{:?}", x);
assert!(false);
},
}
}
// TODO: /dev/null etc is not a RawSocket in windows
#[test]
fn read_eof_ok() -> io::Result<()> {
let file = File::open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(RO(file), 1);
let mut buf = [0].repeat(1);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_num_bytes(bf.read(&mut buf), 0); // eof
Ok(())
}
#[test]
fn read_zero_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let unsafe_f = unsafe { File::from_raw_source(file.as_raw_source()) };
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(RO(file), sd);
assert_eq!(sd, bf.rbuf.get_demand_cap());
assert_eq!(0, bf.rbuf.get_demand());
let mut buf = [0].repeat(sx);
assert_would_block(bf.read(&mut buf));
bf.pre_read();
assert_eq!(sd, bf.rbuf.get_demand());
assert_would_block(bf.read(&mut buf));
bf.rbuf.add_allowance(sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx, bf.rbuf.get_demand());
bf.rbuf.add_allowance(sx + sy);
assert_num_bytes(bf.read(&mut buf), sx);
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(bf.rbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert_would_block(bf.read(&mut buf));
assert_eq!(bf.rbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SOpen, bf.rstatus);
drop(unsafe_f); // close f, to force an error on the underlying stream
bf.pre_read();
assert_eq!(sd - sx - sx, bf.rbuf.get_demand());
assert_eq!(SErr, bf.rstatus);
bf.rbuf.add_allowance(sd - sx - sx);
assert_num_bytes(bf.read(&mut buf), sx);
assert!(sd - sx - sx - sx <= sx); // otherwise next step fails
assert_num_bytes(bf.read(&mut buf), sd - sx - sx - sx);
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
assert_error(bf.read(&mut buf));
Ok(())
}
#[test]
fn write_eof_err() -> io::Result<()> {
let file = File::open("/dev/zero")?;
set_non_blocking(file.as_raw_source())?;
let mut bf = RateLimited::new_lb(WO(file), 1);
let buf = [0].repeat(1);
assert_num_bytes(bf.write(&buf), 1);
bf.post_write();
assert_eq!(bf.wstatus, SOpen);
bf.wbuf.add_allowance(1);
bf.post_write();
assert_eq!(bf.wstatus, SErr);
assert_error(bf.flush());
assert_error(bf.flush());
assert_error(bf.flush());
Ok(())
}
#[test]
fn write_null_ok() -> io::Result<()> {
let file = OpenOptions::new().write(true).open("/dev/null")?;
set_non_blocking(file.as_raw_source())?;
let sd = 4095; // in case VecDeque changes implementation, this needs to be changed
let sx = 1024;
let sy = 1024;
let mut bf = RateLimited::new_lb(WO(file), sd);
assert_eq!(sd, bf.wbuf.get_demand_cap());
assert_eq!(sd, bf.wbuf.get_demand_remaining());
assert_eq!(0, bf.wbuf.get_demand());
let buf = [0].repeat(sd + sx);
bf.flush()?;
assert_num_bytes(bf.write(&buf), sd);
assert_eq!(sd, bf.wbuf.get_demand());
assert_would_block(bf.write(&buf[sd..]));
bf.wbuf.add_allowance(sx);
bf.post_write();
assert_eq!(sd - sx, bf.wbuf.get_demand());
bf.wbuf.add_allowance(sx + sy);
bf.post_write_exact(sx);
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(bf.wbuf.reset_usage(), (sy, sx + sy));
// sy bytes of allowance were wasted
assert!(bf.post_write_exact(0).is_none());
assert_eq!(bf.wbuf.reset_usage(), (0, 0));
assert_eq!(sd - sx - sx, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
assert_num_bytes(bf.write(&buf), sx + sx);
assert_eq!(sd, bf.wbuf.get_demand());
assert_eq!(SOpen, bf.wstatus);
bf.wbuf.add_allowance(sd);
assert_would_block(bf.flush());
assert_would_block(bf.flush());
assert_would_block(bf.flush());
bf.post_write();
assert_eq!(0, bf.wbuf.get_demand());
bf.flush()
}
}
| consume_read | identifier_name |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes a `application/x-wwww-url-encoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes a `application/x-wwww-url-encoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * Main `deserialize` methods defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
| forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
}
| // visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
| identifier_body |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes a `application/x-wwww-url-encoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes a `application/x-wwww-url-encoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * Main `deserialize` methods defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor |
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
// visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
} | {
self.deserialize_map(visitor)
} | random_line_split |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes a `application/x-wwww-url-encoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes a `application/x-wwww-url-encoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * Main `deserialize` methods defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn de | >(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
Deserializer::with_map(x).deserialize_map(visitor)
} else {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
// visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
}
| serialize_struct<V | identifier_name |
de.rs | //! Deserialization support for the `application/x-www-form-urlencoded` format.
use serde::de;
use std::collections::{
HashMap,
};
use std::borrow::Cow;
#[doc(inline)]
pub use serde::de::value::Error;
use serde::de::value::MapDeserializer;
use std::io::Read;
// use url::form_urlencoded::Parse as UrlEncodedParse;
use url::form_urlencoded::parse;
/// Deserializes a `application/x-wwww-url-encoded` value from a `&[u8]`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_bytes::<Vec<(String, String)>>(
/// b"bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_bytes<T: de::Deserialize>(input: &[u8]) -> Result<T, Error> {
T::deserialize(Deserializer::new(input))
}
/// Deserializes a `application/x-wwww-url-encoded` value from a `&str`.
///
/// ```
/// let meal = vec![
/// ("bread".to_owned(), "baguette".to_owned()),
/// ("cheese".to_owned(), "comté".to_owned()),
/// ("fat".to_owned(), "butter".to_owned()),
/// ("meat".to_owned(), "ham".to_owned()),
/// ];
///
/// let mut res = serde_urlencoded::from_str::<Vec<(String, String)>>(
/// "bread=baguette&cheese=comt%C3%A9&meat=ham&fat=butter").unwrap();
/// res.sort();
/// assert_eq!(res, meal);
/// ```
pub fn from_str<T: de::Deserialize>(input: &str) -> Result<T, Error> {
from_bytes(input.as_bytes())
}
/// Convenience function that reads all bytes from `reader` and deserializes
/// them with `from_bytes`.
pub fn from_reader<T, R>(mut reader: R) -> Result<T, Error>
where T: de::Deserialize, R: Read
{
let mut buf = vec![];
reader.read_to_end(&mut buf)
.map_err(|e| {
de::Error::custom(format_args!("could not read input: {}", e))
})?;
from_bytes(&buf)
}
/// A deserializer for the `application/x-www-form-urlencoded` format.
///
/// * Supported top-level outputs are structs, maps and sequences of pairs,
/// with or without a given length.
///
/// * Main `deserialize` methods defers to `deserialize_map`.
///
/// * Everything else but `deserialize_seq` and `deserialize_seq_fixed_size`
/// defers to `deserialize`.
pub struct Deserializer<'a> {
// value: &'a [u8],
// map: HashMap<Cow<'a, str>, Level<'a>>,
// parser: Option<UrlEncodedParse<'a>>,
iter: iter::Peekable<iter::Fuse<IntoIter<Cow<'a, str>, Level<'a>>>>,
}
// use serde::de::MapVisitor;
use std::iter;
use std::collections::hash_map::{Entry, IntoIter};
#[derive(Debug)]
enum Level<'a> {
Nested(HashMap<Cow<'a, str>, Level<'a>>),
Sequence(Vec<Cow<'a, str>>),
Flat(Cow<'a, str>),
Invalid(&'static str),
}
impl<'a> Deserializer<'a> {
// Call this with a map, with key k, and rest should the rest of the key.
// I.e. a[b][c]=v would be called as parse(map, "a", "b][c]", v)
fn parse(map: &mut HashMap<Cow<'a, str>, Level<'a>>, k: Cow<'a, str>, rest: Cow<'a, str>, v: Cow<'a, str>) {
if rest.is_empty() {
match map.entry(k) {
Entry::Occupied(mut o) => {
o.insert(Level::Invalid("Multiple values for one key"));
},
Entry::Vacant(vm) => {
vm.insert(Level::Flat(v));
}
}
return;
} else {
// rest is not empty
// "b][c]" =? "b", "[c]"
let (next_key, next_rest) = split(rest, ']');
if next_key.is_empty() {
// key is of the form a[]
// We assume this is at the bottom layer of nesting, otherwise we have
// ambiguity: a[][b]=1, a[][b]=2, a[][c]=3, a[][c] = 4
// ==> [{b:1, c:3}, {b:2, c:4}] or
// ==> [{b:1, c:4}, {b:2, c:3}] ? Ordering not clear.
if next_rest != "]" {
map.insert(k, Level::Invalid("unindexed nested structs is unsupported"));
return;
}
match map.entry(k) {
Entry::Vacant(vm) => {
let vec: Vec<Cow<'a, str>> = Vec::new();
vm.insert(Level::Sequence(vec));
},
Entry::Occupied(o) => {
match o.into_mut() {
&mut Level::Sequence(ref mut inner) => { inner.push(v); },
x => { *x = Level::Invalid("multiple types for one key"); }
}
}
};
return;
} else {
// assert_eq!(&rest.as_ref()[0..1], "[");
// println!("{:?}", next_rest);
let (e, next_rest) = split(next_rest, '[');
assert_eq!(e, "");
match map.entry(k).or_insert(Level::Nested(HashMap::new())) {
&mut Level::Nested(ref mut m) => Deserializer::parse(m, next_key, next_rest, v),
x => { *x = Level::Invalid(""); return; }
}
return;
}
}
}
/// Returns a new `Deserializer`.
pub fn new(input: &'a [u8]) -> Self {
let mut map = HashMap::<Cow<str>, Level<'a>>::new();
let parser = parse(input).into_iter();
for (k, v) in parser {
let (ldepth, rdepth) = k.chars().fold((0, 0), |(acc0, acc1), x| {
match x {
'[' => (acc0+1, acc1),
']' => (acc0, acc1+1),
_ => (acc0, acc1)
}
});
debug_assert!(ldepth == rdepth);
// Split keystring into the `root` key and the `rest`.
// a[b][c]/// => "a", "b][c]..."
let (root, rest) = split(k, '[');
Deserializer::parse(&mut map, root, rest, v); }
// println!("{:?}", map);
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
fn with_map(map: HashMap<Cow<'a, str>, Level<'a>>) -> Self {
Deserializer {
iter: map.into_iter().fuse().peekable(),
}
}
}
fn split<'a>(input: Cow<'a, str>, split: char) -> (Cow<'a, str>, Cow<'a, str>) {
match input {
Cow::Borrowed(v) => {
let mut split2 = v.splitn(2, split);
let s1 = split2.next().unwrap();
let s2 = split2.next().unwrap_or("");
(Cow::Borrowed(s1), Cow::Borrowed(s2))
},
Cow::Owned(v) => {
// let v = v.into_bytes();
let mut split_idx = v.len();
for (idx, c) in v.chars().enumerate() {
if c == split {
split_idx = idx;
break;
}
}
// b][c] split = ], idx = 1
if split_idx < v.len() {
let mut v = v.into_bytes();
let v2 = v.split_off(split_idx+1);
v.pop();
unsafe {
return (Cow::Owned(String::from_utf8_unchecked(v)),
Cow::Owned(String::from_utf8_unchecked(v2)))
}
} else {
return (Cow::Owned(v), Cow::Owned("".to_string()))
}
// (Cow::Owned(v),Cow::Borrowed(""))
}
}
}
impl<'a, 'b> de::Deserializer for Deserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
self.deserialize_map(visitor)
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
visitor.visit_map(self)
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_map(self)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
visitor.visit_seq(MapDeserializer::new(self.iter))
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
use serde::de::value::{SeqDeserializer, ValueDeserializer};
impl<'a> de::MapVisitor for Deserializer<'a> {
type Error = Error;
fn visit_key_seed<K>(&mut self, seed: K) -> Result<Option<K::Value>, Error>
where K: de::DeserializeSeed,
{
if let Some(&(ref key, _)) = self.iter.peek() {
return seed.deserialize(key.clone().into_deserializer()).map(Some)
};
Ok(None)
}
fn visit_value_seed<V>(&mut self, seed: V) -> Result<V::Value, Error>
where V: de::DeserializeSeed,
{
if let Some((_, value)) = self.iter.next() {
seed.deserialize(value.into_deserializer())
} else {
Err(de::Error::custom("Somehow the list was empty after a non-empty key was returned"))
}
}
}
struct LevelDeserializer<'a>(Level<'a>);
impl<'a> de::Deserializer for LevelDeserializer<'a> {
type Error = Error;
fn deserialize<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Flat(x) = self.0 {
x.into_deserializer().deserialize(visitor)
} else {
Err(de::Error::custom("cannot deserialize value"))
}
}
fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor,
{
if let Level::Nested(x) = self.0 {
| lse {
Err(de::Error::custom("value does not appear to be a map"))
}
}
// _serde::Deserializer::deserialize_struct(deserializer,"A", FIELDS, __Visitor)
fn deserialize_struct<V>(self,
_name: &'static str,
_fields: &'static [&'static str],
visitor: V)
-> Result<V::Value, Self::Error>
where V: de::Visitor
{
self.deserialize_map(visitor)
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: de::Visitor
{
// visitor.visit_seq(self)
if let Level::Sequence(x) = self.0 {
SeqDeserializer::new(x.into_iter()).deserialize(visitor)
} else {
Err(de::Error::custom("value does not appear to be a sequence"))
}
}
forward_to_deserialize! {
bool
u8
u16
u32
u64
i8
i16
i32
i64
f32
f64
char
str
string
unit
option
bytes
byte_buf
unit_struct
// seq
seq_fixed_size
newtype_struct
tuple_struct
// struct
struct_field
tuple
enum
ignored_any
}
}
impl<'a> ValueDeserializer for Level<'a>
{
type Deserializer = LevelDeserializer<'a>;
fn into_deserializer(self) -> Self::Deserializer {
LevelDeserializer(self)
}
}
| Deserializer::with_map(x).deserialize_map(visitor)
} e | conditional_block |
JsPsych.js | /* eslint-disable */
import React, { useEffect, useState } from 'react';
import { useParams } from 'react-router-dom';
import {
observeStimuliCompletion,
uploadSelectionResult,
} from '../../../firebase/api/gcp-utils';
/**
* Component to load jsPsych's "Self Image Experiment".
*
* @component
* @return {object} (
* <React.Fragment />
* )
*/
export const JsPsych = ({ selectionTaskCompletionHandler }) => {
const { experimentId, participantId } = useParams(); // Parse URL params
const [stimuliUrls, setStimuliUrls] = useState([]);
const [ready, setReady] = useState(false);
useEffect(async () => {
// Load participantId, experimentId, and 400 stimuli urls
await observeStimuliCompletion(
participantId,
experimentId,
setStimuliUrls,
errorLoadingJsPsych,
);
}, []);
useEffect(() => {
if (stimuliUrls.length > 0) |
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function generateTrials(numberOfTrial) {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml,
data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = trials.filter({ selection: 'left' }).count();
const rightTrials = trials.filter({ selection: 'right' }).count();
const responseTime = Math.round(trials.select('rt').mean());
return (
'<h1>Completed!</h1>' +
'<p>You completed ' +
trialCount +
' trials.</p>' +
'<p>You selected image on the left in ' +
leftTrials +
' trials.</p>' +
'<p>You selected image on the right in ' +
rightTrials +
' trials.</p>' +
'<p>Your average response time ' +
responseTime +
'ms.</p>' +
'<p></p>' +
'<p>Press any key to complete the experiment. Thank you!</p>'
);
},
};
timeline.push(reportBlock);
// Checks to see if we have participantId, experimentId, and stimuli URLs ready
if (ready) {
/* start the experiment */
jsPsych.init({
timeline: timeline,
display_element: 'jspsych-target',
on_finish: function () {
// Filter out data to only show 'trial' data via label
let experimentalData = transformExperimentData(
jsPsych.data.get().filter({ label: 'trial' }).json('pretty'),
);
saveExperimentData(experimentalData);
},
});
}
// Implementation of previous team timeline & trial logic ------------------------------ END
}, 1000);
}
}, [ready]);
// Error handler that prompts the participant to re-click experiment.
const errorLoadingJsPsych = (errorCode) => {
window.alert(
'Something went wrong. Please click on experiment again.' +
' Error code: ' +
errorCode,
);
};
return (
<div>
{/* Including necessary JsPsych plugin classes & button cues */}
<h4 id="title">Which one do you pick?</h4>
<div id="instruction">
<div class="key-instruction">E - select image on left</div>
<div id="between" />
<div class="key-instruction">I - select image on right</div>
</div>
<p/>
<h5 id="note">Note: close experiment window when finished</h5>
<div id="jspsych-target"></div>
</div>
);
};
| {
setReady(true);
} | conditional_block |
JsPsych.js | /* eslint-disable */
import React, { useEffect, useState } from 'react';
import { useParams } from 'react-router-dom';
import {
observeStimuliCompletion,
uploadSelectionResult,
} from '../../../firebase/api/gcp-utils';
/**
* Component to load jsPsych's "Self Image Experiment".
*
* @component
* @return {object} (
* <React.Fragment />
* )
*/
export const JsPsych = ({ selectionTaskCompletionHandler }) => {
const { experimentId, participantId } = useParams(); // Parse URL params
const [stimuliUrls, setStimuliUrls] = useState([]);
const [ready, setReady] = useState(false);
useEffect(async () => {
// Load participantId, experimentId, and 400 stimuli urls
await observeStimuliCompletion(
participantId,
experimentId,
setStimuliUrls,
errorLoadingJsPsych,
);
}, []);
useEffect(() => {
if (stimuliUrls.length > 0) {
setReady(true);
}
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function generateTrials(numberOfTrial) |
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = trials.filter({ selection: 'left' }).count();
const rightTrials = trials.filter({ selection: 'right' }).count();
const responseTime = Math.round(trials.select('rt').mean());
return (
'<h1>Completed!</h1>' +
'<p>You completed ' +
trialCount +
' trials.</p>' +
'<p>You selected image on the left in ' +
leftTrials +
' trials.</p>' +
'<p>You selected image on the right in ' +
rightTrials +
' trials.</p>' +
'<p>Your average response time ' +
responseTime +
'ms.</p>' +
'<p></p>' +
'<p>Press any key to complete the experiment. Thank you!</p>'
);
},
};
timeline.push(reportBlock);
// Checks to see if we have participantId, experimentId, and stimuli URLs ready
if (ready) {
/* start the experiment */
jsPsych.init({
timeline: timeline,
display_element: 'jspsych-target',
on_finish: function () {
// Filter out data to only show 'trial' data via label
let experimentalData = transformExperimentData(
jsPsych.data.get().filter({ label: 'trial' }).json('pretty'),
);
saveExperimentData(experimentalData);
},
});
}
// Implementation of previous team timeline & trial logic ------------------------------ END
}, 1000);
}
}, [ready]);
// Error handler that prompts the participant to re-click experiment.
const errorLoadingJsPsych = (errorCode) => {
window.alert(
'Something went wrong. Please click on experiment again.' +
' Error code: ' +
errorCode,
);
};
return (
<div>
{/* Including necessary JsPsych plugin classes & button cues */}
<h4 id="title">Which one do you pick?</h4>
<div id="instruction">
<div class="key-instruction">E - select image on left</div>
<div id="between" />
<div class="key-instruction">I - select image on right</div>
</div>
<p/>
<h5 id="note">Note: close experiment window when finished</h5>
<div id="jspsych-target"></div>
</div>
);
};
| {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml,
data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
} | identifier_body |
JsPsych.js | /* eslint-disable */
import React, { useEffect, useState } from 'react';
import { useParams } from 'react-router-dom';
import {
observeStimuliCompletion,
uploadSelectionResult,
} from '../../../firebase/api/gcp-utils';
/**
* Component to load jsPsych's "Self Image Experiment".
*
* @component
* @return {object} (
* <React.Fragment />
* )
*/
export const JsPsych = ({ selectionTaskCompletionHandler }) => {
const { experimentId, participantId } = useParams(); // Parse URL params
const [stimuliUrls, setStimuliUrls] = useState([]);
const [ready, setReady] = useState(false);
useEffect(async () => {
// Load participantId, experimentId, and 400 stimuli urls
await observeStimuliCompletion(
participantId,
experimentId,
setStimuliUrls,
errorLoadingJsPsych,
);
}, []);
useEffect(() => {
if (stimuliUrls.length > 0) {
setReady(true);
}
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function | (numberOfTrial) {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml,
data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = trials.filter({ selection: 'left' }).count();
const rightTrials = trials.filter({ selection: 'right' }).count();
const responseTime = Math.round(trials.select('rt').mean());
return (
'<h1>Completed!</h1>' +
'<p>You completed ' +
trialCount +
' trials.</p>' +
'<p>You selected image on the left in ' +
leftTrials +
' trials.</p>' +
'<p>You selected image on the right in ' +
rightTrials +
' trials.</p>' +
'<p>Your average response time ' +
responseTime +
'ms.</p>' +
'<p></p>' +
'<p>Press any key to complete the experiment. Thank you!</p>'
);
},
};
timeline.push(reportBlock);
// Checks to see if we have participantId, experimentId, and stimuli URLs ready
if (ready) {
/* start the experiment */
jsPsych.init({
timeline: timeline,
display_element: 'jspsych-target',
on_finish: function () {
// Filter out data to only show 'trial' data via label
let experimentalData = transformExperimentData(
jsPsych.data.get().filter({ label: 'trial' }).json('pretty'),
);
saveExperimentData(experimentalData);
},
});
}
// Implementation of previous team timeline & trial logic ------------------------------ END
}, 1000);
}
}, [ready]);
// Error handler that prompts the participant to re-click experiment.
const errorLoadingJsPsych = (errorCode) => {
window.alert(
'Something went wrong. Please click on experiment again.' +
' Error code: ' +
errorCode,
);
};
return (
<div>
{/* Including necessary JsPsych plugin classes & button cues */}
<h4 id="title">Which one do you pick?</h4>
<div id="instruction">
<div class="key-instruction">E - select image on left</div>
<div id="between" />
<div class="key-instruction">I - select image on right</div>
</div>
<p/>
<h5 id="note">Note: close experiment window when finished</h5>
<div id="jspsych-target"></div>
</div>
);
};
| generateTrials | identifier_name |
JsPsych.js | /* eslint-disable */
import React, { useEffect, useState } from 'react';
import { useParams } from 'react-router-dom';
import {
observeStimuliCompletion,
uploadSelectionResult,
} from '../../../firebase/api/gcp-utils';
/**
* Component to load jsPsych's "Self Image Experiment".
*
* @component
* @return {object} (
* <React.Fragment />
* )
*/
export const JsPsych = ({ selectionTaskCompletionHandler }) => {
const { experimentId, participantId } = useParams(); // Parse URL params
const [stimuliUrls, setStimuliUrls] = useState([]);
const [ready, setReady] = useState(false);
useEffect(async () => {
// Load participantId, experimentId, and 400 stimuli urls
await observeStimuliCompletion(
participantId,
experimentId,
setStimuliUrls,
errorLoadingJsPsych,
);
}, []);
useEffect(() => {
if (stimuliUrls.length > 0) {
setReady(true);
}
}, [stimuliUrls]);
useEffect(() => {
if (ready) {
setTimeout(() => {
// Implementation of previous team timeline & trial logic -------------------------- START
/* create timeline */
const timeline = [];
/* number of trials */
// NOTE: Adjust line below to shorten the number of trials. 199 will go through all 200 iterations.
// NUMBER_OF_TRIALS = 199, means a total of 200 trials (0-indexed)
const NUMBER_OF_TRIALS = 199;
const exampleImageOne =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample1.png?alt=media&token=8a6ee16a-0700-40ef-a7cf-5c3e380b5b3f';
const exampleImageTwo =
'https://firebasestorage.googleapis.com/v0/b/cs6510-spr2021.appspot.com/o/example-' +
'stimuli-images%2Fexample2.png?alt=media&token=fb0a7373-add3-4187-9449-2f206fd08ae3';
/* define instructions */
const instructions = {
type: 'html-keyboard-response',
stimulus: function () {
return (
'<h1>Instruction</h1>' +
'<p>In this experiment, two images will be shown ' +
'on the screen. Choose the image that looks more like you. </p>' +
'<p>Press the letter <strong>E</strong> on the keyboard to select the image on the left.</p>' +
'<p>Press the letter <strong>I</strong> on the keyboard to select the image on the right.</p> ' +
'<p></p>' +
"<div style='width: 900px; margin: auto;'>" +
"<div style='float: left;'><img width='300' src='" +
exampleImageOne +
"' alt='Error loading example 1'/>" +
"<p class='small'><strong>Press the E key</strong></p></div>" +
"<div class='float: right;'><img width='300' src='" +
exampleImageTwo +
"' alt='Error loading example 2'/>" +
"<p class='small'><strong>Press the I key</strong></p></div>" +
'</div>' +
'<p></p>' +
'<p><strong>Press any key to begin.</strong></p>'
);
},
};
timeline.push(instructions);
// Preload images for trials
var preload = {
type: 'preload',
images: stimuliUrls,
};
timeline.push(preload);
/* generate trials with number of trials */
function generateTrials(numberOfTrial) {
const trials = [];
for (let i = 0; i <= numberOfTrial; i++) {
const invFilePath = stimuliUrls[i];
const oriFilePath = stimuliUrls[i + 1];
const twoStimulusHtml =
// For the first 200 images that are rendered, show original on left & show inverted on right
i <= numberOfTrial / 2
? "<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
'</div>'
: // For the last 200 images that are rendered, show inverted on left & show original on right
"<div style='width: 900px; margin: auto;'>" +
"<div class='float: left;'><img width='300' src='" +
invFilePath +
"'/>" +
'</div>' +
"<div style='float: left; width: 300px; height: 300px;'>" +
"<div style='font-size: 60px; width:300px height: 30px; margin-top: 135px; margin-bottom: 135px;'>+</div>" +
'</div>' +
"<div class='float: left;'><img width='300' src='" +
oriFilePath +
"'/>" +
'</div>' +
'</div>';
const newStimuli = {
stimulus: twoStimulusHtml, | data: { label: 'trial', trial_num: i },
};
trials.push(newStimuli);
}
return trials;
}
const fixation = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:red; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 1000, // From 1500
data: { label: 'fixation' },
};
const trial = {
type: 'html-keyboard-response',
stimulus: jsPsych.timelineVariable('stimulus'),
choices: ['e', 'i'],
data: jsPsych.timelineVariable('data'),
trial_duration: 1000, // 1000,
post_trial_gap: 0,
on_finish: function (data) {
if (data.response === 'e') {
data.selection = 'left';
} else if (data.response === 'i') {
data.selection = 'right';
} else {
data.selection = 'none';
}
},
};
const postTrialPause = {
type: 'html-keyboard-response',
stimulus:
'<div style="height:307px; width:900px;"><div style="width:900px; height: 30px; color:blue; margin-top:135px; font-size:60px;">+</div></div>',
choices: jsPsych.NO_KEYS,
trial_duration: 250, // From 1500
data: { label: 'post-fixation' },
};
// Transforms the experimental data from JsPsych to follow the back end JSON scheme
function transformExperimentData() {
let trialSelections = jsPsych.data
.get()
.filter({ label: 'trial' })
.select('selection').values;
let newData = [];
let columnHeaders = {
stimulus: 'trial number',
response:
'trial response is whether or not the user chose the original image; 1 = correct, -1 = incorrect',
trait: 'untrustworthy by default',
subject:
'trial subject is the placement of original image; 1 = left, 2 = right',
};
newData.push(columnHeaders);
for (
let trialNumber = 0;
trialNumber < trialSelections.length;
trialNumber++
) {
let trialResponse;
let trialSubject;
// If the user doesn't make a selection, we are counting it as '-1'; an untrustworthy trial.
if (trialNumber <= NUMBER_OF_TRIALS / 2) {
// For the first half trials, original image on left.
trialResponse = trialSelections[trialNumber] === 'left' ? 1 : -1;
trialSubject = 1;
} else {
// For the second half trials, original image on right.
trialResponse = trialSelections[trialNumber] === 'right' ? 1 : -1;
trialSubject = 2;
}
let trialRow = {
stimulus: trialNumber + 1,
response: trialResponse,
trait: 'untrustworthy',
subject: trialSubject,
};
newData.push(trialRow);
}
return newData;
}
// Call backend api storeExperimentResult to connect with FireBase and update Users Collection with experiment data.
function saveExperimentData(experimentData) {
uploadSelectionResult(participantId, experimentId, experimentData);
selectionTaskCompletionHandler(true);
}
const trialProcedure = {
timeline: [fixation, trial, postTrialPause],
timeline_variables: generateTrials(NUMBER_OF_TRIALS),
randomize_order: false,
repetitions: 1,
};
timeline.push(trialProcedure);
const reportBlock = {
type: 'html-keyboard-response',
stimulus: function () {
const trials = jsPsych.data.get().filter({ label: 'trial' });
const trialCount = trials.count();
const leftTrials = trials.filter({ selection: 'left' }).count();
const rightTrials = trials.filter({ selection: 'right' }).count();
const responseTime = Math.round(trials.select('rt').mean());
return (
'<h1>Completed!</h1>' +
'<p>You completed ' +
trialCount +
' trials.</p>' +
'<p>You selected image on the left in ' +
leftTrials +
' trials.</p>' +
'<p>You selected image on the right in ' +
rightTrials +
' trials.</p>' +
'<p>Your average response time ' +
responseTime +
'ms.</p>' +
'<p></p>' +
'<p>Press any key to complete the experiment. Thank you!</p>'
);
},
};
timeline.push(reportBlock);
// Checks to see if we have participantId, experimentId, and stimuli URLs ready
if (ready) {
/* start the experiment */
jsPsych.init({
timeline: timeline,
display_element: 'jspsych-target',
on_finish: function () {
// Filter out data to only show 'trial' data via label
let experimentalData = transformExperimentData(
jsPsych.data.get().filter({ label: 'trial' }).json('pretty'),
);
saveExperimentData(experimentalData);
},
});
}
// Implementation of previous team timeline & trial logic ------------------------------ END
}, 1000);
}
}, [ready]);
// Error handler that prompts the participant to re-click experiment.
const errorLoadingJsPsych = (errorCode) => {
window.alert(
'Something went wrong. Please click on experiment again.' +
' Error code: ' +
errorCode,
);
};
return (
<div>
{/* Including necessary JsPsych plugin classes & button cues */}
<h4 id="title">Which one do you pick?</h4>
<div id="instruction">
<div class="key-instruction">E - select image on left</div>
<div id="between" />
<div class="key-instruction">I - select image on right</div>
</div>
<p/>
<h5 id="note">Note: close experiment window when finished</h5>
<div id="jspsych-target"></div>
</div>
);
}; | random_line_split | |
merge_zips.go | // Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"errors"
"flag"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"android/soong/response"
"github.com/google/blueprint/pathtools"
"android/soong/jar"
"android/soong/third_party/zip"
)
// Input zip: we can open it, close it, and obtain an array of entries
type InputZip interface {
Name() string
Open() error
Close() error
Entries() []*zip.File
IsOpen() bool
}
// An entry that can be written to the output zip
type ZipEntryContents interface {
String() string
IsDir() bool
CRC32() uint32
Size() uint64
WriteToZip(dest string, zw *zip.Writer) error
}
// a ZipEntryFromZip is a ZipEntryContents that pulls its content from another zip
// identified by the input zip and the index of the entry in its entries array
type ZipEntryFromZip struct {
inputZip InputZip
index int
name string
isDir bool
crc32 uint32
size uint64
}
func NewZipEntryFromZip(inputZip InputZip, entryIndex int) *ZipEntryFromZip {
fi := inputZip.Entries()[entryIndex]
newEntry := ZipEntryFromZip{inputZip: inputZip,
index: entryIndex,
name: fi.Name,
isDir: fi.FileInfo().IsDir(),
crc32: fi.CRC32,
size: fi.UncompressedSize64,
}
return &newEntry
}
func (ze ZipEntryFromZip) String() string {
return fmt.Sprintf("%s!%s", ze.inputZip.Name(), ze.name)
}
func (ze ZipEntryFromZip) IsDir() bool {
return ze.isDir
}
func (ze ZipEntryFromZip) CRC32() uint32 {
return ze.crc32
}
func (ze ZipEntryFromZip) Size() uint64 {
return ze.size
}
func (ze ZipEntryFromZip) WriteToZip(dest string, zw *zip.Writer) error {
if err := ze.inputZip.Open(); err != nil {
return err
}
return zw.CopyFrom(ze.inputZip.Entries()[ze.index], dest)
}
// a ZipEntryFromBuffer is a ZipEntryContents that pulls its content from a []byte
type ZipEntryFromBuffer struct {
fh *zip.FileHeader
content []byte
}
func (be ZipEntryFromBuffer) String() string {
return "internal buffer"
}
func (be ZipEntryFromBuffer) IsDir() bool {
return be.fh.FileInfo().IsDir()
}
func (be ZipEntryFromBuffer) CRC32() uint32 {
return crc32.ChecksumIEEE(be.content)
}
func (be ZipEntryFromBuffer) Size() uint64 {
return uint64(len(be.content))
}
func (be ZipEntryFromBuffer) WriteToZip(dest string, zw *zip.Writer) error {
w, err := zw.CreateHeader(be.fh)
if err != nil {
return err
}
if !be.IsDir() {
_, err = w.Write(be.content)
if err != nil {
return err
}
}
return nil
}
// Processing state.
type OutputZip struct {
outputWriter *zip.Writer
stripDirEntries bool
emulateJar bool
sortEntries bool
ignoreDuplicates bool
excludeDirs []string
excludeFiles []string
sourceByDest map[string]ZipEntryContents
}
func NewOutputZip(outputWriter *zip.Writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates bool) *OutputZip {
return &OutputZip{
outputWriter: outputWriter,
stripDirEntries: stripDirEntries,
emulateJar: emulateJar,
sortEntries: sortEntries,
sourceByDest: make(map[string]ZipEntryContents, 0),
ignoreDuplicates: ignoreDuplicates,
}
}
func (oz *OutputZip) setExcludeDirs(excludeDirs []string) {
oz.excludeDirs = make([]string, len(excludeDirs))
for i, dir := range excludeDirs {
oz.excludeDirs[i] = filepath.Clean(dir)
}
}
func (oz *OutputZip) setExcludeFiles(excludeFiles []string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
return true
}
}
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
}
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips |
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOlderMiz := miz.older
if oldOlderMiz.newer != miz {
panic(fmt.Errorf("broken list between %p:%#v and %p:%#v", miz, miz, oldOlderMiz, oldOlderMiz))
}
miz.older = olderMiz
olderMiz.older = oldOlderMiz
oldOlderMiz.newer = olderMiz
olderMiz.newer = miz
}
func NewInputZipsManager(nInputZips, maxOpenZips int) *InputZipsManager {
if maxOpenZips < 3 {
panic(fmt.Errorf("open zips limit should be above 3"))
}
// In the fake element .older points to the most recently opened InputZip, and .newer points to the oldest.
head := new(ManagedInputZip)
head.older = head
head.newer = head
return &InputZipsManager{
inputZips: make([]*ManagedInputZip, 0, nInputZips),
maxOpenZips: maxOpenZips,
openInputZips: head,
}
}
// InputZip factory
func (izm *InputZipsManager) Manage(inz InputZip) InputZip {
iz := &ManagedInputZip{owner: izm, realInputZip: inz}
izm.inputZips = append(izm.inputZips, iz)
return iz
}
// Opens or reopens ManagedInputZip.
func (izm *InputZipsManager) reopen(miz *ManagedInputZip) error {
if miz.realInputZip.IsOpen() {
if miz != izm.openInputZips {
miz.unlink()
izm.openInputZips.link(miz)
}
return nil
}
if izm.nOpenZips >= izm.maxOpenZips {
if err := izm.close(izm.openInputZips.older); err != nil {
return err
}
}
if err := miz.realInputZip.Open(); err != nil {
return err
}
izm.openInputZips.link(miz)
izm.nOpenZips++
return nil
}
func (izm *InputZipsManager) close(miz *ManagedInputZip) error {
if miz.IsOpen() {
err := miz.realInputZip.Close()
izm.nOpenZips--
miz.unlink()
return err
}
return nil
}
// Checks that openInputZips deque is valid
func (izm *InputZipsManager) checkOpenZipsDeque() {
nReallyOpen := 0
el := izm.openInputZips
for {
elNext := el.older
if elNext.newer != el {
panic(fmt.Errorf("Element:\n %p: %v\nNext:\n %p %v", el, el, elNext, elNext))
}
if elNext == izm.openInputZips {
break
}
el = elNext
if !el.IsOpen() {
panic(fmt.Errorf("Found unopened element"))
}
nReallyOpen++
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
func (miz *ManagedInputZip) Name() string {
return miz.realInputZip.Name()
}
func (miz *ManagedInputZip) Open() error {
return miz.owner.reopen(miz)
}
func (miz *ManagedInputZip) Close() error {
return miz.owner.close(miz)
}
func (miz *ManagedInputZip) IsOpen() bool {
return miz.realInputZip.IsOpen()
}
func (miz *ManagedInputZip) Entries() []*zip.File {
if !miz.IsOpen() {
panic(fmt.Errorf("%s: is not open", miz.Name()))
}
return miz.realInputZip.Entries()
}
// Actual processing.
func mergeZips(inputZips []InputZip, writer *zip.Writer, manifest, pyMain string,
sortEntries, emulateJar, emulatePar, stripDirEntries, ignoreDuplicates bool,
excludeFiles, excludeDirs []string, zipsToNotStrip map[string]bool) error {
out := NewOutputZip(writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates)
out.setExcludeFiles(excludeFiles)
out.setExcludeDirs(excludeDirs)
if manifest != "" {
if err := out.addManifest(manifest); err != nil {
return err
}
}
if pyMain != "" {
if err := out.addZipEntryFromFile("__main__.py", pyMain); err != nil {
return err
}
}
if emulatePar {
noInitPackages, err := out.getUninitializedPythonPackages(inputZips)
if err != nil {
return err
}
for _, uninitializedPyPackage := range noInitPackages {
if err = out.addEmptyEntry(filepath.Join(uninitializedPyPackage, "__init__.py")); err != nil {
return err
}
}
}
// Finally, add entries from all the input zips.
for _, inputZip := range inputZips {
_, copyFully := zipsToNotStrip[inputZip.Name()]
if err := inputZip.Open(); err != nil {
return err
}
for i, entry := range inputZip.Entries() {
if copyFully || !out.isEntryExcluded(entry.Name) {
if err := out.copyEntry(inputZip, i); err != nil {
return err
}
}
}
// Unless we need to rearrange the entries, the input zip can now be closed.
if !(emulateJar || sortEntries) {
if err := inputZip.Close(); err != nil {
return err
}
}
}
if emulateJar {
return out.writeEntries(out.jarSorted())
} else if sortEntries {
return out.writeEntries(out.alphanumericSorted())
}
return nil
}
// Process command line
type fileList []string
func (f *fileList) String() string {
return `""`
}
func (f *fileList) Set(name string) error {
*f = append(*f, filepath.Clean(name))
return nil
}
type zipsToNotStripSet map[string]bool
func (s zipsToNotStripSet) String() string {
return `""`
}
func (s zipsToNotStripSet) Set(path string) error {
s[path] = true
return nil
}
var (
sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
emulatePar = flag.Bool("p", false, "merge zip entries based on par format")
excludeDirs fileList
excludeFiles fileList
zipsToNotStrip = make(zipsToNotStripSet)
stripDirEntries = flag.Bool("D", false, "strip directory entries from the output zip file")
manifest = flag.String("m", "", "manifest file to insert in jar")
pyMain = flag.String("pm", "", "__main__.py file to insert in par")
prefix = flag.String("prefix", "", "A file to prefix to the zip file")
ignoreDuplicates = flag.Bool("ignore-duplicates", false, "take each entry from the first zip it exists in and don't warn")
)
func init() {
flag.Var(&excludeDirs, "stripDir", "directories to be excluded from the output zip, accepts wildcards")
flag.Var(&excludeFiles, "stripFile", "files to be excluded from the output zip, accepts wildcards")
flag.Var(&zipsToNotStrip, "zipToNotStrip", "the input zip file which is not applicable for stripping")
}
type FileInputZip struct {
name string
reader *zip.ReadCloser
}
func (fiz *FileInputZip) Name() string {
return fiz.name
}
func (fiz *FileInputZip) Close() error {
if fiz.IsOpen() {
reader := fiz.reader
fiz.reader = nil
return reader.Close()
}
return nil
}
func (fiz *FileInputZip) Entries() []*zip.File {
if !fiz.IsOpen() {
panic(fmt.Errorf("%s: is not open", fiz.Name()))
}
return fiz.reader.File
}
func (fiz *FileInputZip) IsOpen() bool {
return fiz.reader != nil
}
func (fiz *FileInputZip) Open() error {
if fiz.IsOpen() {
return nil
}
var err error
if fiz.reader, err = zip.OpenReader(fiz.Name()); err != nil {
return fmt.Errorf("%s: %s", fiz.Name(), err.Error())
}
return nil
}
func main() {
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: merge_zips [-jpsD] [-m manifest] [--prefix script] [-pm __main__.py] OutputZip [inputs...]")
flag.PrintDefaults()
}
// parse args
flag.Parse()
args := flag.Args()
if len(args) < 1 {
flag.Usage()
os.Exit(1)
}
outputPath := args[0]
inputs := make([]string, 0)
for _, input := range args[1:] {
if input[0] == '@' {
f, err := os.Open(strings.TrimPrefix(input[1:], "@"))
if err != nil {
log.Fatal(err)
}
rspInputs, err := response.ReadRspFile(f)
f.Close()
if err != nil {
log.Fatal(err)
}
inputs = append(inputs, rspInputs...)
} else {
inputs = append(inputs, input)
}
}
log.SetFlags(log.Lshortfile)
// make writer
outputZip, err := os.Create(outputPath)
if err != nil {
log.Fatal(err)
}
defer outputZip.Close()
var offset int64
if *prefix != "" {
prefixFile, err := os.Open(*prefix)
if err != nil {
log.Fatal(err)
}
offset, err = io.Copy(outputZip, prefixFile)
if err != nil {
log.Fatal(err)
}
}
writer := zip.NewWriter(outputZip)
defer func() {
err := writer.Close()
if err != nil {
log.Fatal(err)
}
}()
writer.SetOffset(offset)
if *manifest != "" && !*emulateJar {
log.Fatal(errors.New("must specify -j when specifying a manifest via -m"))
}
if *pyMain != "" && !*emulatePar {
log.Fatal(errors.New("must specify -p when specifying a Python __main__.py via -pm"))
}
// do merge
inputZipsManager := NewInputZipsManager(len(inputs), 1000)
inputZips := make([]InputZip, len(inputs))
for i, input := range inputs {
inputZips[i] = inputZipsManager.Manage(&FileInputZip{name: input})
}
err = mergeZips(inputZips, writer, *manifest, *pyMain, *sortEntries, *emulateJar, *emulatePar,
*stripDirEntries, *ignoreDuplicates, []string(excludeFiles), []string(excludeDirs),
map[string]bool(zipsToNotStrip))
if err != nil {
log.Fatal(err)
}
}
| {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
} | conditional_block |
merge_zips.go | // Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"errors"
"flag"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"android/soong/response"
"github.com/google/blueprint/pathtools"
"android/soong/jar"
"android/soong/third_party/zip"
)
// Input zip: we can open it, close it, and obtain an array of entries
type InputZip interface {
Name() string
Open() error
Close() error
Entries() []*zip.File
IsOpen() bool
}
// An entry that can be written to the output zip
type ZipEntryContents interface {
String() string
IsDir() bool
CRC32() uint32
Size() uint64
WriteToZip(dest string, zw *zip.Writer) error
}
// a ZipEntryFromZip is a ZipEntryContents that pulls its content from another zip
// identified by the input zip and the index of the entry in its entries array
type ZipEntryFromZip struct {
inputZip InputZip
index int
name string
isDir bool
crc32 uint32
size uint64
}
func NewZipEntryFromZip(inputZip InputZip, entryIndex int) *ZipEntryFromZip {
fi := inputZip.Entries()[entryIndex]
newEntry := ZipEntryFromZip{inputZip: inputZip,
index: entryIndex,
name: fi.Name,
isDir: fi.FileInfo().IsDir(),
crc32: fi.CRC32,
size: fi.UncompressedSize64,
}
return &newEntry
}
func (ze ZipEntryFromZip) String() string {
return fmt.Sprintf("%s!%s", ze.inputZip.Name(), ze.name)
}
func (ze ZipEntryFromZip) IsDir() bool {
return ze.isDir
}
func (ze ZipEntryFromZip) CRC32() uint32 {
return ze.crc32
}
func (ze ZipEntryFromZip) Size() uint64 {
return ze.size
}
func (ze ZipEntryFromZip) WriteToZip(dest string, zw *zip.Writer) error {
if err := ze.inputZip.Open(); err != nil {
return err
}
return zw.CopyFrom(ze.inputZip.Entries()[ze.index], dest)
}
// a ZipEntryFromBuffer is a ZipEntryContents that pulls its content from a []byte
type ZipEntryFromBuffer struct {
fh *zip.FileHeader
content []byte
}
func (be ZipEntryFromBuffer) String() string {
return "internal buffer"
}
func (be ZipEntryFromBuffer) IsDir() bool {
return be.fh.FileInfo().IsDir()
}
func (be ZipEntryFromBuffer) CRC32() uint32 {
return crc32.ChecksumIEEE(be.content)
}
func (be ZipEntryFromBuffer) Size() uint64 {
return uint64(len(be.content))
}
func (be ZipEntryFromBuffer) WriteToZip(dest string, zw *zip.Writer) error {
w, err := zw.CreateHeader(be.fh)
if err != nil {
return err
}
if !be.IsDir() {
_, err = w.Write(be.content)
if err != nil {
return err
}
}
return nil
}
// Processing state.
type OutputZip struct {
outputWriter *zip.Writer
stripDirEntries bool
emulateJar bool
sortEntries bool
ignoreDuplicates bool
excludeDirs []string
excludeFiles []string
sourceByDest map[string]ZipEntryContents
}
func NewOutputZip(outputWriter *zip.Writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates bool) *OutputZip {
return &OutputZip{
outputWriter: outputWriter,
stripDirEntries: stripDirEntries,
emulateJar: emulateJar,
sortEntries: sortEntries,
sourceByDest: make(map[string]ZipEntryContents, 0),
ignoreDuplicates: ignoreDuplicates,
}
}
func (oz *OutputZip) setExcludeDirs(excludeDirs []string) {
oz.excludeDirs = make([]string, len(excludeDirs))
for i, dir := range excludeDirs {
oz.excludeDirs[i] = filepath.Clean(dir)
}
}
func (oz *OutputZip) setExcludeFiles(excludeFiles []string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
return true
}
}
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) | (inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
}
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
}
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOlderMiz := miz.older
if oldOlderMiz.newer != miz {
panic(fmt.Errorf("broken list between %p:%#v and %p:%#v", miz, miz, oldOlderMiz, oldOlderMiz))
}
miz.older = olderMiz
olderMiz.older = oldOlderMiz
oldOlderMiz.newer = olderMiz
olderMiz.newer = miz
}
func NewInputZipsManager(nInputZips, maxOpenZips int) *InputZipsManager {
if maxOpenZips < 3 {
panic(fmt.Errorf("open zips limit should be above 3"))
}
// In the fake element .older points to the most recently opened InputZip, and .newer points to the oldest.
head := new(ManagedInputZip)
head.older = head
head.newer = head
return &InputZipsManager{
inputZips: make([]*ManagedInputZip, 0, nInputZips),
maxOpenZips: maxOpenZips,
openInputZips: head,
}
}
// InputZip factory
func (izm *InputZipsManager) Manage(inz InputZip) InputZip {
iz := &ManagedInputZip{owner: izm, realInputZip: inz}
izm.inputZips = append(izm.inputZips, iz)
return iz
}
// Opens or reopens ManagedInputZip.
func (izm *InputZipsManager) reopen(miz *ManagedInputZip) error {
if miz.realInputZip.IsOpen() {
if miz != izm.openInputZips {
miz.unlink()
izm.openInputZips.link(miz)
}
return nil
}
if izm.nOpenZips >= izm.maxOpenZips {
if err := izm.close(izm.openInputZips.older); err != nil {
return err
}
}
if err := miz.realInputZip.Open(); err != nil {
return err
}
izm.openInputZips.link(miz)
izm.nOpenZips++
return nil
}
func (izm *InputZipsManager) close(miz *ManagedInputZip) error {
if miz.IsOpen() {
err := miz.realInputZip.Close()
izm.nOpenZips--
miz.unlink()
return err
}
return nil
}
// Checks that openInputZips deque is valid
func (izm *InputZipsManager) checkOpenZipsDeque() {
nReallyOpen := 0
el := izm.openInputZips
for {
elNext := el.older
if elNext.newer != el {
panic(fmt.Errorf("Element:\n %p: %v\nNext:\n %p %v", el, el, elNext, elNext))
}
if elNext == izm.openInputZips {
break
}
el = elNext
if !el.IsOpen() {
panic(fmt.Errorf("Found unopened element"))
}
nReallyOpen++
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
func (miz *ManagedInputZip) Name() string {
return miz.realInputZip.Name()
}
func (miz *ManagedInputZip) Open() error {
return miz.owner.reopen(miz)
}
func (miz *ManagedInputZip) Close() error {
return miz.owner.close(miz)
}
func (miz *ManagedInputZip) IsOpen() bool {
return miz.realInputZip.IsOpen()
}
func (miz *ManagedInputZip) Entries() []*zip.File {
if !miz.IsOpen() {
panic(fmt.Errorf("%s: is not open", miz.Name()))
}
return miz.realInputZip.Entries()
}
// Actual processing.
func mergeZips(inputZips []InputZip, writer *zip.Writer, manifest, pyMain string,
sortEntries, emulateJar, emulatePar, stripDirEntries, ignoreDuplicates bool,
excludeFiles, excludeDirs []string, zipsToNotStrip map[string]bool) error {
out := NewOutputZip(writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates)
out.setExcludeFiles(excludeFiles)
out.setExcludeDirs(excludeDirs)
if manifest != "" {
if err := out.addManifest(manifest); err != nil {
return err
}
}
if pyMain != "" {
if err := out.addZipEntryFromFile("__main__.py", pyMain); err != nil {
return err
}
}
if emulatePar {
noInitPackages, err := out.getUninitializedPythonPackages(inputZips)
if err != nil {
return err
}
for _, uninitializedPyPackage := range noInitPackages {
if err = out.addEmptyEntry(filepath.Join(uninitializedPyPackage, "__init__.py")); err != nil {
return err
}
}
}
// Finally, add entries from all the input zips.
for _, inputZip := range inputZips {
_, copyFully := zipsToNotStrip[inputZip.Name()]
if err := inputZip.Open(); err != nil {
return err
}
for i, entry := range inputZip.Entries() {
if copyFully || !out.isEntryExcluded(entry.Name) {
if err := out.copyEntry(inputZip, i); err != nil {
return err
}
}
}
// Unless we need to rearrange the entries, the input zip can now be closed.
if !(emulateJar || sortEntries) {
if err := inputZip.Close(); err != nil {
return err
}
}
}
if emulateJar {
return out.writeEntries(out.jarSorted())
} else if sortEntries {
return out.writeEntries(out.alphanumericSorted())
}
return nil
}
// Process command line
type fileList []string
func (f *fileList) String() string {
return `""`
}
func (f *fileList) Set(name string) error {
*f = append(*f, filepath.Clean(name))
return nil
}
type zipsToNotStripSet map[string]bool
func (s zipsToNotStripSet) String() string {
return `""`
}
func (s zipsToNotStripSet) Set(path string) error {
s[path] = true
return nil
}
var (
sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
emulatePar = flag.Bool("p", false, "merge zip entries based on par format")
excludeDirs fileList
excludeFiles fileList
zipsToNotStrip = make(zipsToNotStripSet)
stripDirEntries = flag.Bool("D", false, "strip directory entries from the output zip file")
manifest = flag.String("m", "", "manifest file to insert in jar")
pyMain = flag.String("pm", "", "__main__.py file to insert in par")
prefix = flag.String("prefix", "", "A file to prefix to the zip file")
ignoreDuplicates = flag.Bool("ignore-duplicates", false, "take each entry from the first zip it exists in and don't warn")
)
func init() {
flag.Var(&excludeDirs, "stripDir", "directories to be excluded from the output zip, accepts wildcards")
flag.Var(&excludeFiles, "stripFile", "files to be excluded from the output zip, accepts wildcards")
flag.Var(&zipsToNotStrip, "zipToNotStrip", "the input zip file which is not applicable for stripping")
}
type FileInputZip struct {
name string
reader *zip.ReadCloser
}
func (fiz *FileInputZip) Name() string {
return fiz.name
}
func (fiz *FileInputZip) Close() error {
if fiz.IsOpen() {
reader := fiz.reader
fiz.reader = nil
return reader.Close()
}
return nil
}
func (fiz *FileInputZip) Entries() []*zip.File {
if !fiz.IsOpen() {
panic(fmt.Errorf("%s: is not open", fiz.Name()))
}
return fiz.reader.File
}
func (fiz *FileInputZip) IsOpen() bool {
return fiz.reader != nil
}
func (fiz *FileInputZip) Open() error {
if fiz.IsOpen() {
return nil
}
var err error
if fiz.reader, err = zip.OpenReader(fiz.Name()); err != nil {
return fmt.Errorf("%s: %s", fiz.Name(), err.Error())
}
return nil
}
func main() {
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: merge_zips [-jpsD] [-m manifest] [--prefix script] [-pm __main__.py] OutputZip [inputs...]")
flag.PrintDefaults()
}
// parse args
flag.Parse()
args := flag.Args()
if len(args) < 1 {
flag.Usage()
os.Exit(1)
}
outputPath := args[0]
inputs := make([]string, 0)
for _, input := range args[1:] {
if input[0] == '@' {
f, err := os.Open(strings.TrimPrefix(input[1:], "@"))
if err != nil {
log.Fatal(err)
}
rspInputs, err := response.ReadRspFile(f)
f.Close()
if err != nil {
log.Fatal(err)
}
inputs = append(inputs, rspInputs...)
} else {
inputs = append(inputs, input)
}
}
log.SetFlags(log.Lshortfile)
// make writer
outputZip, err := os.Create(outputPath)
if err != nil {
log.Fatal(err)
}
defer outputZip.Close()
var offset int64
if *prefix != "" {
prefixFile, err := os.Open(*prefix)
if err != nil {
log.Fatal(err)
}
offset, err = io.Copy(outputZip, prefixFile)
if err != nil {
log.Fatal(err)
}
}
writer := zip.NewWriter(outputZip)
defer func() {
err := writer.Close()
if err != nil {
log.Fatal(err)
}
}()
writer.SetOffset(offset)
if *manifest != "" && !*emulateJar {
log.Fatal(errors.New("must specify -j when specifying a manifest via -m"))
}
if *pyMain != "" && !*emulatePar {
log.Fatal(errors.New("must specify -p when specifying a Python __main__.py via -pm"))
}
// do merge
inputZipsManager := NewInputZipsManager(len(inputs), 1000)
inputZips := make([]InputZip, len(inputs))
for i, input := range inputs {
inputZips[i] = inputZipsManager.Manage(&FileInputZip{name: input})
}
err = mergeZips(inputZips, writer, *manifest, *pyMain, *sortEntries, *emulateJar, *emulatePar,
*stripDirEntries, *ignoreDuplicates, []string(excludeFiles), []string(excludeDirs),
map[string]bool(zipsToNotStrip))
if err != nil {
log.Fatal(err)
}
}
| copyEntry | identifier_name |
merge_zips.go | // Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"errors"
"flag"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"android/soong/response"
"github.com/google/blueprint/pathtools"
"android/soong/jar"
"android/soong/third_party/zip"
)
// Input zip: we can open it, close it, and obtain an array of entries
type InputZip interface {
Name() string
Open() error
Close() error
Entries() []*zip.File
IsOpen() bool
}
// An entry that can be written to the output zip
type ZipEntryContents interface {
String() string
IsDir() bool
CRC32() uint32
Size() uint64
WriteToZip(dest string, zw *zip.Writer) error
}
// a ZipEntryFromZip is a ZipEntryContents that pulls its content from another zip
// identified by the input zip and the index of the entry in its entries array
type ZipEntryFromZip struct {
inputZip InputZip
index int
name string
isDir bool
crc32 uint32
size uint64
}
func NewZipEntryFromZip(inputZip InputZip, entryIndex int) *ZipEntryFromZip {
fi := inputZip.Entries()[entryIndex]
newEntry := ZipEntryFromZip{inputZip: inputZip,
index: entryIndex,
name: fi.Name,
isDir: fi.FileInfo().IsDir(),
crc32: fi.CRC32,
size: fi.UncompressedSize64,
}
return &newEntry
}
func (ze ZipEntryFromZip) String() string {
return fmt.Sprintf("%s!%s", ze.inputZip.Name(), ze.name)
}
func (ze ZipEntryFromZip) IsDir() bool {
return ze.isDir
}
func (ze ZipEntryFromZip) CRC32() uint32 {
return ze.crc32
}
func (ze ZipEntryFromZip) Size() uint64 {
return ze.size
}
func (ze ZipEntryFromZip) WriteToZip(dest string, zw *zip.Writer) error {
if err := ze.inputZip.Open(); err != nil {
return err
}
return zw.CopyFrom(ze.inputZip.Entries()[ze.index], dest)
}
// a ZipEntryFromBuffer is a ZipEntryContents that pulls its content from a []byte
type ZipEntryFromBuffer struct {
fh *zip.FileHeader
content []byte
}
func (be ZipEntryFromBuffer) String() string {
return "internal buffer"
}
func (be ZipEntryFromBuffer) IsDir() bool {
return be.fh.FileInfo().IsDir()
}
func (be ZipEntryFromBuffer) CRC32() uint32 {
return crc32.ChecksumIEEE(be.content)
}
func (be ZipEntryFromBuffer) Size() uint64 {
return uint64(len(be.content))
}
func (be ZipEntryFromBuffer) WriteToZip(dest string, zw *zip.Writer) error {
w, err := zw.CreateHeader(be.fh)
if err != nil {
return err
}
if !be.IsDir() {
_, err = w.Write(be.content)
if err != nil {
return err
}
}
return nil
}
// Processing state.
type OutputZip struct {
outputWriter *zip.Writer
stripDirEntries bool
emulateJar bool
sortEntries bool
ignoreDuplicates bool
excludeDirs []string
excludeFiles []string
sourceByDest map[string]ZipEntryContents
}
func NewOutputZip(outputWriter *zip.Writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates bool) *OutputZip {
return &OutputZip{
outputWriter: outputWriter,
stripDirEntries: stripDirEntries,
emulateJar: emulateJar,
sortEntries: sortEntries,
sourceByDest: make(map[string]ZipEntryContents, 0),
ignoreDuplicates: ignoreDuplicates,
}
}
func (oz *OutputZip) setExcludeDirs(excludeDirs []string) {
oz.excludeDirs = make([]string, len(excludeDirs))
for i, dir := range excludeDirs {
oz.excludeDirs[i] = filepath.Clean(dir)
}
}
func (oz *OutputZip) setExcludeFiles(excludeFiles []string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern)) | }
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
}
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
}
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOlderMiz := miz.older
if oldOlderMiz.newer != miz {
panic(fmt.Errorf("broken list between %p:%#v and %p:%#v", miz, miz, oldOlderMiz, oldOlderMiz))
}
miz.older = olderMiz
olderMiz.older = oldOlderMiz
oldOlderMiz.newer = olderMiz
olderMiz.newer = miz
}
func NewInputZipsManager(nInputZips, maxOpenZips int) *InputZipsManager {
if maxOpenZips < 3 {
panic(fmt.Errorf("open zips limit should be above 3"))
}
// In the fake element .older points to the most recently opened InputZip, and .newer points to the oldest.
head := new(ManagedInputZip)
head.older = head
head.newer = head
return &InputZipsManager{
inputZips: make([]*ManagedInputZip, 0, nInputZips),
maxOpenZips: maxOpenZips,
openInputZips: head,
}
}
// InputZip factory
func (izm *InputZipsManager) Manage(inz InputZip) InputZip {
iz := &ManagedInputZip{owner: izm, realInputZip: inz}
izm.inputZips = append(izm.inputZips, iz)
return iz
}
// Opens or reopens ManagedInputZip.
func (izm *InputZipsManager) reopen(miz *ManagedInputZip) error {
if miz.realInputZip.IsOpen() {
if miz != izm.openInputZips {
miz.unlink()
izm.openInputZips.link(miz)
}
return nil
}
if izm.nOpenZips >= izm.maxOpenZips {
if err := izm.close(izm.openInputZips.older); err != nil {
return err
}
}
if err := miz.realInputZip.Open(); err != nil {
return err
}
izm.openInputZips.link(miz)
izm.nOpenZips++
return nil
}
func (izm *InputZipsManager) close(miz *ManagedInputZip) error {
if miz.IsOpen() {
err := miz.realInputZip.Close()
izm.nOpenZips--
miz.unlink()
return err
}
return nil
}
// Checks that openInputZips deque is valid
func (izm *InputZipsManager) checkOpenZipsDeque() {
nReallyOpen := 0
el := izm.openInputZips
for {
elNext := el.older
if elNext.newer != el {
panic(fmt.Errorf("Element:\n %p: %v\nNext:\n %p %v", el, el, elNext, elNext))
}
if elNext == izm.openInputZips {
break
}
el = elNext
if !el.IsOpen() {
panic(fmt.Errorf("Found unopened element"))
}
nReallyOpen++
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
func (miz *ManagedInputZip) Name() string {
return miz.realInputZip.Name()
}
func (miz *ManagedInputZip) Open() error {
return miz.owner.reopen(miz)
}
func (miz *ManagedInputZip) Close() error {
return miz.owner.close(miz)
}
func (miz *ManagedInputZip) IsOpen() bool {
return miz.realInputZip.IsOpen()
}
func (miz *ManagedInputZip) Entries() []*zip.File {
if !miz.IsOpen() {
panic(fmt.Errorf("%s: is not open", miz.Name()))
}
return miz.realInputZip.Entries()
}
// Actual processing.
func mergeZips(inputZips []InputZip, writer *zip.Writer, manifest, pyMain string,
sortEntries, emulateJar, emulatePar, stripDirEntries, ignoreDuplicates bool,
excludeFiles, excludeDirs []string, zipsToNotStrip map[string]bool) error {
out := NewOutputZip(writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates)
out.setExcludeFiles(excludeFiles)
out.setExcludeDirs(excludeDirs)
if manifest != "" {
if err := out.addManifest(manifest); err != nil {
return err
}
}
if pyMain != "" {
if err := out.addZipEntryFromFile("__main__.py", pyMain); err != nil {
return err
}
}
if emulatePar {
noInitPackages, err := out.getUninitializedPythonPackages(inputZips)
if err != nil {
return err
}
for _, uninitializedPyPackage := range noInitPackages {
if err = out.addEmptyEntry(filepath.Join(uninitializedPyPackage, "__init__.py")); err != nil {
return err
}
}
}
// Finally, add entries from all the input zips.
for _, inputZip := range inputZips {
_, copyFully := zipsToNotStrip[inputZip.Name()]
if err := inputZip.Open(); err != nil {
return err
}
for i, entry := range inputZip.Entries() {
if copyFully || !out.isEntryExcluded(entry.Name) {
if err := out.copyEntry(inputZip, i); err != nil {
return err
}
}
}
// Unless we need to rearrange the entries, the input zip can now be closed.
if !(emulateJar || sortEntries) {
if err := inputZip.Close(); err != nil {
return err
}
}
}
if emulateJar {
return out.writeEntries(out.jarSorted())
} else if sortEntries {
return out.writeEntries(out.alphanumericSorted())
}
return nil
}
// Process command line
type fileList []string
func (f *fileList) String() string {
return `""`
}
func (f *fileList) Set(name string) error {
*f = append(*f, filepath.Clean(name))
return nil
}
type zipsToNotStripSet map[string]bool
func (s zipsToNotStripSet) String() string {
return `""`
}
func (s zipsToNotStripSet) Set(path string) error {
s[path] = true
return nil
}
var (
sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
emulatePar = flag.Bool("p", false, "merge zip entries based on par format")
excludeDirs fileList
excludeFiles fileList
zipsToNotStrip = make(zipsToNotStripSet)
stripDirEntries = flag.Bool("D", false, "strip directory entries from the output zip file")
manifest = flag.String("m", "", "manifest file to insert in jar")
pyMain = flag.String("pm", "", "__main__.py file to insert in par")
prefix = flag.String("prefix", "", "A file to prefix to the zip file")
ignoreDuplicates = flag.Bool("ignore-duplicates", false, "take each entry from the first zip it exists in and don't warn")
)
func init() {
flag.Var(&excludeDirs, "stripDir", "directories to be excluded from the output zip, accepts wildcards")
flag.Var(&excludeFiles, "stripFile", "files to be excluded from the output zip, accepts wildcards")
flag.Var(&zipsToNotStrip, "zipToNotStrip", "the input zip file which is not applicable for stripping")
}
type FileInputZip struct {
name string
reader *zip.ReadCloser
}
func (fiz *FileInputZip) Name() string {
return fiz.name
}
func (fiz *FileInputZip) Close() error {
if fiz.IsOpen() {
reader := fiz.reader
fiz.reader = nil
return reader.Close()
}
return nil
}
func (fiz *FileInputZip) Entries() []*zip.File {
if !fiz.IsOpen() {
panic(fmt.Errorf("%s: is not open", fiz.Name()))
}
return fiz.reader.File
}
func (fiz *FileInputZip) IsOpen() bool {
return fiz.reader != nil
}
func (fiz *FileInputZip) Open() error {
if fiz.IsOpen() {
return nil
}
var err error
if fiz.reader, err = zip.OpenReader(fiz.Name()); err != nil {
return fmt.Errorf("%s: %s", fiz.Name(), err.Error())
}
return nil
}
func main() {
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: merge_zips [-jpsD] [-m manifest] [--prefix script] [-pm __main__.py] OutputZip [inputs...]")
flag.PrintDefaults()
}
// parse args
flag.Parse()
args := flag.Args()
if len(args) < 1 {
flag.Usage()
os.Exit(1)
}
outputPath := args[0]
inputs := make([]string, 0)
for _, input := range args[1:] {
if input[0] == '@' {
f, err := os.Open(strings.TrimPrefix(input[1:], "@"))
if err != nil {
log.Fatal(err)
}
rspInputs, err := response.ReadRspFile(f)
f.Close()
if err != nil {
log.Fatal(err)
}
inputs = append(inputs, rspInputs...)
} else {
inputs = append(inputs, input)
}
}
log.SetFlags(log.Lshortfile)
// make writer
outputZip, err := os.Create(outputPath)
if err != nil {
log.Fatal(err)
}
defer outputZip.Close()
var offset int64
if *prefix != "" {
prefixFile, err := os.Open(*prefix)
if err != nil {
log.Fatal(err)
}
offset, err = io.Copy(outputZip, prefixFile)
if err != nil {
log.Fatal(err)
}
}
writer := zip.NewWriter(outputZip)
defer func() {
err := writer.Close()
if err != nil {
log.Fatal(err)
}
}()
writer.SetOffset(offset)
if *manifest != "" && !*emulateJar {
log.Fatal(errors.New("must specify -j when specifying a manifest via -m"))
}
if *pyMain != "" && !*emulatePar {
log.Fatal(errors.New("must specify -p when specifying a Python __main__.py via -pm"))
}
// do merge
inputZipsManager := NewInputZipsManager(len(inputs), 1000)
inputZips := make([]InputZip, len(inputs))
for i, input := range inputs {
inputZips[i] = inputZipsManager.Manage(&FileInputZip{name: input})
}
err = mergeZips(inputZips, writer, *manifest, *pyMain, *sortEntries, *emulateJar, *emulatePar,
*stripDirEntries, *ignoreDuplicates, []string(excludeFiles), []string(excludeDirs),
map[string]bool(zipsToNotStrip))
if err != nil {
log.Fatal(err)
}
} | }
if match {
return true
} | random_line_split |
merge_zips.go | // Copyright 2017 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"errors"
"flag"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strings"
"android/soong/response"
"github.com/google/blueprint/pathtools"
"android/soong/jar"
"android/soong/third_party/zip"
)
// Input zip: we can open it, close it, and obtain an array of entries
type InputZip interface {
Name() string
Open() error
Close() error
Entries() []*zip.File
IsOpen() bool
}
// An entry that can be written to the output zip
type ZipEntryContents interface {
String() string
IsDir() bool
CRC32() uint32
Size() uint64
WriteToZip(dest string, zw *zip.Writer) error
}
// a ZipEntryFromZip is a ZipEntryContents that pulls its content from another zip
// identified by the input zip and the index of the entry in its entries array
type ZipEntryFromZip struct {
inputZip InputZip
index int
name string
isDir bool
crc32 uint32
size uint64
}
func NewZipEntryFromZip(inputZip InputZip, entryIndex int) *ZipEntryFromZip {
fi := inputZip.Entries()[entryIndex]
newEntry := ZipEntryFromZip{inputZip: inputZip,
index: entryIndex,
name: fi.Name,
isDir: fi.FileInfo().IsDir(),
crc32: fi.CRC32,
size: fi.UncompressedSize64,
}
return &newEntry
}
func (ze ZipEntryFromZip) String() string {
return fmt.Sprintf("%s!%s", ze.inputZip.Name(), ze.name)
}
func (ze ZipEntryFromZip) IsDir() bool {
return ze.isDir
}
func (ze ZipEntryFromZip) CRC32() uint32 {
return ze.crc32
}
func (ze ZipEntryFromZip) Size() uint64 {
return ze.size
}
func (ze ZipEntryFromZip) WriteToZip(dest string, zw *zip.Writer) error {
if err := ze.inputZip.Open(); err != nil {
return err
}
return zw.CopyFrom(ze.inputZip.Entries()[ze.index], dest)
}
// a ZipEntryFromBuffer is a ZipEntryContents that pulls its content from a []byte
type ZipEntryFromBuffer struct {
fh *zip.FileHeader
content []byte
}
func (be ZipEntryFromBuffer) String() string {
return "internal buffer"
}
func (be ZipEntryFromBuffer) IsDir() bool {
return be.fh.FileInfo().IsDir()
}
func (be ZipEntryFromBuffer) CRC32() uint32 {
return crc32.ChecksumIEEE(be.content)
}
func (be ZipEntryFromBuffer) Size() uint64 {
return uint64(len(be.content))
}
func (be ZipEntryFromBuffer) WriteToZip(dest string, zw *zip.Writer) error {
w, err := zw.CreateHeader(be.fh)
if err != nil {
return err
}
if !be.IsDir() {
_, err = w.Write(be.content)
if err != nil {
return err
}
}
return nil
}
// Processing state.
type OutputZip struct {
outputWriter *zip.Writer
stripDirEntries bool
emulateJar bool
sortEntries bool
ignoreDuplicates bool
excludeDirs []string
excludeFiles []string
sourceByDest map[string]ZipEntryContents
}
func NewOutputZip(outputWriter *zip.Writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates bool) *OutputZip {
return &OutputZip{
outputWriter: outputWriter,
stripDirEntries: stripDirEntries,
emulateJar: emulateJar,
sortEntries: sortEntries,
sourceByDest: make(map[string]ZipEntryContents, 0),
ignoreDuplicates: ignoreDuplicates,
}
}
func (oz *OutputZip) setExcludeDirs(excludeDirs []string) {
oz.excludeDirs = make([]string, len(excludeDirs))
for i, dir := range excludeDirs {
oz.excludeDirs[i] = filepath.Clean(dir)
}
}
func (oz *OutputZip) setExcludeFiles(excludeFiles []string) {
oz.excludeFiles = excludeFiles
}
// Adds an entry with given name whose source is given ZipEntryContents. Returns old ZipEntryContents
// if entry with given name already exists.
func (oz *OutputZip) addZipEntry(name string, source ZipEntryContents) (ZipEntryContents, error) {
if existingSource, exists := oz.sourceByDest[name]; exists {
return existingSource, nil
}
oz.sourceByDest[name] = source
// Delay writing an entry if entries need to be rearranged.
if oz.emulateJar || oz.sortEntries {
return nil, nil
}
return nil, source.WriteToZip(name, oz.outputWriter)
}
// Adds an entry for the manifest (META-INF/MANIFEST.MF from the given file
func (oz *OutputZip) addManifest(manifestPath string) error {
if !oz.stripDirEntries {
if _, err := oz.addZipEntry(jar.MetaDir, ZipEntryFromBuffer{jar.MetaDirFileHeader(), nil}); err != nil {
return err
}
}
contents, err := ioutil.ReadFile(manifestPath)
if err == nil {
fh, buf, err := jar.ManifestFileContents(contents)
if err == nil {
_, err = oz.addZipEntry(jar.ManifestFile, ZipEntryFromBuffer{fh, buf})
}
}
return err
}
// Adds an entry with given name and contents read from given file
func (oz *OutputZip) addZipEntryFromFile(name string, path string) error {
buf, err := ioutil.ReadFile(path)
if err == nil {
fh := &zip.FileHeader{
Name: name,
Method: zip.Store,
UncompressedSize64: uint64(len(buf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err = oz.addZipEntry(name, ZipEntryFromBuffer{fh, buf})
}
return err
}
func (oz *OutputZip) addEmptyEntry(entry string) error {
var emptyBuf []byte
fh := &zip.FileHeader{
Name: entry,
Method: zip.Store,
UncompressedSize64: uint64(len(emptyBuf)),
}
fh.SetMode(0700)
fh.SetModTime(jar.DefaultTime)
_, err := oz.addZipEntry(entry, ZipEntryFromBuffer{fh, emptyBuf})
return err
}
// Returns true if given entry is to be excluded
func (oz *OutputZip) isEntryExcluded(name string) bool {
for _, dir := range oz.excludeDirs {
dir = filepath.Clean(dir)
patterns := []string{
dir + "/", // the directory itself
dir + "/**/*", // files recursively in the directory
dir + "/**/*/", // directories recursively in the directory
}
for _, pattern := range patterns {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
if oz.emulateJar {
// When merging jar files, don't strip META-INF/MANIFEST.MF even if stripping META-INF is
// requested.
// TODO(ccross): which files does this affect?
if name != jar.MetaDir && name != jar.ManifestFile {
return true
}
}
return true
}
}
}
for _, pattern := range oz.excludeFiles {
match, err := pathtools.Match(pattern, name)
if err != nil {
panic(fmt.Errorf("%s: %s", err.Error(), pattern))
}
if match {
return true
}
}
return false
}
// Creates a zip entry whose contents is an entry from the given input zip.
func (oz *OutputZip) copyEntry(inputZip InputZip, index int) error {
entry := NewZipEntryFromZip(inputZip, index)
if oz.stripDirEntries && entry.IsDir() {
return nil
}
existingEntry, err := oz.addZipEntry(entry.name, entry)
if err != nil {
return err
}
if existingEntry == nil {
return nil
}
// File types should match
if existingEntry.IsDir() != entry.IsDir() {
return fmt.Errorf("Directory/file mismatch at %v from %v and %v\n",
entry.name, existingEntry, entry)
}
if oz.ignoreDuplicates ||
// Skip manifest and module info files that are not from the first input file
(oz.emulateJar && entry.name == jar.ManifestFile || entry.name == jar.ModuleInfoClass) ||
// Identical entries
(existingEntry.CRC32() == entry.CRC32() && existingEntry.Size() == entry.Size()) ||
// Directory entries
entry.IsDir() {
return nil
}
return fmt.Errorf("Duplicate path %v found in %v and %v\n", entry.name, existingEntry, inputZip.Name())
}
func (oz *OutputZip) entriesArray() []string {
entries := make([]string, len(oz.sourceByDest))
i := 0
for entry := range oz.sourceByDest {
entries[i] = entry
i++
}
return entries
}
func (oz *OutputZip) jarSorted() []string |
func (oz *OutputZip) alphanumericSorted() []string {
entries := oz.entriesArray()
sort.Strings(entries)
return entries
}
func (oz *OutputZip) writeEntries(entries []string) error {
for _, entry := range entries {
source, _ := oz.sourceByDest[entry]
if err := source.WriteToZip(entry, oz.outputWriter); err != nil {
return err
}
}
return nil
}
func (oz *OutputZip) getUninitializedPythonPackages(inputZips []InputZip) ([]string, error) {
// the runfiles packages needs to be populated with "__init__.py".
// the runfiles dirs have been treated as packages.
allPackages := make(map[string]bool)
initedPackages := make(map[string]bool)
getPackage := func(path string) string {
ret := filepath.Dir(path)
// filepath.Dir("abc") -> "." and filepath.Dir("/abc") -> "/".
if ret == "." || ret == "/" {
return ""
}
return ret
}
// put existing __init__.py files to a set first. This set is used for preventing
// generated __init__.py files from overwriting existing ones.
for _, inputZip := range inputZips {
if err := inputZip.Open(); err != nil {
return nil, err
}
for _, file := range inputZip.Entries() {
pyPkg := getPackage(file.Name)
if filepath.Base(file.Name) == "__init__.py" {
if _, found := initedPackages[pyPkg]; found {
panic(fmt.Errorf("found __init__.py path duplicates during pars merging: %q", file.Name))
}
initedPackages[pyPkg] = true
}
for pyPkg != "" {
if _, found := allPackages[pyPkg]; found {
break
}
allPackages[pyPkg] = true
pyPkg = getPackage(pyPkg)
}
}
}
noInitPackages := make([]string, 0)
for pyPkg := range allPackages {
if _, found := initedPackages[pyPkg]; !found {
noInitPackages = append(noInitPackages, pyPkg)
}
}
return noInitPackages, nil
}
// An InputZip owned by the InputZipsManager. Opened ManagedInputZip's are chained in the open order.
type ManagedInputZip struct {
owner *InputZipsManager
realInputZip InputZip
older *ManagedInputZip
newer *ManagedInputZip
}
// Maintains the array of ManagedInputZips, keeping track of open input ones. When an InputZip is opened,
// may close some other InputZip to limit the number of open ones.
type InputZipsManager struct {
inputZips []*ManagedInputZip
nOpenZips int
maxOpenZips int
openInputZips *ManagedInputZip
}
func (miz *ManagedInputZip) unlink() {
olderMiz := miz.older
newerMiz := miz.newer
if newerMiz.older != miz || olderMiz.newer != miz {
panic(fmt.Errorf("removing %p:%#v: broken list between %p:%#v and %p:%#v",
miz, miz, newerMiz, newerMiz, olderMiz, olderMiz))
}
olderMiz.newer = newerMiz
newerMiz.older = olderMiz
miz.newer = nil
miz.older = nil
}
func (miz *ManagedInputZip) link(olderMiz *ManagedInputZip) {
if olderMiz.newer != nil || olderMiz.older != nil {
panic(fmt.Errorf("inputZip is already open"))
}
oldOlderMiz := miz.older
if oldOlderMiz.newer != miz {
panic(fmt.Errorf("broken list between %p:%#v and %p:%#v", miz, miz, oldOlderMiz, oldOlderMiz))
}
miz.older = olderMiz
olderMiz.older = oldOlderMiz
oldOlderMiz.newer = olderMiz
olderMiz.newer = miz
}
func NewInputZipsManager(nInputZips, maxOpenZips int) *InputZipsManager {
if maxOpenZips < 3 {
panic(fmt.Errorf("open zips limit should be above 3"))
}
// In the fake element .older points to the most recently opened InputZip, and .newer points to the oldest.
head := new(ManagedInputZip)
head.older = head
head.newer = head
return &InputZipsManager{
inputZips: make([]*ManagedInputZip, 0, nInputZips),
maxOpenZips: maxOpenZips,
openInputZips: head,
}
}
// InputZip factory
func (izm *InputZipsManager) Manage(inz InputZip) InputZip {
iz := &ManagedInputZip{owner: izm, realInputZip: inz}
izm.inputZips = append(izm.inputZips, iz)
return iz
}
// Opens or reopens ManagedInputZip.
func (izm *InputZipsManager) reopen(miz *ManagedInputZip) error {
if miz.realInputZip.IsOpen() {
if miz != izm.openInputZips {
miz.unlink()
izm.openInputZips.link(miz)
}
return nil
}
if izm.nOpenZips >= izm.maxOpenZips {
if err := izm.close(izm.openInputZips.older); err != nil {
return err
}
}
if err := miz.realInputZip.Open(); err != nil {
return err
}
izm.openInputZips.link(miz)
izm.nOpenZips++
return nil
}
func (izm *InputZipsManager) close(miz *ManagedInputZip) error {
if miz.IsOpen() {
err := miz.realInputZip.Close()
izm.nOpenZips--
miz.unlink()
return err
}
return nil
}
// Checks that openInputZips deque is valid
func (izm *InputZipsManager) checkOpenZipsDeque() {
nReallyOpen := 0
el := izm.openInputZips
for {
elNext := el.older
if elNext.newer != el {
panic(fmt.Errorf("Element:\n %p: %v\nNext:\n %p %v", el, el, elNext, elNext))
}
if elNext == izm.openInputZips {
break
}
el = elNext
if !el.IsOpen() {
panic(fmt.Errorf("Found unopened element"))
}
nReallyOpen++
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
if nReallyOpen > izm.nOpenZips {
panic(fmt.Errorf("found %d open zips, should be %d", nReallyOpen, izm.nOpenZips))
}
}
func (miz *ManagedInputZip) Name() string {
return miz.realInputZip.Name()
}
func (miz *ManagedInputZip) Open() error {
return miz.owner.reopen(miz)
}
func (miz *ManagedInputZip) Close() error {
return miz.owner.close(miz)
}
func (miz *ManagedInputZip) IsOpen() bool {
return miz.realInputZip.IsOpen()
}
func (miz *ManagedInputZip) Entries() []*zip.File {
if !miz.IsOpen() {
panic(fmt.Errorf("%s: is not open", miz.Name()))
}
return miz.realInputZip.Entries()
}
// Actual processing.
func mergeZips(inputZips []InputZip, writer *zip.Writer, manifest, pyMain string,
sortEntries, emulateJar, emulatePar, stripDirEntries, ignoreDuplicates bool,
excludeFiles, excludeDirs []string, zipsToNotStrip map[string]bool) error {
out := NewOutputZip(writer, sortEntries, emulateJar, stripDirEntries, ignoreDuplicates)
out.setExcludeFiles(excludeFiles)
out.setExcludeDirs(excludeDirs)
if manifest != "" {
if err := out.addManifest(manifest); err != nil {
return err
}
}
if pyMain != "" {
if err := out.addZipEntryFromFile("__main__.py", pyMain); err != nil {
return err
}
}
if emulatePar {
noInitPackages, err := out.getUninitializedPythonPackages(inputZips)
if err != nil {
return err
}
for _, uninitializedPyPackage := range noInitPackages {
if err = out.addEmptyEntry(filepath.Join(uninitializedPyPackage, "__init__.py")); err != nil {
return err
}
}
}
// Finally, add entries from all the input zips.
for _, inputZip := range inputZips {
_, copyFully := zipsToNotStrip[inputZip.Name()]
if err := inputZip.Open(); err != nil {
return err
}
for i, entry := range inputZip.Entries() {
if copyFully || !out.isEntryExcluded(entry.Name) {
if err := out.copyEntry(inputZip, i); err != nil {
return err
}
}
}
// Unless we need to rearrange the entries, the input zip can now be closed.
if !(emulateJar || sortEntries) {
if err := inputZip.Close(); err != nil {
return err
}
}
}
if emulateJar {
return out.writeEntries(out.jarSorted())
} else if sortEntries {
return out.writeEntries(out.alphanumericSorted())
}
return nil
}
// Process command line
type fileList []string
func (f *fileList) String() string {
return `""`
}
func (f *fileList) Set(name string) error {
*f = append(*f, filepath.Clean(name))
return nil
}
type zipsToNotStripSet map[string]bool
func (s zipsToNotStripSet) String() string {
return `""`
}
func (s zipsToNotStripSet) Set(path string) error {
s[path] = true
return nil
}
var (
sortEntries = flag.Bool("s", false, "sort entries (defaults to the order from the input zip files)")
emulateJar = flag.Bool("j", false, "sort zip entries using jar ordering (META-INF first)")
emulatePar = flag.Bool("p", false, "merge zip entries based on par format")
excludeDirs fileList
excludeFiles fileList
zipsToNotStrip = make(zipsToNotStripSet)
stripDirEntries = flag.Bool("D", false, "strip directory entries from the output zip file")
manifest = flag.String("m", "", "manifest file to insert in jar")
pyMain = flag.String("pm", "", "__main__.py file to insert in par")
prefix = flag.String("prefix", "", "A file to prefix to the zip file")
ignoreDuplicates = flag.Bool("ignore-duplicates", false, "take each entry from the first zip it exists in and don't warn")
)
func init() {
flag.Var(&excludeDirs, "stripDir", "directories to be excluded from the output zip, accepts wildcards")
flag.Var(&excludeFiles, "stripFile", "files to be excluded from the output zip, accepts wildcards")
flag.Var(&zipsToNotStrip, "zipToNotStrip", "the input zip file which is not applicable for stripping")
}
type FileInputZip struct {
name string
reader *zip.ReadCloser
}
func (fiz *FileInputZip) Name() string {
return fiz.name
}
func (fiz *FileInputZip) Close() error {
if fiz.IsOpen() {
reader := fiz.reader
fiz.reader = nil
return reader.Close()
}
return nil
}
func (fiz *FileInputZip) Entries() []*zip.File {
if !fiz.IsOpen() {
panic(fmt.Errorf("%s: is not open", fiz.Name()))
}
return fiz.reader.File
}
func (fiz *FileInputZip) IsOpen() bool {
return fiz.reader != nil
}
func (fiz *FileInputZip) Open() error {
if fiz.IsOpen() {
return nil
}
var err error
if fiz.reader, err = zip.OpenReader(fiz.Name()); err != nil {
return fmt.Errorf("%s: %s", fiz.Name(), err.Error())
}
return nil
}
func main() {
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: merge_zips [-jpsD] [-m manifest] [--prefix script] [-pm __main__.py] OutputZip [inputs...]")
flag.PrintDefaults()
}
// parse args
flag.Parse()
args := flag.Args()
if len(args) < 1 {
flag.Usage()
os.Exit(1)
}
outputPath := args[0]
inputs := make([]string, 0)
for _, input := range args[1:] {
if input[0] == '@' {
f, err := os.Open(strings.TrimPrefix(input[1:], "@"))
if err != nil {
log.Fatal(err)
}
rspInputs, err := response.ReadRspFile(f)
f.Close()
if err != nil {
log.Fatal(err)
}
inputs = append(inputs, rspInputs...)
} else {
inputs = append(inputs, input)
}
}
log.SetFlags(log.Lshortfile)
// make writer
outputZip, err := os.Create(outputPath)
if err != nil {
log.Fatal(err)
}
defer outputZip.Close()
var offset int64
if *prefix != "" {
prefixFile, err := os.Open(*prefix)
if err != nil {
log.Fatal(err)
}
offset, err = io.Copy(outputZip, prefixFile)
if err != nil {
log.Fatal(err)
}
}
writer := zip.NewWriter(outputZip)
defer func() {
err := writer.Close()
if err != nil {
log.Fatal(err)
}
}()
writer.SetOffset(offset)
if *manifest != "" && !*emulateJar {
log.Fatal(errors.New("must specify -j when specifying a manifest via -m"))
}
if *pyMain != "" && !*emulatePar {
log.Fatal(errors.New("must specify -p when specifying a Python __main__.py via -pm"))
}
// do merge
inputZipsManager := NewInputZipsManager(len(inputs), 1000)
inputZips := make([]InputZip, len(inputs))
for i, input := range inputs {
inputZips[i] = inputZipsManager.Manage(&FileInputZip{name: input})
}
err = mergeZips(inputZips, writer, *manifest, *pyMain, *sortEntries, *emulateJar, *emulatePar,
*stripDirEntries, *ignoreDuplicates, []string(excludeFiles), []string(excludeDirs),
map[string]bool(zipsToNotStrip))
if err != nil {
log.Fatal(err)
}
}
| {
entries := oz.entriesArray()
sort.SliceStable(entries, func(i, j int) bool { return jar.EntryNamesLess(entries[i], entries[j]) })
return entries
} | identifier_body |
submasterDurations.py | #!/tps/bin/python -B
import os, sys, json, re
from math import floor
import elasticsearch1, urllib3
from elasticsearch1 import helpers
pwd = os.getcwd()
sys.path.insert(0, '{}/msl-datalytics/src/'.format(pwd))
from spazz import *
import timeit
start = time.time()
# from msldatalytics.src.spazz import *
#from spazz import *
es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov', sniff_on_start=False)
# es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov',sniff_on_start=False)
urllib3.disable_warnings()
global index
index = 'mslice_db'
def main():
#Query for all submasters. We want all activity groups (Pie observations) where the seqID field = sub_XXXX in the last 1000 sols.
# --------------------------------------------- Input Parameters and Initializaton -------------------------------------------------
# parameters that should eventually be inputs
verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
| {"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active' in child.lower()) or ('mahli merges' in child.lower())or ('sapp_rimu_data_collection' in child.lower()) or ('sam' in child.lower()):
seqIdDict['backboneType'].append('otherSci')
# if I can't identify it as one of the above, then print to screen to help find other problems, and also flag it as unidentified.
else:
unidentifiedBackbones.append(child)
if 'unidentified' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('unidentified')
# if I couldn't find a margin, then throw an error
if (ii == (len(result['_source']['Children'])-1) and marginsFound == 0):
if verbose:
print('')
print('ERROR: Unable to find a margin associated with ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('List of children for ' + seqId + ':')
print(result['_source']['Children'])
print('')
keepSeqId = False
noMarginFoundChildNames += result['_source']['Children']
numMissingMarginErrors += 1
continue
if keepSeqId:
# now query for actuals
hits, _ = spazzObj.get_as_run_sequences(seqids=[seqId])
# print("NEVER GOT HERE")
if (len(hits) >= 1):
actual_found = False
for kk, hit in enumerate(hits):
#actuals database doesn't have master sol. It has master seqID and execution start time. Can backsolve with those to determine mastersol:
# mstr00XXX is either sol 0XXX,1XXX, or 2XXX. execution times on 2164 or 2165 may be associated with master sol 2164.
# so borrow the first digit from execution time, and the last three from master sequence ID, and voila, a master sol number
actuals_temp_execution_sol = int(hits[kk]['start_lmst'][4:8])
mstrSeqId = int(hits[kk]['parent'][4:])
actuals_temp_master_sol = mstrSeqId+(actuals_temp_execution_sol//1000*1000)
#Now correlate
if actuals_temp_master_sol == seqIdDict['masterSol']:
actual_found = True
seqIdDict['actActivityDur'] = hits[kk]['dur_earth']
#calculate actual margin
seqIdDict['actMarginDur'] = seqIdDict['planTotalDur'] - seqIdDict['actActivityDur']
break
if not actual_found:
if verbose:
print('')
print('ERROR: Found one or more as run durations associated with submaster: ' + seqId + ' on sol ' +str(masterSol)+', ')
print('but could not find a corresponding actual duration on this sol. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMultipleActualsErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to find an actual execution duration for submaster: ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMissingActualsErrors += 1
continue
if keepSeqId:
#calculate the activity duration
seqIdDict['planActivityDur'] = seqIdDict['planTotalDur']-seqIdDict['planMarginDur']
submasters[uniqueID] = seqIdDict
# --------------------------------------- Print Errors and summaries of dropped entries -----------------------------------------
print('')
print('Kept ' + str(len(submasters)) + ' of ' + str(totalHits) + ' for analysis.')
print('Removed ' + str(numDuplicateSubsErrors) + ' submasters because of duplication in the databse.')
print('Removed ' + str(numKeepOutSolsErrors) + ' submasters because of user defined keep out sols.')
print('Removed ' + str(numSubDatabaseErrors) + ' submasters because of errors associated with reading expected fields in the database.')
print('Removed ' + str(numMissingMarginErrors) + ' submasters because script could not identify the associated margin.')
print('Removed ' + str(numMarginDatabaseErrors) + ' submasters because there were database issues with the identified margin.')
print('Removed ' + str(numMultipleActualsErrors) + ' submasters because there were database issues with the identified actual durations (implying it may not have executed).')
print('Removed ' + str(numMissingActualsErrors) + ' submasters because there were no actuals for the submaster (implying it did not execute).')
with open(filename + '.json', 'w') as fp:
json.dump(submasters, fp, sort_keys=True, indent=4, encoding = 'utf-8')
with open('unidentifiedChildren.json', 'w') as fp2:
json.dump(unidentifiedBackbones, fp2, sort_keys=True, indent=4)
with open('differentNamesforMargin.json', 'w') as fp3:
json.dump(marginNamesSanityCheck, fp3, sort_keys = True, indent= 4)
with open('childNamesWhenMissingMargins.json', 'w') as fp3:
json.dump(noMarginFoundChildNames, fp3, sort_keys = True, indent= 4)
print('Successfully wrote output to ' + filename + '.json')
print('Script Complete')
end = time.time()
mins = 0
result_time = end - start
if result_time > 60:
mins = int(floor(result_time/60))
seconds = int(floor(result_time % 60))
print("Run time: {} minutes {} seconds".format(mins, seconds))
else:
print("Run time: {} seconds".format(result_time))
#print(submasters)
###############################################################################
#def index_docs(docs):
# helpers.bulk(es,docs)
###############################################################################
def usage(): #Prints out usage statement
print("")
print(sys.argv[0])
print("Analyzes the durations of Submasters and associated parameters for the Margin Workging Group\n")
print("USAGE:")
###############################################################################
if __name__ == "__main__":
main() | random_line_split | |
submasterDurations.py | #!/tps/bin/python -B
import os, sys, json, re
from math import floor
import elasticsearch1, urllib3
from elasticsearch1 import helpers
pwd = os.getcwd()
sys.path.insert(0, '{}/msl-datalytics/src/'.format(pwd))
from spazz import *
import timeit
start = time.time()
# from msldatalytics.src.spazz import *
#from spazz import *
es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov', sniff_on_start=False)
# es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov',sniff_on_start=False)
urllib3.disable_warnings()
global index
index = 'mslice_db'
def main():
#Query for all submasters. We want all activity groups (Pie observations) where the seqID field = sub_XXXX in the last 1000 sols.
# --------------------------------------------- Input Parameters and Initializaton -------------------------------------------------
# parameters that should eventually be inputs
|
###############################################################################
#def index_docs(docs):
# helpers.bulk(es,docs)
###############################################################################
def usage(): #Prints out usage statement
print("")
print(sys.argv[0])
print("Analyzes the durations of Submasters and associated parameters for the Margin Workging Group\n")
print("USAGE:")
###############################################################################
if __name__ == "__main__":
main()
| verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active' in child.lower()) or ('mahli merges' in child.lower())or ('sapp_rimu_data_collection' in child.lower()) or ('sam' in child.lower()):
seqIdDict['backboneType'].append('otherSci')
# if I can't identify it as one of the above, then print to screen to help find other problems, and also flag it as unidentified.
else:
unidentifiedBackbones.append(child)
if 'unidentified' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('unidentified')
# if I couldn't find a margin, then throw an error
if (ii == (len(result['_source']['Children'])-1) and marginsFound == 0):
if verbose:
print('')
print('ERROR: Unable to find a margin associated with ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('List of children for ' + seqId + ':')
print(result['_source']['Children'])
print('')
keepSeqId = False
noMarginFoundChildNames += result['_source']['Children']
numMissingMarginErrors += 1
continue
if keepSeqId:
# now query for actuals
hits, _ = spazzObj.get_as_run_sequences(seqids=[seqId])
# print("NEVER GOT HERE")
if (len(hits) >= 1):
actual_found = False
for kk, hit in enumerate(hits):
#actuals database doesn't have master sol. It has master seqID and execution start time. Can backsolve with those to determine mastersol:
# mstr00XXX is either sol 0XXX,1XXX, or 2XXX. execution times on 2164 or 2165 may be associated with master sol 2164.
# so borrow the first digit from execution time, and the last three from master sequence ID, and voila, a master sol number
actuals_temp_execution_sol = int(hits[kk]['start_lmst'][4:8])
mstrSeqId = int(hits[kk]['parent'][4:])
actuals_temp_master_sol = mstrSeqId+(actuals_temp_execution_sol//1000*1000)
#Now correlate
if actuals_temp_master_sol == seqIdDict['masterSol']:
actual_found = True
seqIdDict['actActivityDur'] = hits[kk]['dur_earth']
#calculate actual margin
seqIdDict['actMarginDur'] = seqIdDict['planTotalDur'] - seqIdDict['actActivityDur']
break
if not actual_found:
if verbose:
print('')
print('ERROR: Found one or more as run durations associated with submaster: ' + seqId + ' on sol ' +str(masterSol)+', ')
print('but could not find a corresponding actual duration on this sol. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMultipleActualsErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to find an actual execution duration for submaster: ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMissingActualsErrors += 1
continue
if keepSeqId:
#calculate the activity duration
seqIdDict['planActivityDur'] = seqIdDict['planTotalDur']-seqIdDict['planMarginDur']
submasters[uniqueID] = seqIdDict
# --------------------------------------- Print Errors and summaries of dropped entries -----------------------------------------
print('')
print('Kept ' + str(len(submasters)) + ' of ' + str(totalHits) + ' for analysis.')
print('Removed ' + str(numDuplicateSubsErrors) + ' submasters because of duplication in the databse.')
print('Removed ' + str(numKeepOutSolsErrors) + ' submasters because of user defined keep out sols.')
print('Removed ' + str(numSubDatabaseErrors) + ' submasters because of errors associated with reading expected fields in the database.')
print('Removed ' + str(numMissingMarginErrors) + ' submasters because script could not identify the associated margin.')
print('Removed ' + str(numMarginDatabaseErrors) + ' submasters because there were database issues with the identified margin.')
print('Removed ' + str(numMultipleActualsErrors) + ' submasters because there were database issues with the identified actual durations (implying it may not have executed).')
print('Removed ' + str(numMissingActualsErrors) + ' submasters because there were no actuals for the submaster (implying it did not execute).')
with open(filename + '.json', 'w') as fp:
json.dump(submasters, fp, sort_keys=True, indent=4, encoding = 'utf-8')
with open('unidentifiedChildren.json', 'w') as fp2:
json.dump(unidentifiedBackbones, fp2, sort_keys=True, indent=4)
with open('differentNamesforMargin.json', 'w') as fp3:
json.dump(marginNamesSanityCheck, fp3, sort_keys = True, indent= 4)
with open('childNamesWhenMissingMargins.json', 'w') as fp3:
json.dump(noMarginFoundChildNames, fp3, sort_keys = True, indent= 4)
print('Successfully wrote output to ' + filename + '.json')
print('Script Complete')
end = time.time()
mins = 0
result_time = end - start
if result_time > 60:
mins = int(floor(result_time/60))
seconds = int(floor(result_time % 60))
print("Run time: {} minutes {} seconds".format(mins, seconds))
else:
print("Run time: {} seconds".format(result_time))
#print(submasters)
| identifier_body |
submasterDurations.py | #!/tps/bin/python -B
import os, sys, json, re
from math import floor
import elasticsearch1, urllib3
from elasticsearch1 import helpers
pwd = os.getcwd()
sys.path.insert(0, '{}/msl-datalytics/src/'.format(pwd))
from spazz import *
import timeit
start = time.time()
# from msldatalytics.src.spazz import *
#from spazz import *
es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov', sniff_on_start=False)
# es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov',sniff_on_start=False)
urllib3.disable_warnings()
global index
index = 'mslice_db'
def main():
#Query for all submasters. We want all activity groups (Pie observations) where the seqID field = sub_XXXX in the last 1000 sols.
# --------------------------------------------- Input Parameters and Initializaton -------------------------------------------------
# parameters that should eventually be inputs
verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
|
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active' in child.lower()) or ('mahli merges' in child.lower())or ('sapp_rimu_data_collection' in child.lower()) or ('sam' in child.lower()):
seqIdDict['backboneType'].append('otherSci')
# if I can't identify it as one of the above, then print to screen to help find other problems, and also flag it as unidentified.
else:
unidentifiedBackbones.append(child)
if 'unidentified' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('unidentified')
# if I couldn't find a margin, then throw an error
if (ii == (len(result['_source']['Children'])-1) and marginsFound == 0):
if verbose:
print('')
print('ERROR: Unable to find a margin associated with ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('List of children for ' + seqId + ':')
print(result['_source']['Children'])
print('')
keepSeqId = False
noMarginFoundChildNames += result['_source']['Children']
numMissingMarginErrors += 1
continue
if keepSeqId:
# now query for actuals
hits, _ = spazzObj.get_as_run_sequences(seqids=[seqId])
# print("NEVER GOT HERE")
if (len(hits) >= 1):
actual_found = False
for kk, hit in enumerate(hits):
#actuals database doesn't have master sol. It has master seqID and execution start time. Can backsolve with those to determine mastersol:
# mstr00XXX is either sol 0XXX,1XXX, or 2XXX. execution times on 2164 or 2165 may be associated with master sol 2164.
# so borrow the first digit from execution time, and the last three from master sequence ID, and voila, a master sol number
actuals_temp_execution_sol = int(hits[kk]['start_lmst'][4:8])
mstrSeqId = int(hits[kk]['parent'][4:])
actuals_temp_master_sol = mstrSeqId+(actuals_temp_execution_sol//1000*1000)
#Now correlate
if actuals_temp_master_sol == seqIdDict['masterSol']:
actual_found = True
seqIdDict['actActivityDur'] = hits[kk]['dur_earth']
#calculate actual margin
seqIdDict['actMarginDur'] = seqIdDict['planTotalDur'] - seqIdDict['actActivityDur']
break
if not actual_found:
if verbose:
print('')
print('ERROR: Found one or more as run durations associated with submaster: ' + seqId + ' on sol ' +str(masterSol)+', ')
print('but could not find a corresponding actual duration on this sol. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMultipleActualsErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to find an actual execution duration for submaster: ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMissingActualsErrors += 1
continue
if keepSeqId:
#calculate the activity duration
seqIdDict['planActivityDur'] = seqIdDict['planTotalDur']-seqIdDict['planMarginDur']
submasters[uniqueID] = seqIdDict
# --------------------------------------- Print Errors and summaries of dropped entries -----------------------------------------
print('')
print('Kept ' + str(len(submasters)) + ' of ' + str(totalHits) + ' for analysis.')
print('Removed ' + str(numDuplicateSubsErrors) + ' submasters because of duplication in the databse.')
print('Removed ' + str(numKeepOutSolsErrors) + ' submasters because of user defined keep out sols.')
print('Removed ' + str(numSubDatabaseErrors) + ' submasters because of errors associated with reading expected fields in the database.')
print('Removed ' + str(numMissingMarginErrors) + ' submasters because script could not identify the associated margin.')
print('Removed ' + str(numMarginDatabaseErrors) + ' submasters because there were database issues with the identified margin.')
print('Removed ' + str(numMultipleActualsErrors) + ' submasters because there were database issues with the identified actual durations (implying it may not have executed).')
print('Removed ' + str(numMissingActualsErrors) + ' submasters because there were no actuals for the submaster (implying it did not execute).')
with open(filename + '.json', 'w') as fp:
json.dump(submasters, fp, sort_keys=True, indent=4, encoding = 'utf-8')
with open('unidentifiedChildren.json', 'w') as fp2:
json.dump(unidentifiedBackbones, fp2, sort_keys=True, indent=4)
with open('differentNamesforMargin.json', 'w') as fp3:
json.dump(marginNamesSanityCheck, fp3, sort_keys = True, indent= 4)
with open('childNamesWhenMissingMargins.json', 'w') as fp3:
json.dump(noMarginFoundChildNames, fp3, sort_keys = True, indent= 4)
print('Successfully wrote output to ' + filename + '.json')
print('Script Complete')
end = time.time()
mins = 0
result_time = end - start
if result_time > 60:
mins = int(floor(result_time/60))
seconds = int(floor(result_time % 60))
print("Run time: {} minutes {} seconds".format(mins, seconds))
else:
print("Run time: {} seconds".format(result_time))
#print(submasters)
###############################################################################
#def index_docs(docs):
# helpers.bulk(es,docs)
###############################################################################
def usage(): #Prints out usage statement
print("")
print(sys.argv[0])
print("Analyzes the durations of Submasters and associated parameters for the Margin Workging Group\n")
print("USAGE:")
###############################################################################
if __name__ == "__main__":
main()
| print("{}%".format(percentComplete))
percentComplete+=1 | conditional_block |
submasterDurations.py | #!/tps/bin/python -B
import os, sys, json, re
from math import floor
import elasticsearch1, urllib3
from elasticsearch1 import helpers
pwd = os.getcwd()
sys.path.insert(0, '{}/msl-datalytics/src/'.format(pwd))
from spazz import *
import timeit
start = time.time()
# from msldatalytics.src.spazz import *
#from spazz import *
es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov', sniff_on_start=False)
# es = elasticsearch1.Elasticsearch('https://msl-ops-es.cld.jpl.nasa.gov',sniff_on_start=False)
urllib3.disable_warnings()
global index
index = 'mslice_db'
def | ():
#Query for all submasters. We want all activity groups (Pie observations) where the seqID field = sub_XXXX in the last 1000 sols.
# --------------------------------------------- Input Parameters and Initializaton -------------------------------------------------
# parameters that should eventually be inputs
verbose = False # a verbose flag that identifies every time a submaster was rejected from the analysis
filename = 'demonstrationoutput' # name of the .json file output to be used as a pseudo-database
queryLen = 5000 # how large do we let the query get. Currently we wouldn't want anything larger than 5000 results
# earliestSol = 2170 # the earliest sol of results we want to include in our data. With our naming convention for submaster sequences we should only query within modulo 1000
#note that margin strategy changed on 2169
#================================================================================================================================================
#======================================================INPUT=====================================================================================
starting_Sol = 2000
latestSol = 2150
# while(earliestSol == 0 and latestSol == 0):
# inputstart = input("Start Sol: ")
# inputend = input("End Sol: ")
# earliestSol = inputstart
# latestSol = inputend
#================================================================================================================================================
#================================================================================================================================================
#================================================================================================================================================
keepOutSols = range(1759, 1779)+range(2172,2209)+range(2320,2348) # a list of soles we know we don't want to include in the results;
#1759-1779 = conjunction; 2172-2209 = 2172 anomaly recovery; 2320-2348 = Safing on RCE-A on 2320 and again on 2339 and subsequent swap to B
# create some counters that explain the reason for dropping various submasters
numDuplicateSubsErrors = 0
numKeepOutSolsErrors = 0
numSubDatabaseErrors = 0
numMissingMarginErrors = 0
numMarginDatabaseErrors = 0
numMissingActualsErrors = 0
numMultipleActualsErrors = 0
# initialize Spazz for a future query
spazzObj = spazz({'beginTime' : "Sol-" + str(starting_Sol) + "M00:00:00",'timeType': "LST"})
#initialize the query
# the "not" line should remove all instances of sub_00000
# This query is essensially a frame work for the elasticsearch to base off from. It continuosly parses through EVR files to
# match tihs query.
query = {
"query": {
"filtered": {
"query": {
"bool" : {
"must":[
{ "match": {"seqId":"sub"}}
]
}
},
"filter": {
"bool":{
"must":[
{"range" : {
"planSol" : {
"gte" : starting_Sol,
"lte" : latestSol
}
}},
{"term" : {"Tag" : "activitygroup" }},
{"not": {"term" : {"seqId": "00000"}}}
]
}
}
}
},
"size": queryLen,
"_source": ["seqId","Duration","Children","masterSol", "seqgenDuration"],
"sort": { "masterSol": { "order": "desc" }}
}
# ------------------------------------------ Search ---------------------------------------------------
#send query to ES and reduce it down to results
search = es.search(index=index, body=query)
results = search['hits']['hits']
totalHits = len(search['hits']['hits'])
# print("Results are ======== ", )search
#create a variable to store unidentified backbone child names for troubleshooting
unidentifiedBackbones = []
marginNamesSanityCheck = []
#create a variable to store submaster children when the script couldn' identify the associated margin
noMarginFoundChildNames = []
#initialize a new dict to reorganize the information
submasters = {};
# ------------------------------ iterate through results; build pseudo database ----------------------------
# loop through the submasters and populate a new entry in the submasters dict
percentComplete = 0
for count,result in enumerate(results):
#print a message every 10% of the results that has been analyzed
if floor(totalHits/100) == False:
pass
elif (count % (floor(totalHits/100))) == 0: #This is smart lol
print("{}%".format(percentComplete))
percentComplete+=1
seqId = result['_source']['seqId']
# masterSol = int(result['_source']['masterSol'])
masterSol = int(result['_source'].get('masterSol',"0"))
uniqueID = 'sol' + str(masterSol)+'_' + seqId
# initialize a new entry in the temporary submasters dict for this submaster sequence
keepSeqId = True
seqIdDict = {}
# print("Am I getting data?", masterSol)
# Skip all EKO's sub_00000; this should never happen so if it does, please warn user
if seqId == 'sub_00000':
print('')
print('ERROR: Found an unexpected sub_00000; this should not be possible with the query. It will be ignored.')
print('')
keepSeqId = False
continue
# the user can define keep out sols, such as Conjunction or holiday plannning. Immediately ignore these sols from analysis as they will skew our data.
elif masterSol in keepOutSols:
if verbose:
print('')
print('ERROR: Submaster ' + seqId + ' on sol' + str(masterSol) +' falls in the user defined keepOutSols. It will be ignored.')
print('')
keepSeqId = False
numKeepOutSolsErrors += 1
continue
else:
try:
# calculate and initialize the planned duration fields
seqIdDict['seqId'] = seqId
seqIdDict['masterSol'] = masterSol
seqIdDict['backboneType'] = []
seqIdDict['planTotalDur'] = result['_source']['Duration']
seqIdDict['planMarginDur'] = 0
seqIdDict['uniqueID'] = uniqueID
# calculate and initialize the seqgen duration fields
#seqIdDict['totalSeqgenDuration'] = result['_source']['seqgenDuration']
#seqIdDict['totalSeqgenDurationMinutes'] = round(result['_source']['seqgenDuration']/60, 2)
except:
if verbose:
print('')
print('ERROR: Could not identify Duration field for the submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numSubDatabaseErrors+=1
continue
# loop through children to identify the backbone type,
marginsFound = 0
# if we find a margin, query for it's duration
for ii, child in enumerate(result['_source']['Children']):
# see if this child has margin in its string identifier
if 'margin' in child.lower():
# there is a templated activity called: APXS Short Standalone with margin + cleanup
# If it is that ignore it
if 'apxs' in child.lower():
seqIdDict['backboneType'].append('unidentified')
else:
marginsFound+=1
# if margin is in the name, identify and extract the id
idRegex = r"\(sol\d{5}_tap_end_of_sol_.{22}\)$"
idMatch = re.search(idRegex, child)
# if you can successfully identify the id, then break it out, else print error message
if idMatch:
#if you need the name it is here:
childName = child[:idMatch.start()]
if childName not in marginNamesSanityCheck:
marginNamesSanityCheck.append(childName)
#grab the child Id, remove the parentheses, so we can identify it in the database
childId = child[idMatch.start()+1:idMatch.end()-1]
#get margin information with a direct query
marginEntry = es.get(id=childId, index=index)
try:
#store the margin duration as a running sum (for when there are multiple margins associated with a single submaster)
seqIdDict['planMarginDur'] += marginEntry['_source']['Duration']
continue
except:
if verbose:
print('')
print('ERROR: Could not identify a duration for the identified margin activity for submaster ' + seqId)
print('Excluding submaster ' + seqId + ' from results.')
print('Margin activity results were: ')
print(marginEntry)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to identify an id for the child:' + child + '. Removing submaster ' + seqId + ' from results')
print('Child string that was searched:')
print(child)
print('')
keepSeqId = False
numMarginDatabaseErrors += 1
continue
# if I can successfully identify a Science Block, then identify that as the type
elif (('science block' in child.lower()) or ('sb' in child.lower())) and 'SB' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('SB')
# if I can successfully identify Post Drive imaging, then identify that as the type
elif (('pdi' in child.lower()) or ('post-drive imaging' in child.lower())) and 'PDI' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('PDI')
# if I can successfully identify a mobility backbone, then identify that as the type
elif 'mobility backbone' in child.lower() and 'drive' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('drive')
# if I can successfully identify an arm backbone, then identify that as the type
elif 'arm' in child.lower() and 'arm' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('arm')
# identify ECAM imaging
elif (('slip assessment' in child.lower()) or ('ecam trending' in child.lower())) and 'ECAM' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('ECAM')
# ignore dan actives, mahli merges, SAPP_RIMU_DATA_Collection, and SAM activities (for now).
elif ('dan_active' in child.lower()) or ('mahli merges' in child.lower())or ('sapp_rimu_data_collection' in child.lower()) or ('sam' in child.lower()):
seqIdDict['backboneType'].append('otherSci')
# if I can't identify it as one of the above, then print to screen to help find other problems, and also flag it as unidentified.
else:
unidentifiedBackbones.append(child)
if 'unidentified' not in seqIdDict['backboneType']:
seqIdDict['backboneType'].append('unidentified')
# if I couldn't find a margin, then throw an error
if (ii == (len(result['_source']['Children'])-1) and marginsFound == 0):
if verbose:
print('')
print('ERROR: Unable to find a margin associated with ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('List of children for ' + seqId + ':')
print(result['_source']['Children'])
print('')
keepSeqId = False
noMarginFoundChildNames += result['_source']['Children']
numMissingMarginErrors += 1
continue
if keepSeqId:
# now query for actuals
hits, _ = spazzObj.get_as_run_sequences(seqids=[seqId])
# print("NEVER GOT HERE")
if (len(hits) >= 1):
actual_found = False
for kk, hit in enumerate(hits):
#actuals database doesn't have master sol. It has master seqID and execution start time. Can backsolve with those to determine mastersol:
# mstr00XXX is either sol 0XXX,1XXX, or 2XXX. execution times on 2164 or 2165 may be associated with master sol 2164.
# so borrow the first digit from execution time, and the last three from master sequence ID, and voila, a master sol number
actuals_temp_execution_sol = int(hits[kk]['start_lmst'][4:8])
mstrSeqId = int(hits[kk]['parent'][4:])
actuals_temp_master_sol = mstrSeqId+(actuals_temp_execution_sol//1000*1000)
#Now correlate
if actuals_temp_master_sol == seqIdDict['masterSol']:
actual_found = True
seqIdDict['actActivityDur'] = hits[kk]['dur_earth']
#calculate actual margin
seqIdDict['actMarginDur'] = seqIdDict['planTotalDur'] - seqIdDict['actActivityDur']
break
if not actual_found:
if verbose:
print('')
print('ERROR: Found one or more as run durations associated with submaster: ' + seqId + ' on sol ' +str(masterSol)+', ')
print('but could not find a corresponding actual duration on this sol. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMultipleActualsErrors += 1
continue
else:
if verbose:
print('')
print('ERROR: Unable to find an actual execution duration for submaster: ' + seqId + '. Removing submaster ' + seqId + ' from results')
print('')
keepSeqId = False
numMissingActualsErrors += 1
continue
if keepSeqId:
#calculate the activity duration
seqIdDict['planActivityDur'] = seqIdDict['planTotalDur']-seqIdDict['planMarginDur']
submasters[uniqueID] = seqIdDict
# --------------------------------------- Print Errors and summaries of dropped entries -----------------------------------------
print('')
print('Kept ' + str(len(submasters)) + ' of ' + str(totalHits) + ' for analysis.')
print('Removed ' + str(numDuplicateSubsErrors) + ' submasters because of duplication in the databse.')
print('Removed ' + str(numKeepOutSolsErrors) + ' submasters because of user defined keep out sols.')
print('Removed ' + str(numSubDatabaseErrors) + ' submasters because of errors associated with reading expected fields in the database.')
print('Removed ' + str(numMissingMarginErrors) + ' submasters because script could not identify the associated margin.')
print('Removed ' + str(numMarginDatabaseErrors) + ' submasters because there were database issues with the identified margin.')
print('Removed ' + str(numMultipleActualsErrors) + ' submasters because there were database issues with the identified actual durations (implying it may not have executed).')
print('Removed ' + str(numMissingActualsErrors) + ' submasters because there were no actuals for the submaster (implying it did not execute).')
with open(filename + '.json', 'w') as fp:
json.dump(submasters, fp, sort_keys=True, indent=4, encoding = 'utf-8')
with open('unidentifiedChildren.json', 'w') as fp2:
json.dump(unidentifiedBackbones, fp2, sort_keys=True, indent=4)
with open('differentNamesforMargin.json', 'w') as fp3:
json.dump(marginNamesSanityCheck, fp3, sort_keys = True, indent= 4)
with open('childNamesWhenMissingMargins.json', 'w') as fp3:
json.dump(noMarginFoundChildNames, fp3, sort_keys = True, indent= 4)
print('Successfully wrote output to ' + filename + '.json')
print('Script Complete')
end = time.time()
mins = 0
result_time = end - start
if result_time > 60:
mins = int(floor(result_time/60))
seconds = int(floor(result_time % 60))
print("Run time: {} minutes {} seconds".format(mins, seconds))
else:
print("Run time: {} seconds".format(result_time))
#print(submasters)
###############################################################################
#def index_docs(docs):
# helpers.bulk(es,docs)
###############################################################################
def usage(): #Prints out usage statement
print("")
print(sys.argv[0])
print("Analyzes the durations of Submasters and associated parameters for the Margin Workging Group\n")
print("USAGE:")
###############################################################################
if __name__ == "__main__":
main()
| main | identifier_name |
transaction.component.ts | import { LocationComponent } from './location/location.component';
import { NgbModal, NgbModalRef } from '@ng-bootstrap/ng-bootstrap';
import { GooleMapsService } from './../../../../service/googlemaps.service';
import { CheckValueSevice } from './../../../../service/check-value.sevice';
import { WalletService } from './../../../../service/wallet.service';
import { FomatDateService } from './../../../../service/fomatDate.service';
import { ITransaction } from './../../../../model/transaction.model';
import { IDate } from './../../../../model/date.model';
import { ToastsManager } from 'ng2-toastr/ng2-toastr';
import { Component, ViewChild, ViewContainerRef } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { TransactionService } from '../../../../service/transaction.service';
import { } from '@types/googlemaps';
import { FormControl, ReactiveFormsModule } from '@angular/forms';
declare var $: any;
declare var google: any;
@Component({
selector: 'app-transaction',
styleUrls: ['./transaction.component.scss'],
templateUrl: './transaction.component.html',
})
export class TransactionComponent {
dataIncome: Array<any>;
dataExpense: Array<any>;
dataDebtLoan: Array<any>;
// hiện thị phần thêm chi tiết
public adddetail = true;
// KHỞI TẠO CÁC BIẾN VỊ TRÍ
lat: number = 10.812035;
lng: number = 106.7119887
zoom: number = 14;
// DANH SÁCH TẤT CẢ CÁC ĐỊA ĐIỂM
allPlace: any[] = [];
// OBJCET ĐỊA ĐIỂM
objLocation = {
lat: 10.812035,
lng: 106.7119887,
name: "Đặt vị trí",
}
dataWallets: Array<any>;
infoCheckMoney: any = {};
public modalCheckMoney: NgbModalRef;
titleTransaction: String = "Thêm Giao Dịch";
nameButtonTransaction: String = "Thêm Giao Dịch";
dateCurrent = new Date();
nameWallet: String = '';
// TRANSACTION DEFAULT
transaction: ITransaction = {
groupcategory: '',
idcategory: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// URL HÌNH ẢNH
public url: String = '';
private fileToUpload: File = null;
ngOnInit() {
// LẤY TẤT CẢ CÁC VÍ HIỂN THỊ LÊN
this.getDataWallets();
// LẤY TOẠ ĐỘ Ở VỊ TRÍ HIỆN TẠI
this.setCurrentPosition();
}
constructor(private FomatDateService: FomatDateService,
private WalletService: WalletService,
private modalService: NgbModal,
private checkvalue: CheckValueSevice,
private TransactionService: TransactionService,
private ActivatedRoute: ActivatedRoute,
private GooleMapsService: GooleMapsService,
public toastr: ToastsManager,
vcr: ViewContainerRef,
) {
this.toastr.setRootViewContainerRef(vcr);
// LẤY TÊN VÍ HIỆN THỊ LÊN GIAO DIỆN
this.paramIdWalletURL();
// PHẦN CHỨC NĂNG TAG USER
let thisglob = this;
window.onload = function () {
$('#taguser').tagEditor({
autocomplete: {
delay: 0.15,
position: { collision: 'flip' },
source: ['ActionScript', 'AppleScript', 'Asp', 'BASIC', 'C', 'C++', 'CSS', 'Clojure', 'COBOL', 'ColdFusion', 'Erlang', 'Fortran', 'Groovy', 'Haskell', 'HTML', 'Java', 'JavaScript', 'Lisp', 'Perl', 'PHP', 'Python', 'Ruby', 'Scala', 'Scheme']
},
forceLowercase: false,
placeholder: 'Với',
onChange: (field, editor, tags) => {
thisglob.transaction.taguser = tags;
}
});
}
}
// LẤY FILE
onSelectFile(event) {
if (event.target.files && event.target.files[0]) {
var reader = new FileReader();
this.fileToUpload = event.target.files[0];
reader.readAsDataURL(event.target.files[0]);
reader.onload = (event: any) => {
this.url = event.target.result;
}
}
}
// HÀM LẤY DATA TẤT CÁ CẢ VÍ
getDataWallets() {
this.WalletService.getDataWallets();
this.WalletService.getAllWallet.subscribe((wallet) => {
this.dataWallets = wallet;
})
}
changeMoneyWallet() {
let obj = {
_id: this.transaction.idwallet,
money: this.infoCheckMoney.moneytrnasction,
namewallet: this.infoCheckMoney.namewallet
}
this.WalletService.updateDataWallet(obj)
.then((result) => {
this.modalCheckMoney.close();
// CHỈNH SỬA XONG CẬP NHẬT LẠI GIAO DIỆN MỚI
this.reloadData();
this.toastr.success('Điều chỉnh số tiền trong ví thành công ! ', 'Success ! ');
});
}
changeMoneyTransaction() {
this.transaction.moneytransaction = this.infoCheckMoney.moneywallet;
this.modalCheckMoney.close();
}
// SUMMIT GỬI GIAO DỊCH
submitTransaction(modalCheckMoney) {
if (this.transaction.groupcategory == '') {
this.toastr.warning('Vui lòng chọn category ! ', 'Cảnh báo ! ');
} else if (this.transaction.moneytransaction == '') {
this.toastr.warning('Vui lòng nhập số tiền vào ! ', 'Cảnh báo ! ');
} else if (isNaN(Number.parseInt(this.transaction.moneytransaction.toString()))) {
this.toastr.warning('Số tiền phải là 1 số ! ', 'Waring ! ');
} else {
let checkMoney = true;
if (this.transaction.groupcategory == "expense") {
this.dataWallets.forEach((wallet) => {
if (wallet._id == this.transaction.idwallet) {
if ((Number.parseInt(this.transaction.moneytransaction.toString())) > wallet.money) {
this.infoCheckMoney['moneywallet'] = wallet.money;
this.infoCheckMoney['moneytrnasction'] = this.transaction.moneytransaction;
checkMoney = false;
}
}
})
}
if (checkMoney == true) {
// thay đổi dấu
if (this.transaction.groupcategory == "income" || this.transaction.groupcategory == "debt") {
if (Number(this.transaction.moneytransaction) < 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
if (this.transaction.groupcategory == "expense" || this.transaction.groupcategory == "loan") {
if (Number(this.transaction.moneytransaction) > 0) { | this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
// tạo một giao dịch
this.TransactionService.createTransaction(this.transaction)
.then((result) => {
// upload hình ảnh
if (this.fileToUpload != null) {
this.TransactionService.uploadImage(result._id, this.fileToUpload)
.then((data) => {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
})
} else {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
}
})
.catch((err) => {
this.toastr.error(err, 'Thất bại ! ');
})
} else {
this.modalCheckMoney = this.modalService.open(modalCheckMoney, { windowClass: 'modalCheckMoney' });
}
}
}
// CHỌN THU NHẬP, CHI TIÊU, HAY NỢ
chooseCategory(event) {
this.transaction.groupcategory = event.detect;
this.transaction.imagecategory = event.image;
this.transaction.categorytransaction = event.name;
this.transaction.idcategory = event._id;
if (this.transaction.groupcategory == 'income') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Thu Nhập';
} else if (this.transaction.groupcategory == 'expense') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Chi Tiêu';
} else if (this.transaction.groupcategory == 'debt-loan') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Nợ/Vay';
}
}
// XOÁ HÌNH ẢNH
deleteImage() {
this.url = null;
this.fileToUpload = null;
}
// KHI USER CHỌN NGÀY
changeDate(event) {
this.dateCurrent = new Date(event.value.toDateString());
this.transaction.datecreatetransaction = new Date(event.value.toDateString()).toString();
}
// LẤY 1 VÍ CÓ ID LÀ
paramIdWalletURL() {
//LẤY ID WALLET TỪ URL
this.ActivatedRoute.paramMap
.subscribe((params) => {
if (params['params'].idwallet != undefined) {
this.WalletService.getDataWalletId(params['params'].idwallet).then((data) => {
this.nameWallet = data.namewallet;
this.infoCheckMoney['namewallet'] = data.namewallet;
this.transaction.idwallet = data._id;
})
.catch((err) => { })
}
})
}
// LẤY DỮ LIỆU KHI NGƯỜI DÙNG CHỌN VÍ NÀO
outputIdWallet(event) {
this.nameWallet = event.namewallet;
this.infoCheckMoney['namewallet'] = event.namewallet;
this.transaction.idwallet = event._id;
}
// LOAD LẠI DATA
reloadData() {
let urlIdWallet = (this.ActivatedRoute.snapshot.params.idwallet == undefined) ? '' : this.ActivatedRoute.snapshot.params.idwallet;
// LOAD LẠI CẬP NHẬT BÁO CÁO
this.TransactionService.getTransactions(urlIdWallet);
// LOAD CẬP NHẬT LẠI TẤT CẢ CÁC VÍ
this.WalletService.getDataWallets();
}
// RESET DATA
resetData() {
this.titleTransaction = "Thêm Giao Dịch";
this.nameButtonTransaction = "Thêm Giao Dịch";
this.transaction = {
idcategory: '',
groupcategory: '',
notetransaction: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// RESET TẤT CẢ CÁC TAGS
if(this.transaction.taguser != null){
let tags = $('#taguser').tagEditor('getTags')[0].tags;
for (let i = 0; i < tags.length; i++) {
$('#taguser').tagEditor('removeTag', tags[i]);
}
}
this.url = null;
this.fileToUpload = null;
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
// RESET WALLET
this.paramIdWalletURL();
// RESET IMAGE
this.url = null;
this.fileToUpload = null;
}
private setCurrentPosition() {
if ("geolocation" in navigator) {
navigator.geolocation.getCurrentPosition((position) => {
this.lat = position.coords.latitude;
this.lng = position.coords.longitude;
this.zoom = 14;
});
}
}
// MỞ MODAL CHỌN ĐỊA ĐIỂM GOOGLE MAP
open(content) {
this.GooleMapsService.getPlaceNear(this.lat, this.lng).then((data) => {
this.allPlace = data.results;
})
this.modalService.open(content);
}
// SUBMIT ĐỊA ĐIỂM
submitLocation(place) {
this.objLocation = {
lat: place.geometry.location.lat,
lng: place.geometry.location.lng,
name: place.name
}
this.transaction.location = this.objLocation;
}
// XOÁ ĐI VỊ CHÍ ĐÃ CHỌN
deleteLocation() {
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
}
} | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.