file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
trab05.py | # Nomes: Eduardo de Sousa Siqueira nUSP: 9278299
# Igor Barbosa Grécia Lúcio nUSP: 9778821
# SCC0251 - Image Processing 1 semestre 2021
# Trabalho 05. Image Descriptors
import numpy as np
import imageio as io
from numpy.lib import stride_tricks
import sys
def luminance_preprocessing(image):
red_channel = image[:, :, 0]
green_channel = image[:, :, 1]
blue_channel = image[:, :, 2]
new_image = np.floor(
(0.299 * red_channel) +
(0.587 * green_channel) +
(0.114 * blue_channel)
)
return new_image.astype(image.dtype)
def quantize(image, bits):
return image >> (8-bits)
#Função que constrói o descritor do histograma de cores normalizado
def normalized_histogram_descriptor(image, bits):
hist, _ = np.histogram(image, bins=2 ** bits)
norm_hist = hist / np.sum(hist)
return norm_hist / np.linalg.norm(norm_hist)
#Função que constrói a matriz de co-ocorrências de intensidade diagonalmente conectadas
def diag_coocurrence_mat(image, b):
mat_size = 2 ** b #O tamanho da matriz é igual à quantidade de intensidades diferentes da imagem
mat = np.zeros((mat_size, mat_size), dtype=int)
#Essa será a lista de todas as co-ocorrências diagonais da imagem
#Para cada pixel x, y, co_occurrences[x, y] será um array de 2 posições, onde
#co_occurrences[x, y, 0] é o valor do pixel e co_occurrences[x, y, 1] é o pixel na diagonal direita
co_occurrences = stride_tricks.as_strided(
x=image,
shape=(image.shape[0]-1, image.shape[1]-1, 2), #Não consideramos as últimas linha e coluna
strides=(*image.strides, image.strides[0] + image.strides[1]), #Os primeiros passos são iguais o da imagem, o terceiro valor guia ao pixel na diagonal (+1 linha +1 coluna)
writeable=False #Evita escritas de memória, pois essa não é uma função segura
)
#Para cada valor de intensidade, contamos todas as suas co_ocorrências usando numpy fancy indexing, e preenchemos essa linha da matriz
for intensity in np.unique(image):
counts, _ = np.histogram(co_occurrences[co_occurrences[:, :, 0] == intensity, 1], bins=mat_size)
mat[intensity] = counts
return mat
#Função que constrói o descritor a partir das 5 métricas da matriz de co-ocorrências
def haralick_texture_descriptor(image, b):
c = diag_coocurrence_mat(image, b)
c = c / np.sum(c) #Normalizando a matriz
descriptors = []
#Energy
descriptors.append(
np.sum(np.square(c))
)
#Entropy
descriptors.append(
np.multiply(
np.sum(c * np.log10(c + 0.001)),
-1 #Multiplica por -1, conforme descrito
)
)
#Contrast
#cálculo de (i - j)²
ii, jj = np.indices(c.shape)
factors = np.square(ii - jj)
#cálculo do contraste
descriptors.append(
np.sum( | c * factors
) / c.size
) #c.size == N² == número de elementos da matriz de co-ocorrencias
#Correlation
#Para o cálculo vetorizado, computamos todos os valores separadamente para cada linha e coluna
#O resultado final será uma matriz, em que cada elemento (i, j) é o valor da correlação para esse pixel
#Calculando somas parciais das linhas e colunas, transformando-as em vetores linha e coluna para possibilitar
#o broadcasting
sum_rows = np.sum(c, axis=1)[np.newaxis, :] #Transforma em um vetor linha de 1 dimensão
sum_cols = np.sum(c, axis=0)[:, np.newaxis] #Transforma em um vetor coluna
#Cálculo das médias direcionais. Será um vetor em que cada valor dir_mean[x] corresponde à média direcional da linha/coluna x
dir_mean_i = np.sum(sum_rows * ii, axis=1, keepdims=True)
dir_mean_j = np.sum(sum_cols * jj, axis=0, keepdims=True)
#Cálculo dos desvios padrões, equivalente ao cálculo anterior
std_dev_i = np.sum(np.square(ii - dir_mean_i) * sum_rows, axis=1, keepdims=True)
std_dev_j = np.sum(np.square(jj - dir_mean_j) * sum_cols, axis=0, keepdims=True)
#Inicializamos a matriz de correlações com zeros, para os casos em que os desvios padrões são 0
corr = np.zeros(c.shape, dtype=np.double)
#Cálculo vetorizado da correlação. Por causa do broadcasting de numpy e as conversões anteriores para vetores linha e coluna,
#a multiplicação de dir_mean_i e dir_mean_j resulta em uma matriz de tamanho igual ao da matriz de co-ocorrências, onde o valor [i, j]
#é igual à multiplicação de dir_mean_i[i] * dir_mean_j[j]. A multiplicação dos desvios ocorre de maneira equivalente
corr = np.divide(
(ii * jj * c) - (dir_mean_i * dir_mean_j),
(std_dev_i * std_dev_j),
out=corr,
where=np.logical_and(std_dev_i != 0, std_dev_j != 0) #O cálculo é feito apenas nas posições em que os desvios são acima de 0
)
#Fazendo a soma dos elementos da matriz anterior, obtemos o valor de correlação geral
descriptors.append(np.sum(corr))
#Homogeneity
descriptors.append(
np.sum(
c / (1 + np.abs(ii - jj))
)
)
#Evitando divisão por 0
norm = np.linalg.norm(descriptors)
return descriptors / norm if norm != 0 else descriptors
#Taken from: https://gist.github.com/arifqodari/dfd734cf61b0a4d01fde48f0024d1dc9
#Caso run.codes não aceite scipy convolve
def strided_convolution(image, weight, stride):
im_h, im_w = image.shape
f_h, f_w = weight.shape
out_shape = (1 + (im_h - f_h) // stride, 1 + (im_w - f_w) // stride, f_h, f_w)
out_strides = (image.strides[0] * stride, image.strides[1] * stride, image.strides[0], image.strides[1])
windows = stride_tricks.as_strided(image, shape=out_shape, strides=out_strides)
return np.tensordot(windows, weight, axes=((2, 3), (0, 1)))
#Função que faz o cálculo do descritor de histograma dos gradientes orientados
def oriented_gradients_histogram(image):
sobel_x = np.array([
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]
])
sobel_y = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]
])
#Try-except para o run.codes caso scipy esteja quebrada
try:
from scipy import ndimage
grad_x = ndimage.convolve(image.astype(np.double), sobel_x)
grad_y = ndimage.convolve(image.astype(np.double), sobel_y)
except ImportError:
grad_x = strided_convolution(image.astype(np.double), sobel_x, 1)
grad_y = strided_convolution(image.astype(np.double), sobel_x, 1)
#Cálculo da matriz de magnitude
magnitude_num = np.sqrt(np.square(grad_x) + np.square(grad_y))
magnitude_mat = magnitude_num / np.sum(magnitude_num)
#Ignora os erros de divisão por 0
np.seterr(divide='ignore', invalid='ignore')
#Algumas divisões de 0/0 resultam em NaN, mas isso é tratado automaticamente pela função np.digitize
angles = np.arctan(grad_y / grad_x)
#Fazendo as conversões
angles = angles + (np.pi / 2)
angles = np.degrees(angles)
#Construindo as bins
bins = np.arange(0, 180, 20)
angle_bins = np.digitize(angles, bins, right=False)
#Preenchendo as bins
descriptor = [np.sum(magnitude_mat[angle_bins == i]) for i in range(9)]
#Evitando divisão por 0
norm = np.linalg.norm(descriptor)
return descriptor / norm if norm != 0 else descriptor
#Função auxiliar que calcula cada descritor e os retorna já concatenados
def compute_descriptor(image, b):
dc = normalized_histogram_descriptor(image, b)
dt = haralick_texture_descriptor(image, b)
dg = oriented_gradients_histogram(image)
return np.concatenate((dc, dt, dg))
#Função que faz a procura de um objeto na imagem à partir dos seus descritores
def find_object(image, b, object_descriptor):
#Fazendo o pré-processamento
quantized_graylevel_image = quantize(luminance_preprocessing(image), b)
#Calculando a quantidade de janelas 32x32 que cabem na imagem
window_coords = ((quantized_graylevel_image.shape[0] // 16)- 1, (quantized_graylevel_image.shape[1] // 16 )- 1) #Nessa conta, a última janela 32x32 seria metade fora do vetor, portanto é ignorada
#Cada passo pula 16 posições, para a próxima janela 32x32
window_strides = (quantized_graylevel_image.strides[0] * 16, quantized_graylevel_image.strides[1] * 16)
#Esse vetor, de 4 dimensões, computa automaticamente todas as janelas 32x32 para a imagem
#windows[x, y] é a matriz 32x32 correspondente à janela y da linha x
windows = stride_tricks.as_strided(
quantized_graylevel_image,
shape=(*window_coords, 32, 32),
#strides[0, 1] faz o cálculo dos pulos para cada janela. strides[2, 3] faz o cálculo do pixel dentro da janela
strides=(*window_strides, *quantized_graylevel_image.strides),
writeable=False
)
distances = np.zeros(window_coords)
for i in range(window_coords[0]):
for j in range(window_coords[1]):
ld = compute_descriptor(windows[i, j], b)
distances[i, j] = np.sqrt(
np.sum(
np.square(object_descriptor - ld)
)
)
coords = np.where(distances == distances.min()) #Distância mínima é onde assumimos que o programa encontrou o objeto
return (coords[0][0], coords[1][0])
def main(opt):
#Lendo entrada do programa
f = input().rstrip()
g = input().rstrip()
b = int(input().rstrip())
#Computando descritor do objeto d
object_image = np.asarray(io.imread(f))
quantized_graylevel_object = quantize(luminance_preprocessing(object_image), b)
d = compute_descriptor(quantized_graylevel_object, b)
#Fazendo a busca baseado no descritor d obtido
large_image = np.asarray(io.imread(g))
i, j = find_object(large_image, b, d)
print(i, j) #Impressão na tela dos índices da janela
#Passando arg 1 na linha de comando, faz a impressão da imagem e o resultado da busca
if opt:
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig, ax = plt.subplots()
ax . imshow ( large_image )
rect = patches.Rectangle((j * 16 , i * 16 ) , 32 , 32 ,
linewidth =1, edgecolor='r' , facecolor='none')
ax.add_patch ( rect )
plt.show ()
if __name__ == '__main__':
#Conveniência para correção.
opt = False
if len(sys.argv) == 2:
opt = True
main(opt) | random_line_split | |
trab05.py | # Nomes: Eduardo de Sousa Siqueira nUSP: 9278299
# Igor Barbosa Grécia Lúcio nUSP: 9778821
# SCC0251 - Image Processing 1 semestre 2021
# Trabalho 05. Image Descriptors
import numpy as np
import imageio as io
from numpy.lib import stride_tricks
import sys
def luminance_preprocessing(image):
red_channel = image[:, :, 0]
green_channel = image[:, :, 1]
blue_channel = image[:, :, 2]
new_image = np.floor(
(0.299 * red_channel) +
(0.587 * green_channel) +
(0.114 * blue_channel)
)
return new_image.astype(image.dtype)
def quantize(image, bits):
return image >> (8-bits)
#Função que constrói o descritor do histograma de cores normalizado
def normalized_histogram_descriptor(image, bits):
hist, _ = np.histogram(image, bins=2 ** bits)
norm_hist = hist / np.sum(hist)
return norm_hist / np.linalg.norm(norm_hist)
#Função que constrói a matriz de co-ocorrências de intensidade diagonalmente conectadas
def diag_coocurrence_mat(image, b):
mat_size = 2 ** b #O tamanho da matriz é igual à quantidade de intensidades diferentes da imagem
mat = np.zeros((mat_size, mat_size), dtype=int)
#Essa será a lista de todas as co-ocorrências diagonais da imagem
#Para cada pixel x, y, co_occurrences[x, y] será um array de 2 posições, onde
#co_occurrences[x, y, 0] é o valor do pixel e co_occurrences[x, y, 1] é o pixel na diagonal direita
co_occurrences = stride_tricks.as_strided(
x=image,
shape=(image.shape[0]-1, image.shape[1]-1, 2), #Não consideramos as últimas linha e coluna
strides=(*image.strides, image.strides[0] + image.strides[1]), #Os primeiros passos são iguais o da imagem, o terceiro valor guia ao pixel na diagonal (+1 linha +1 coluna)
writeable=False #Evita escritas de memória, pois essa não é uma função segura
)
#Para cada valor de intensidade, contamos todas as suas co_ocorrências usando numpy fancy indexing, e preenchemos essa linha da matriz
for intensity in np.unique(image):
counts, _ = np.histogram(co_occurrences[co_occurrences[:, :, 0] == intensity, 1], bins=mat_size)
mat[intensity] = counts
return mat
#Função que constrói o descritor a partir das 5 métricas da matriz de co-ocorrências
def haralick_texture_descriptor(image, b):
c = diag_coocurrence_mat(image, b)
c = c / np.sum(c) #Normalizando a matriz
descriptors = []
#Energy
descriptors.append(
np.sum(np.square(c))
)
#Entropy
descriptors.append(
np.multiply(
np.sum(c * np.log10(c + 0.001)),
-1 #Multiplica por -1, conforme descrito
)
)
#Contrast
#cálculo de (i - j)²
ii, jj = np.indices(c.shape)
factors = np.square(ii - jj)
#cálculo do contraste
descriptors.append(
np.sum(
c * factors
) / c.size
) #c.size == N² == número de elementos da matriz de co-ocorrencias
#Correlation
#Para o cálculo vetorizado, computamos todos os valores separadamente para cada linha e coluna
#O resultado final será uma matriz, em que cada elemento (i, j) é o valor da correlação para esse pixel
#Calculando somas parciais das linhas e colunas, transformando-as em vetores linha e coluna para possibilitar
#o broadcasting
sum_rows = np.sum(c, axis=1)[np.newaxis, :] #Transforma em um vetor linha de 1 dimensão
sum_cols = np.sum(c, axis=0)[:, np.newaxis] #Transforma em um vetor coluna
#Cálculo das médias direcionais. Será um vetor em que cada valor dir_mean[x] corresponde à média direcional da linha/coluna x
dir_mean_i = np.sum(sum_rows * ii, axis=1, keepdims=True)
dir_mean_j = np.sum(sum_cols * jj, axis=0, keepdims=True)
#Cálculo dos desvios padrões, equivalente ao cálculo anterior
std_dev_i = np.sum(np.square(ii - dir_mean_i) * sum_rows, axis=1, keepdims=True)
std_dev_j = np.sum(np.square(jj - dir_mean_j) * sum_cols, axis=0, keepdims=True)
#Inicializamos a matriz de correlações com zeros, para os casos em que os desvios padrões são 0
corr = np.zeros(c.shape, dtype=np.double)
#Cálculo vetorizado da correlação. Por causa do broadcasting de numpy e as conversões anteriores para vetores linha e coluna,
#a multiplicação de dir_mean_i e dir_mean_j resulta em uma matriz de tamanho igual ao da matriz de co-ocorrências, onde o valor [i, j]
#é igual à multiplicação de dir_mean_i[i] * dir_mean_j[j]. A multiplicação dos desvios ocorre de maneira equivalente
corr = np.divide(
(ii * jj * c) - (dir_mean_i * dir_mean_j),
(std_dev_i * std_dev_j),
out=corr,
where=np.logical_and(std_dev_i != 0, std_dev_j != 0) #O cálculo é feito apenas nas posições em que os desvios são acima de 0
)
#Fazendo a soma dos elementos da matriz anterior, obtemos o valor de correlação geral
descriptors.append(np.sum(corr))
#Homogeneity
descriptors.append(
np.sum(
c / (1 + np.abs(ii - jj))
)
)
#Evitando divisão por 0
norm = np.linalg.norm(descriptors)
return descriptors / norm if norm != 0 else descriptors
#Taken from: https://gist.github.com/arifqodari/dfd734cf61b0a4d01fde48f0024d1dc9
#Caso run.codes não aceite scipy convolve
def strided_convolution(image, weight, stride):
im_h, im_w = image.shape
f_h, f_w = weight.shape
out_shape = (1 + (im_h - f_h) // stride, 1 + (im_w - f_w) // stride, f_h, f_w)
out_strides = (image.strides[0] * stride, image.strides[1] * stride, image.strides[0], image.strides[1])
windows = stride_tricks.as_strided(image, shape=out_shape, strides=out_strides)
return np.tensordot(windows, weight, axes=((2, 3), (0, 1)))
#Função que faz o cálculo do descritor de histograma dos gradientes orientados
def oriented_gradients_histogram(image):
sobel_x = np.array([
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]
])
sobel_y = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]
])
#Try-except para o run.codes caso scipy esteja quebrada
try:
from scipy import ndimage
grad_x = ndimage.convolve(image.astype(np.double), sobel_x)
grad_y = ndimage.convolve(image.astype(np.double), sobel_y)
except ImportError:
grad_x = strided_convolution(image.astype(np.double), sobel_x, 1)
grad_y = strided_convolution(image.astype(np.double), sobel_x, 1)
#Cálculo da matriz de magnitude
magnitude_num = np.sqrt(np.square(grad_x) + np.square(grad_y))
magnitude_mat = magnitude_num / np.sum(magnitude_num)
#Ignora os erros de divisão por 0
np.seterr(divide='ignore', invalid='ignore')
#Algumas divisões de 0/0 resultam em NaN, mas isso é tratado automaticamente pela função np.digitize
angles = np.arctan(grad_y / grad_x)
#Fazendo as conversões
angles = angles + (np.pi / 2)
angles = np.degrees(angles)
#Construindo as bins
bins = np.arange(0, 180, 20)
angle_bins = np.digitize(angles, bins, right=False)
#Preenchendo as bins
descriptor = [np.sum(magnitude_mat[angle_bins == i]) for i in range(9)]
#Evitando divisão por 0
norm = np.linalg.norm(descriptor)
return descriptor / norm if norm != 0 else descriptor
#Função auxiliar que calcula cada descritor e os retorna já concatenados
def compute_descriptor(image, b):
dc = normalized_histogram_descriptor(image, b)
dt = haralick_texture_descriptor(image, b)
dg = oriented_gradients_histogram(image)
return np.concatenate((dc, dt, dg))
#Função que faz a procura de um objeto na imagem à partir dos seus descritores
def find_object(image, b, object_descriptor):
#Fazendo o pré-processamento
quantized_graylevel_image = quantize(luminance_preprocessing(image), b)
#Calculando a quantidade de janelas 32x32 que cabem na imagem
window_coords = ((quantized_graylevel_image.shape[0] // 16)- 1, (quantized_graylevel_image.shape[1] // 16 )- 1) #Nessa conta, a última janela 32x32 seria metade fora do vetor, portanto é ignorada
#Cada passo pula 16 posições, para a próxima janela 32x32
window_strides = (quantized_graylevel_image.strides[0] * 16, quantized_graylevel_image.strides[1] * 16)
#Esse vetor, de 4 dimensões, computa automaticamente todas as janelas 32x32 para a imagem
#windows[x, y] é a matriz 32x32 correspondente à janela y da linha x
windows = stride_tricks.as_strided(
quantized_graylevel_image,
shape=(*window_coords, 32, 32),
#strides[0, 1] faz o cálculo dos pulos para cada janela. strides[2, 3] faz o cálculo do pixel dentro da janela
strides=(*window_strides, *quantized_graylevel_image.strides),
writeable=False
)
distances = np.zeros(window_coords)
for i in range(window_coords[0]):
for j in range(window_coords[1]):
ld = compute_descriptor(windows[i, j], b)
dista | ncontrou o objeto
return (coords[0][0], coords[1][0])
def main(opt):
#Lendo entrada do programa
f = input().rstrip()
g = input().rstrip()
b = int(input().rstrip())
#Computando descritor do objeto d
object_image = np.asarray(io.imread(f))
quantized_graylevel_object = quantize(luminance_preprocessing(object_image), b)
d = compute_descriptor(quantized_graylevel_object, b)
#Fazendo a busca baseado no descritor d obtido
large_image = np.asarray(io.imread(g))
i, j = find_object(large_image, b, d)
print(i, j) #Impressão na tela dos índices da janela
#Passando arg 1 na linha de comando, faz a impressão da imagem e o resultado da busca
if opt:
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig, ax = plt.subplots()
ax . imshow ( large_image )
rect = patches.Rectangle((j * 16 , i * 16 ) , 32 , 32 ,
linewidth =1, edgecolor='r' , facecolor='none')
ax.add_patch ( rect )
plt.show ()
if __name__ == '__main__':
#Conveniência para correção.
opt = False
if len(sys.argv) == 2:
opt = True
main(opt)
| nces[i, j] = np.sqrt(
np.sum(
np.square(object_descriptor - ld)
)
)
coords = np.where(distances == distances.min()) #Distância mínima é onde assumimos que o programa e | conditional_block |
trab05.py | # Nomes: Eduardo de Sousa Siqueira nUSP: 9278299
# Igor Barbosa Grécia Lúcio nUSP: 9778821
# SCC0251 - Image Processing 1 semestre 2021
# Trabalho 05. Image Descriptors
import numpy as np
import imageio as io
from numpy.lib import stride_tricks
import sys
def luminance_preprocessing(image):
red_channel = image[:, :, 0]
green_channel = image[:, :, 1]
blue_channel = image[:, :, 2]
new_image = np.floor(
(0.299 * red_channel) +
(0.587 * green_channel) +
(0.114 * blue_channel)
)
return new_image.astype(image.dtype)
def qu | mage, bits):
return image >> (8-bits)
#Função que constrói o descritor do histograma de cores normalizado
def normalized_histogram_descriptor(image, bits):
hist, _ = np.histogram(image, bins=2 ** bits)
norm_hist = hist / np.sum(hist)
return norm_hist / np.linalg.norm(norm_hist)
#Função que constrói a matriz de co-ocorrências de intensidade diagonalmente conectadas
def diag_coocurrence_mat(image, b):
mat_size = 2 ** b #O tamanho da matriz é igual à quantidade de intensidades diferentes da imagem
mat = np.zeros((mat_size, mat_size), dtype=int)
#Essa será a lista de todas as co-ocorrências diagonais da imagem
#Para cada pixel x, y, co_occurrences[x, y] será um array de 2 posições, onde
#co_occurrences[x, y, 0] é o valor do pixel e co_occurrences[x, y, 1] é o pixel na diagonal direita
co_occurrences = stride_tricks.as_strided(
x=image,
shape=(image.shape[0]-1, image.shape[1]-1, 2), #Não consideramos as últimas linha e coluna
strides=(*image.strides, image.strides[0] + image.strides[1]), #Os primeiros passos são iguais o da imagem, o terceiro valor guia ao pixel na diagonal (+1 linha +1 coluna)
writeable=False #Evita escritas de memória, pois essa não é uma função segura
)
#Para cada valor de intensidade, contamos todas as suas co_ocorrências usando numpy fancy indexing, e preenchemos essa linha da matriz
for intensity in np.unique(image):
counts, _ = np.histogram(co_occurrences[co_occurrences[:, :, 0] == intensity, 1], bins=mat_size)
mat[intensity] = counts
return mat
#Função que constrói o descritor a partir das 5 métricas da matriz de co-ocorrências
def haralick_texture_descriptor(image, b):
c = diag_coocurrence_mat(image, b)
c = c / np.sum(c) #Normalizando a matriz
descriptors = []
#Energy
descriptors.append(
np.sum(np.square(c))
)
#Entropy
descriptors.append(
np.multiply(
np.sum(c * np.log10(c + 0.001)),
-1 #Multiplica por -1, conforme descrito
)
)
#Contrast
#cálculo de (i - j)²
ii, jj = np.indices(c.shape)
factors = np.square(ii - jj)
#cálculo do contraste
descriptors.append(
np.sum(
c * factors
) / c.size
) #c.size == N² == número de elementos da matriz de co-ocorrencias
#Correlation
#Para o cálculo vetorizado, computamos todos os valores separadamente para cada linha e coluna
#O resultado final será uma matriz, em que cada elemento (i, j) é o valor da correlação para esse pixel
#Calculando somas parciais das linhas e colunas, transformando-as em vetores linha e coluna para possibilitar
#o broadcasting
sum_rows = np.sum(c, axis=1)[np.newaxis, :] #Transforma em um vetor linha de 1 dimensão
sum_cols = np.sum(c, axis=0)[:, np.newaxis] #Transforma em um vetor coluna
#Cálculo das médias direcionais. Será um vetor em que cada valor dir_mean[x] corresponde à média direcional da linha/coluna x
dir_mean_i = np.sum(sum_rows * ii, axis=1, keepdims=True)
dir_mean_j = np.sum(sum_cols * jj, axis=0, keepdims=True)
#Cálculo dos desvios padrões, equivalente ao cálculo anterior
std_dev_i = np.sum(np.square(ii - dir_mean_i) * sum_rows, axis=1, keepdims=True)
std_dev_j = np.sum(np.square(jj - dir_mean_j) * sum_cols, axis=0, keepdims=True)
#Inicializamos a matriz de correlações com zeros, para os casos em que os desvios padrões são 0
corr = np.zeros(c.shape, dtype=np.double)
#Cálculo vetorizado da correlação. Por causa do broadcasting de numpy e as conversões anteriores para vetores linha e coluna,
#a multiplicação de dir_mean_i e dir_mean_j resulta em uma matriz de tamanho igual ao da matriz de co-ocorrências, onde o valor [i, j]
#é igual à multiplicação de dir_mean_i[i] * dir_mean_j[j]. A multiplicação dos desvios ocorre de maneira equivalente
corr = np.divide(
(ii * jj * c) - (dir_mean_i * dir_mean_j),
(std_dev_i * std_dev_j),
out=corr,
where=np.logical_and(std_dev_i != 0, std_dev_j != 0) #O cálculo é feito apenas nas posições em que os desvios são acima de 0
)
#Fazendo a soma dos elementos da matriz anterior, obtemos o valor de correlação geral
descriptors.append(np.sum(corr))
#Homogeneity
descriptors.append(
np.sum(
c / (1 + np.abs(ii - jj))
)
)
#Evitando divisão por 0
norm = np.linalg.norm(descriptors)
return descriptors / norm if norm != 0 else descriptors
#Taken from: https://gist.github.com/arifqodari/dfd734cf61b0a4d01fde48f0024d1dc9
#Caso run.codes não aceite scipy convolve
def strided_convolution(image, weight, stride):
im_h, im_w = image.shape
f_h, f_w = weight.shape
out_shape = (1 + (im_h - f_h) // stride, 1 + (im_w - f_w) // stride, f_h, f_w)
out_strides = (image.strides[0] * stride, image.strides[1] * stride, image.strides[0], image.strides[1])
windows = stride_tricks.as_strided(image, shape=out_shape, strides=out_strides)
return np.tensordot(windows, weight, axes=((2, 3), (0, 1)))
#Função que faz o cálculo do descritor de histograma dos gradientes orientados
def oriented_gradients_histogram(image):
sobel_x = np.array([
[-1, -2, -1],
[0, 0, 0],
[1, 2, 1]
])
sobel_y = np.array([
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]
])
#Try-except para o run.codes caso scipy esteja quebrada
try:
from scipy import ndimage
grad_x = ndimage.convolve(image.astype(np.double), sobel_x)
grad_y = ndimage.convolve(image.astype(np.double), sobel_y)
except ImportError:
grad_x = strided_convolution(image.astype(np.double), sobel_x, 1)
grad_y = strided_convolution(image.astype(np.double), sobel_x, 1)
#Cálculo da matriz de magnitude
magnitude_num = np.sqrt(np.square(grad_x) + np.square(grad_y))
magnitude_mat = magnitude_num / np.sum(magnitude_num)
#Ignora os erros de divisão por 0
np.seterr(divide='ignore', invalid='ignore')
#Algumas divisões de 0/0 resultam em NaN, mas isso é tratado automaticamente pela função np.digitize
angles = np.arctan(grad_y / grad_x)
#Fazendo as conversões
angles = angles + (np.pi / 2)
angles = np.degrees(angles)
#Construindo as bins
bins = np.arange(0, 180, 20)
angle_bins = np.digitize(angles, bins, right=False)
#Preenchendo as bins
descriptor = [np.sum(magnitude_mat[angle_bins == i]) for i in range(9)]
#Evitando divisão por 0
norm = np.linalg.norm(descriptor)
return descriptor / norm if norm != 0 else descriptor
#Função auxiliar que calcula cada descritor e os retorna já concatenados
def compute_descriptor(image, b):
dc = normalized_histogram_descriptor(image, b)
dt = haralick_texture_descriptor(image, b)
dg = oriented_gradients_histogram(image)
return np.concatenate((dc, dt, dg))
#Função que faz a procura de um objeto na imagem à partir dos seus descritores
def find_object(image, b, object_descriptor):
#Fazendo o pré-processamento
quantized_graylevel_image = quantize(luminance_preprocessing(image), b)
#Calculando a quantidade de janelas 32x32 que cabem na imagem
window_coords = ((quantized_graylevel_image.shape[0] // 16)- 1, (quantized_graylevel_image.shape[1] // 16 )- 1) #Nessa conta, a última janela 32x32 seria metade fora do vetor, portanto é ignorada
#Cada passo pula 16 posições, para a próxima janela 32x32
window_strides = (quantized_graylevel_image.strides[0] * 16, quantized_graylevel_image.strides[1] * 16)
#Esse vetor, de 4 dimensões, computa automaticamente todas as janelas 32x32 para a imagem
#windows[x, y] é a matriz 32x32 correspondente à janela y da linha x
windows = stride_tricks.as_strided(
quantized_graylevel_image,
shape=(*window_coords, 32, 32),
#strides[0, 1] faz o cálculo dos pulos para cada janela. strides[2, 3] faz o cálculo do pixel dentro da janela
strides=(*window_strides, *quantized_graylevel_image.strides),
writeable=False
)
distances = np.zeros(window_coords)
for i in range(window_coords[0]):
for j in range(window_coords[1]):
ld = compute_descriptor(windows[i, j], b)
distances[i, j] = np.sqrt(
np.sum(
np.square(object_descriptor - ld)
)
)
coords = np.where(distances == distances.min()) #Distância mínima é onde assumimos que o programa encontrou o objeto
return (coords[0][0], coords[1][0])
def main(opt):
#Lendo entrada do programa
f = input().rstrip()
g = input().rstrip()
b = int(input().rstrip())
#Computando descritor do objeto d
object_image = np.asarray(io.imread(f))
quantized_graylevel_object = quantize(luminance_preprocessing(object_image), b)
d = compute_descriptor(quantized_graylevel_object, b)
#Fazendo a busca baseado no descritor d obtido
large_image = np.asarray(io.imread(g))
i, j = find_object(large_image, b, d)
print(i, j) #Impressão na tela dos índices da janela
#Passando arg 1 na linha de comando, faz a impressão da imagem e o resultado da busca
if opt:
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig, ax = plt.subplots()
ax . imshow ( large_image )
rect = patches.Rectangle((j * 16 , i * 16 ) , 32 , 32 ,
linewidth =1, edgecolor='r' , facecolor='none')
ax.add_patch ( rect )
plt.show ()
if __name__ == '__main__':
#Conveniência para correção.
opt = False
if len(sys.argv) == 2:
opt = True
main(opt)
| antize(i | identifier_name |
game.rs | #[cfg(feature = "hot-reload")]
use crate::assets::HotReloader;
use crate::config::AudioConfig;
use crate::core::audio::AudioSystem;
use crate::core::camera::{Camera, ProjectionMatrix};
use crate::core::input::{Input, InputAction};
use crate::core::random::{RandomGenerator, Seed};
use crate::core::scene::{Scene, SceneStack};
use crate::core::transform::update_transforms;
use crate::core::window::WindowDim;
use crate::event::GameEvent;
use crate::gameplay::collision::CollisionWorld;
use crate::gameplay::delete::GarbageCollector;
use crate::render::path::debug::DebugQueue;
use crate::render::ui::gui::GuiContext;
use crate::render::Renderer;
use crate::resources::Resources;
use crate::{HEIGHT, WIDTH};
use glfw::{Context, Key, MouseButton, WindowEvent};
use log::info;
use luminance_glfw::GlfwSurface;
use shrev::{EventChannel, ReaderId};
use std::any::Any;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::thread;
use std::time::{Duration, Instant};
/// GameBuilder is used to create a new game. Game struct has a lot of members that do not need to be
/// exposed so gamebuilder provides a simpler way to get started.
pub struct GameBuilder<'a, A>
where
A: InputAction,
{
surface: &'a mut GlfwSurface,
scene: Option<Box<dyn Scene<WindowEvent>>>,
resources: Resources,
phantom: PhantomData<A>,
seed: Option<Seed>,
input_config: Option<(HashMap<Key, A>, HashMap<MouseButton, A>)>,
gui_context: GuiContext,
audio_config: AudioConfig,
}
impl<'a, A> GameBuilder<'a, A>
where
A: InputAction + 'static,
{
pub fn new(surface: &'a mut GlfwSurface) -> Self {
// resources will need at least an event channel and an input
let mut resources = Resources::default();
let chan: EventChannel<GameEvent> = EventChannel::new();
resources.insert(chan);
// and some asset manager;
crate::assets::create_asset_managers(surface, &mut resources);
// the proj matrix.
resources.insert(ProjectionMatrix::new(WIDTH as f32, HEIGHT as f32));
resources.insert(WindowDim::new(WIDTH, HEIGHT));
resources.insert(CollisionWorld::default());
resources.insert(DebugQueue::default());
Self {
gui_context: GuiContext::new(WindowDim::new(WIDTH, HEIGHT)),
surface,
scene: None,
resources,
input_config: None,
phantom: PhantomData::default(),
seed: None,
audio_config: AudioConfig::default(),
}
}
/// Set up the first scene.
pub fn for_scene(mut self, scene: Box<dyn Scene<WindowEvent>>) -> Self {
self.scene = Some(scene);
self
}
pub fn with_input_config(
mut self,
key_map: HashMap<Key, A>,
btn_map: HashMap<MouseButton, A>,
) -> Self {
self.input_config = Some((key_map, btn_map));
self
}
/// Specific config for audio
pub fn with_audio_config(mut self, audio_config: AudioConfig) -> Self {
self.audio_config = audio_config;
self
}
/// Add custom resources.
pub fn with_resource<T: Any>(mut self, r: T) -> Self {
self.resources.insert(r);
self
}
pub fn with_seed(mut self, seed: Seed) -> Self {
self.seed = Some(seed);
self
}
pub fn | (mut self) -> Game<'a, A> {
let renderer = Renderer::new(self.surface, &self.gui_context);
// Need some input :D
let input: Input<A> = {
let (key_mapping, btn_mapping) = self
.input_config
.unwrap_or((A::get_default_key_mapping(), A::get_default_mouse_mapping()));
Input::new(key_mapping, btn_mapping)
};
self.resources.insert(input);
let mut world = hecs::World::new();
// if a seed is provided, let's add it to the resources.
if let Some(seed) = self.seed {
self.resources.insert(RandomGenerator::new(seed));
} else {
self.resources.insert(RandomGenerator::from_entropy());
}
let scene_stack = {
let mut scenes = SceneStack::default();
if let Some(scene) = self.scene {
scenes.push(scene, &mut world, &mut self.resources);
}
scenes
};
let rdr_id = {
let mut chan = self
.resources
.fetch_mut::<EventChannel<GameEvent>>()
.unwrap();
chan.register_reader()
};
let garbage_collector = GarbageCollector::new(&mut self.resources);
// we need a camera :)
world.spawn((Camera::new(),));
info!("Finished building game");
// audio system.
let audio_system = AudioSystem::new(&self.resources, self.audio_config)
.expect("Cannot create audio system");
Game {
surface: self.surface,
renderer,
scene_stack,
world,
audio_system,
resources: self.resources,
rdr_id,
garbage_collector,
phantom: self.phantom,
gui_context: self.gui_context,
#[cfg(feature = "hot-reload")]
hot_reloader: HotReloader::new(),
}
}
}
/// Struct that holds the game state and systems.
///
/// # Lifetime requirement:
/// The opengl context is held in GlfwSurface. This is a mutable reference here as we do not want the
/// context to be dropped at the same time as the systems. If it is dropped before, then releasing GPU
/// resources will throw a segfault.
///
/// # Generic parameters:
/// - A: Action that is derived from the inputs. (e.g. Move Left)
///
pub struct Game<'a, A> {
/// for drawing stuff
surface: &'a mut GlfwSurface,
renderer: Renderer<GlfwSurface>,
/// All the scenes. Current scene will be used in the main loop.
scene_stack: SceneStack<WindowEvent>,
/// Play music and sound effects
audio_system: AudioSystem,
/// Resources (assets, inputs...)
resources: Resources,
/// Current entities.
world: hecs::World,
/// Read events from the systems
rdr_id: ReaderId<GameEvent>,
/// Clean up the dead entities.
garbage_collector: GarbageCollector,
gui_context: GuiContext,
phantom: PhantomData<A>,
#[cfg(feature = "hot-reload")]
hot_reloader: HotReloader<GlfwSurface>,
}
impl<'a, A> Game<'a, A>
where
A: InputAction + 'static,
{
/// Run the game. This is the main loop.
pub fn run(&mut self) {
let mut current_time = Instant::now();
let dt = Duration::from_millis(16);
let mut back_buffer = self.surface.back_buffer().unwrap();
'app: loop {
// 1. Poll the events and update the Input resource
// ------------------------------------------------
let mut resize = false;
self.surface.window.glfw.poll_events();
{
let mut input = self.resources.fetch_mut::<Input<A>>().unwrap();
input.prepare();
self.gui_context.reset_inputs();
for (_, event) in self.surface.events_rx.try_iter() {
match event {
WindowEvent::Close => break 'app,
WindowEvent::FramebufferSize(_, _) => resize = true,
ev => {
self.gui_context.process_event(ev.clone());
if let Some(scene) = self.scene_stack.current_mut() {
scene.process_input(&mut self.world, ev.clone(), &self.resources);
}
input.process_event(ev)
}
}
}
}
// 2. Update the scene.
// ------------------------------------------------
let scene_result = if let Some(scene) = self.scene_stack.current_mut() {
let scene_res = scene.update(dt, &mut self.world, &self.resources);
{
let chan = self.resources.fetch::<EventChannel<GameEvent>>().unwrap();
for ev in chan.read(&mut self.rdr_id) {
scene.process_event(&mut self.world, ev.clone(), &self.resources);
}
}
let maybe_gui =
scene.prepare_gui(dt, &mut self.world, &self.resources, &mut self.gui_context);
self.renderer.prepare_ui(
self.surface,
maybe_gui,
&self.resources,
&mut *self.gui_context.fonts.borrow_mut(),
);
Some(scene_res)
} else {
None
};
// Update children transforms:
// -----------------------------
update_transforms(&mut self.world);
// 3. Clean up dead entities.
// ------------------------------------------------
self.garbage_collector
.collect(&mut self.world, &self.resources);
// 4. Render to screen
// ------------------------------------------------
log::debug!("RENDER");
self.renderer
.update(self.surface, &self.world, dt, &self.resources);
if resize {
back_buffer = self.surface.back_buffer().unwrap();
let new_size = back_buffer.size();
let mut proj = self.resources.fetch_mut::<ProjectionMatrix>().unwrap();
proj.resize(new_size[0] as f32, new_size[1] as f32);
let mut dim = self.resources.fetch_mut::<WindowDim>().unwrap();
dim.resize(new_size[0], new_size[1]);
self.gui_context.window_dim = *dim;
}
let render =
self.renderer
.render(self.surface, &mut back_buffer, &self.world, &self.resources);
if render.is_ok() {
self.surface.window.swap_buffers();
} else {
break 'app;
}
// Play music :)
self.audio_system.process(&self.resources);
// Update collision world for collision queries.
{
let mut collisions = self.resources.fetch_mut::<CollisionWorld>().unwrap();
collisions.synchronize(&self.world);
}
// Either clean up or load new resources.
crate::assets::update_asset_managers(self.surface, &self.resources);
#[cfg(feature = "hot-reload")]
self.hot_reloader.update(&self.resources);
// Now, if need to switch scenes, do it.
if let Some(res) = scene_result {
self.scene_stack
.apply_result(res, &mut self.world, &mut self.resources);
}
let now = Instant::now();
let frame_duration = now - current_time;
if frame_duration < dt {
thread::sleep(dt - frame_duration);
}
current_time = now;
}
info!("Bye bye.");
}
}
| build | identifier_name |
game.rs | #[cfg(feature = "hot-reload")]
use crate::assets::HotReloader;
use crate::config::AudioConfig;
use crate::core::audio::AudioSystem;
use crate::core::camera::{Camera, ProjectionMatrix};
use crate::core::input::{Input, InputAction};
use crate::core::random::{RandomGenerator, Seed};
use crate::core::scene::{Scene, SceneStack};
use crate::core::transform::update_transforms;
use crate::core::window::WindowDim;
use crate::event::GameEvent;
use crate::gameplay::collision::CollisionWorld;
use crate::gameplay::delete::GarbageCollector;
use crate::render::path::debug::DebugQueue;
use crate::render::ui::gui::GuiContext;
use crate::render::Renderer;
use crate::resources::Resources;
use crate::{HEIGHT, WIDTH};
use glfw::{Context, Key, MouseButton, WindowEvent};
use log::info;
use luminance_glfw::GlfwSurface;
use shrev::{EventChannel, ReaderId};
use std::any::Any;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::thread;
use std::time::{Duration, Instant};
/// GameBuilder is used to create a new game. Game struct has a lot of members that do not need to be
/// exposed so gamebuilder provides a simpler way to get started.
pub struct GameBuilder<'a, A>
where
A: InputAction,
{
surface: &'a mut GlfwSurface,
scene: Option<Box<dyn Scene<WindowEvent>>>,
resources: Resources,
phantom: PhantomData<A>,
seed: Option<Seed>,
input_config: Option<(HashMap<Key, A>, HashMap<MouseButton, A>)>,
gui_context: GuiContext,
audio_config: AudioConfig,
}
impl<'a, A> GameBuilder<'a, A>
where
A: InputAction + 'static,
{
pub fn new(surface: &'a mut GlfwSurface) -> Self {
// resources will need at least an event channel and an input
let mut resources = Resources::default();
let chan: EventChannel<GameEvent> = EventChannel::new();
resources.insert(chan);
// and some asset manager;
crate::assets::create_asset_managers(surface, &mut resources);
// the proj matrix.
resources.insert(ProjectionMatrix::new(WIDTH as f32, HEIGHT as f32));
resources.insert(WindowDim::new(WIDTH, HEIGHT));
resources.insert(CollisionWorld::default());
resources.insert(DebugQueue::default());
Self {
gui_context: GuiContext::new(WindowDim::new(WIDTH, HEIGHT)),
surface,
scene: None,
resources,
input_config: None,
phantom: PhantomData::default(),
seed: None,
audio_config: AudioConfig::default(),
}
}
/// Set up the first scene.
pub fn for_scene(mut self, scene: Box<dyn Scene<WindowEvent>>) -> Self {
self.scene = Some(scene);
self
}
pub fn with_input_config(
mut self,
key_map: HashMap<Key, A>,
btn_map: HashMap<MouseButton, A>,
) -> Self {
self.input_config = Some((key_map, btn_map));
self
}
/// Specific config for audio
pub fn with_audio_config(mut self, audio_config: AudioConfig) -> Self {
self.audio_config = audio_config;
self
}
/// Add custom resources.
pub fn with_resource<T: Any>(mut self, r: T) -> Self {
self.resources.insert(r);
self
}
pub fn with_seed(mut self, seed: Seed) -> Self {
self.seed = Some(seed);
self
}
pub fn build(mut self) -> Game<'a, A> {
let renderer = Renderer::new(self.surface, &self.gui_context);
// Need some input :D
let input: Input<A> = {
let (key_mapping, btn_mapping) = self
.input_config
.unwrap_or((A::get_default_key_mapping(), A::get_default_mouse_mapping()));
Input::new(key_mapping, btn_mapping)
};
self.resources.insert(input);
let mut world = hecs::World::new();
// if a seed is provided, let's add it to the resources.
if let Some(seed) = self.seed {
self.resources.insert(RandomGenerator::new(seed));
} else {
self.resources.insert(RandomGenerator::from_entropy());
}
let scene_stack = {
let mut scenes = SceneStack::default();
if let Some(scene) = self.scene {
scenes.push(scene, &mut world, &mut self.resources);
}
scenes
};
let rdr_id = {
let mut chan = self
.resources
.fetch_mut::<EventChannel<GameEvent>>()
.unwrap();
chan.register_reader()
};
let garbage_collector = GarbageCollector::new(&mut self.resources);
// we need a camera :)
world.spawn((Camera::new(),));
info!("Finished building game");
// audio system.
let audio_system = AudioSystem::new(&self.resources, self.audio_config)
.expect("Cannot create audio system");
Game {
surface: self.surface,
renderer,
scene_stack,
world,
audio_system,
resources: self.resources,
rdr_id,
garbage_collector,
phantom: self.phantom,
gui_context: self.gui_context,
#[cfg(feature = "hot-reload")]
hot_reloader: HotReloader::new(),
}
}
}
/// Struct that holds the game state and systems.
///
/// # Lifetime requirement:
/// The opengl context is held in GlfwSurface. This is a mutable reference here as we do not want the
/// context to be dropped at the same time as the systems. If it is dropped before, then releasing GPU
/// resources will throw a segfault.
///
/// # Generic parameters:
/// - A: Action that is derived from the inputs. (e.g. Move Left)
///
pub struct Game<'a, A> {
/// for drawing stuff
surface: &'a mut GlfwSurface,
renderer: Renderer<GlfwSurface>,
/// All the scenes. Current scene will be used in the main loop.
scene_stack: SceneStack<WindowEvent>,
/// Play music and sound effects
audio_system: AudioSystem,
/// Resources (assets, inputs...)
resources: Resources,
/// Current entities.
world: hecs::World,
/// Read events from the systems
rdr_id: ReaderId<GameEvent>,
/// Clean up the dead entities.
garbage_collector: GarbageCollector,
gui_context: GuiContext,
phantom: PhantomData<A>,
#[cfg(feature = "hot-reload")]
hot_reloader: HotReloader<GlfwSurface>,
}
impl<'a, A> Game<'a, A>
where
A: InputAction + 'static,
{
/// Run the game. This is the main loop.
pub fn run(&mut self) {
let mut current_time = Instant::now();
let dt = Duration::from_millis(16);
let mut back_buffer = self.surface.back_buffer().unwrap();
'app: loop {
// 1. Poll the events and update the Input resource
// ------------------------------------------------
let mut resize = false;
self.surface.window.glfw.poll_events();
{
let mut input = self.resources.fetch_mut::<Input<A>>().unwrap();
input.prepare();
self.gui_context.reset_inputs();
for (_, event) in self.surface.events_rx.try_iter() {
match event {
WindowEvent::Close => break 'app,
WindowEvent::FramebufferSize(_, _) => resize = true,
ev => {
self.gui_context.process_event(ev.clone());
if let Some(scene) = self.scene_stack.current_mut() {
scene.process_input(&mut self.world, ev.clone(), &self.resources);
}
input.process_event(ev)
}
}
}
}
// 2. Update the scene.
// ------------------------------------------------ | let scene_result = if let Some(scene) = self.scene_stack.current_mut() {
let scene_res = scene.update(dt, &mut self.world, &self.resources);
{
let chan = self.resources.fetch::<EventChannel<GameEvent>>().unwrap();
for ev in chan.read(&mut self.rdr_id) {
scene.process_event(&mut self.world, ev.clone(), &self.resources);
}
}
let maybe_gui =
scene.prepare_gui(dt, &mut self.world, &self.resources, &mut self.gui_context);
self.renderer.prepare_ui(
self.surface,
maybe_gui,
&self.resources,
&mut *self.gui_context.fonts.borrow_mut(),
);
Some(scene_res)
} else {
None
};
// Update children transforms:
// -----------------------------
update_transforms(&mut self.world);
// 3. Clean up dead entities.
// ------------------------------------------------
self.garbage_collector
.collect(&mut self.world, &self.resources);
// 4. Render to screen
// ------------------------------------------------
log::debug!("RENDER");
self.renderer
.update(self.surface, &self.world, dt, &self.resources);
if resize {
back_buffer = self.surface.back_buffer().unwrap();
let new_size = back_buffer.size();
let mut proj = self.resources.fetch_mut::<ProjectionMatrix>().unwrap();
proj.resize(new_size[0] as f32, new_size[1] as f32);
let mut dim = self.resources.fetch_mut::<WindowDim>().unwrap();
dim.resize(new_size[0], new_size[1]);
self.gui_context.window_dim = *dim;
}
let render =
self.renderer
.render(self.surface, &mut back_buffer, &self.world, &self.resources);
if render.is_ok() {
self.surface.window.swap_buffers();
} else {
break 'app;
}
// Play music :)
self.audio_system.process(&self.resources);
// Update collision world for collision queries.
{
let mut collisions = self.resources.fetch_mut::<CollisionWorld>().unwrap();
collisions.synchronize(&self.world);
}
// Either clean up or load new resources.
crate::assets::update_asset_managers(self.surface, &self.resources);
#[cfg(feature = "hot-reload")]
self.hot_reloader.update(&self.resources);
// Now, if need to switch scenes, do it.
if let Some(res) = scene_result {
self.scene_stack
.apply_result(res, &mut self.world, &mut self.resources);
}
let now = Instant::now();
let frame_duration = now - current_time;
if frame_duration < dt {
thread::sleep(dt - frame_duration);
}
current_time = now;
}
info!("Bye bye.");
}
} | random_line_split | |
game.rs | #[cfg(feature = "hot-reload")]
use crate::assets::HotReloader;
use crate::config::AudioConfig;
use crate::core::audio::AudioSystem;
use crate::core::camera::{Camera, ProjectionMatrix};
use crate::core::input::{Input, InputAction};
use crate::core::random::{RandomGenerator, Seed};
use crate::core::scene::{Scene, SceneStack};
use crate::core::transform::update_transforms;
use crate::core::window::WindowDim;
use crate::event::GameEvent;
use crate::gameplay::collision::CollisionWorld;
use crate::gameplay::delete::GarbageCollector;
use crate::render::path::debug::DebugQueue;
use crate::render::ui::gui::GuiContext;
use crate::render::Renderer;
use crate::resources::Resources;
use crate::{HEIGHT, WIDTH};
use glfw::{Context, Key, MouseButton, WindowEvent};
use log::info;
use luminance_glfw::GlfwSurface;
use shrev::{EventChannel, ReaderId};
use std::any::Any;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::thread;
use std::time::{Duration, Instant};
/// GameBuilder is used to create a new game. Game struct has a lot of members that do not need to be
/// exposed so gamebuilder provides a simpler way to get started.
pub struct GameBuilder<'a, A>
where
A: InputAction,
{
surface: &'a mut GlfwSurface,
scene: Option<Box<dyn Scene<WindowEvent>>>,
resources: Resources,
phantom: PhantomData<A>,
seed: Option<Seed>,
input_config: Option<(HashMap<Key, A>, HashMap<MouseButton, A>)>,
gui_context: GuiContext,
audio_config: AudioConfig,
}
impl<'a, A> GameBuilder<'a, A>
where
A: InputAction + 'static,
{
pub fn new(surface: &'a mut GlfwSurface) -> Self {
// resources will need at least an event channel and an input
let mut resources = Resources::default();
let chan: EventChannel<GameEvent> = EventChannel::new();
resources.insert(chan);
// and some asset manager;
crate::assets::create_asset_managers(surface, &mut resources);
// the proj matrix.
resources.insert(ProjectionMatrix::new(WIDTH as f32, HEIGHT as f32));
resources.insert(WindowDim::new(WIDTH, HEIGHT));
resources.insert(CollisionWorld::default());
resources.insert(DebugQueue::default());
Self {
gui_context: GuiContext::new(WindowDim::new(WIDTH, HEIGHT)),
surface,
scene: None,
resources,
input_config: None,
phantom: PhantomData::default(),
seed: None,
audio_config: AudioConfig::default(),
}
}
/// Set up the first scene.
pub fn for_scene(mut self, scene: Box<dyn Scene<WindowEvent>>) -> Self {
self.scene = Some(scene);
self
}
pub fn with_input_config(
mut self,
key_map: HashMap<Key, A>,
btn_map: HashMap<MouseButton, A>,
) -> Self {
self.input_config = Some((key_map, btn_map));
self
}
/// Specific config for audio
pub fn with_audio_config(mut self, audio_config: AudioConfig) -> Self {
self.audio_config = audio_config;
self
}
/// Add custom resources.
pub fn with_resource<T: Any>(mut self, r: T) -> Self {
self.resources.insert(r);
self
}
pub fn with_seed(mut self, seed: Seed) -> Self |
pub fn build(mut self) -> Game<'a, A> {
let renderer = Renderer::new(self.surface, &self.gui_context);
// Need some input :D
let input: Input<A> = {
let (key_mapping, btn_mapping) = self
.input_config
.unwrap_or((A::get_default_key_mapping(), A::get_default_mouse_mapping()));
Input::new(key_mapping, btn_mapping)
};
self.resources.insert(input);
let mut world = hecs::World::new();
// if a seed is provided, let's add it to the resources.
if let Some(seed) = self.seed {
self.resources.insert(RandomGenerator::new(seed));
} else {
self.resources.insert(RandomGenerator::from_entropy());
}
let scene_stack = {
let mut scenes = SceneStack::default();
if let Some(scene) = self.scene {
scenes.push(scene, &mut world, &mut self.resources);
}
scenes
};
let rdr_id = {
let mut chan = self
.resources
.fetch_mut::<EventChannel<GameEvent>>()
.unwrap();
chan.register_reader()
};
let garbage_collector = GarbageCollector::new(&mut self.resources);
// we need a camera :)
world.spawn((Camera::new(),));
info!("Finished building game");
// audio system.
let audio_system = AudioSystem::new(&self.resources, self.audio_config)
.expect("Cannot create audio system");
Game {
surface: self.surface,
renderer,
scene_stack,
world,
audio_system,
resources: self.resources,
rdr_id,
garbage_collector,
phantom: self.phantom,
gui_context: self.gui_context,
#[cfg(feature = "hot-reload")]
hot_reloader: HotReloader::new(),
}
}
}
/// Struct that holds the game state and systems.
///
/// # Lifetime requirement:
/// The opengl context is held in GlfwSurface. This is a mutable reference here as we do not want the
/// context to be dropped at the same time as the systems. If it is dropped before, then releasing GPU
/// resources will throw a segfault.
///
/// # Generic parameters:
/// - A: Action that is derived from the inputs. (e.g. Move Left)
///
pub struct Game<'a, A> {
/// for drawing stuff
surface: &'a mut GlfwSurface,
renderer: Renderer<GlfwSurface>,
/// All the scenes. Current scene will be used in the main loop.
scene_stack: SceneStack<WindowEvent>,
/// Play music and sound effects
audio_system: AudioSystem,
/// Resources (assets, inputs...)
resources: Resources,
/// Current entities.
world: hecs::World,
/// Read events from the systems
rdr_id: ReaderId<GameEvent>,
/// Clean up the dead entities.
garbage_collector: GarbageCollector,
gui_context: GuiContext,
phantom: PhantomData<A>,
#[cfg(feature = "hot-reload")]
hot_reloader: HotReloader<GlfwSurface>,
}
impl<'a, A> Game<'a, A>
where
A: InputAction + 'static,
{
/// Run the game. This is the main loop.
pub fn run(&mut self) {
let mut current_time = Instant::now();
let dt = Duration::from_millis(16);
let mut back_buffer = self.surface.back_buffer().unwrap();
'app: loop {
// 1. Poll the events and update the Input resource
// ------------------------------------------------
let mut resize = false;
self.surface.window.glfw.poll_events();
{
let mut input = self.resources.fetch_mut::<Input<A>>().unwrap();
input.prepare();
self.gui_context.reset_inputs();
for (_, event) in self.surface.events_rx.try_iter() {
match event {
WindowEvent::Close => break 'app,
WindowEvent::FramebufferSize(_, _) => resize = true,
ev => {
self.gui_context.process_event(ev.clone());
if let Some(scene) = self.scene_stack.current_mut() {
scene.process_input(&mut self.world, ev.clone(), &self.resources);
}
input.process_event(ev)
}
}
}
}
// 2. Update the scene.
// ------------------------------------------------
let scene_result = if let Some(scene) = self.scene_stack.current_mut() {
let scene_res = scene.update(dt, &mut self.world, &self.resources);
{
let chan = self.resources.fetch::<EventChannel<GameEvent>>().unwrap();
for ev in chan.read(&mut self.rdr_id) {
scene.process_event(&mut self.world, ev.clone(), &self.resources);
}
}
let maybe_gui =
scene.prepare_gui(dt, &mut self.world, &self.resources, &mut self.gui_context);
self.renderer.prepare_ui(
self.surface,
maybe_gui,
&self.resources,
&mut *self.gui_context.fonts.borrow_mut(),
);
Some(scene_res)
} else {
None
};
// Update children transforms:
// -----------------------------
update_transforms(&mut self.world);
// 3. Clean up dead entities.
// ------------------------------------------------
self.garbage_collector
.collect(&mut self.world, &self.resources);
// 4. Render to screen
// ------------------------------------------------
log::debug!("RENDER");
self.renderer
.update(self.surface, &self.world, dt, &self.resources);
if resize {
back_buffer = self.surface.back_buffer().unwrap();
let new_size = back_buffer.size();
let mut proj = self.resources.fetch_mut::<ProjectionMatrix>().unwrap();
proj.resize(new_size[0] as f32, new_size[1] as f32);
let mut dim = self.resources.fetch_mut::<WindowDim>().unwrap();
dim.resize(new_size[0], new_size[1]);
self.gui_context.window_dim = *dim;
}
let render =
self.renderer
.render(self.surface, &mut back_buffer, &self.world, &self.resources);
if render.is_ok() {
self.surface.window.swap_buffers();
} else {
break 'app;
}
// Play music :)
self.audio_system.process(&self.resources);
// Update collision world for collision queries.
{
let mut collisions = self.resources.fetch_mut::<CollisionWorld>().unwrap();
collisions.synchronize(&self.world);
}
// Either clean up or load new resources.
crate::assets::update_asset_managers(self.surface, &self.resources);
#[cfg(feature = "hot-reload")]
self.hot_reloader.update(&self.resources);
// Now, if need to switch scenes, do it.
if let Some(res) = scene_result {
self.scene_stack
.apply_result(res, &mut self.world, &mut self.resources);
}
let now = Instant::now();
let frame_duration = now - current_time;
if frame_duration < dt {
thread::sleep(dt - frame_duration);
}
current_time = now;
}
info!("Bye bye.");
}
}
| {
self.seed = Some(seed);
self
} | identifier_body |
mod.rs | //! Utilities for handling shell surfaces with the `wl_shell` protocol
//!
//! This module provides automatic handling of shell surfaces objects, by being registered
//! as a global handler for `wl_shell`. This protocol is deprecated in favor of `xdg_shell`,
//! thus this module is provided as a compatibility layer with older clients. As a consequence,
//! you can as a compositor-writer decide to only support its functionality in a best-effort
//! maneer: as this global is part of the core protocol, you are still required to provide
//! some support for it.
//!
//! ## Why use this implementation
//!
//! This implementation can track for you the various shell surfaces defined by the
//! clients by handling the `wl_shell` protocol.
//!
//! It allows you to easily access a list of all shell surfaces defined by your clients
//! access their associated metadata and underlying `wl_surface`s.
//!
//! This handler only handles the protocol exchanges with the client to present you the
//! information in a coherent and relatively easy to use manner. All the actual drawing
//! and positioning logic of windows is out of its scope.
//!
//! ## How to use it
//!
//! ### Initialization
//!
//! To initialize this handler, simple use the [`wl_shell_init`](::wayland::shell::legacy::wl_shell_init)
//! function provided in this module. You will need to provide it the [`CompositorToken`](::wayland::compositor::CompositorToken)
//! you retrieved from an instantiation of the compositor handler provided by smithay.
//!
//! ```no_run
//! # extern crate wayland_server;
//! # #[macro_use] extern crate smithay;
//! # extern crate wayland_protocols;
//! #
//! use smithay::wayland::compositor::roles::*;
//! use smithay::wayland::compositor::CompositorToken;
//! use smithay::wayland::shell::legacy::{wl_shell_init, ShellSurfaceRole, ShellRequest};
//! # use wayland_server::protocol::{wl_seat, wl_output};
//!
//! // define the roles type. You need to integrate the XdgSurface role:
//! define_roles!(MyRoles =>
//! [ShellSurface, ShellSurfaceRole]
//! );
//!
//! # fn main() {
//! # let mut event_loop = wayland_server::calloop::EventLoop::<()>::new().unwrap();
//! # let mut display = wayland_server::Display::new(event_loop.handle());
//! # let (compositor_token, _, _) = smithay::wayland::compositor::compositor_init::<MyRoles, _, _>(
//! # &mut display,
//! # |_, _, _| {},
//! # None
//! # );
//! let (shell_state, _) = wl_shell_init(
//! &mut display,
//! // token from the compositor implementation
//! compositor_token,
//! // your implementation
//! |event: ShellRequest<_>| { /* ... */ },
//! None // put a logger if you want
//! );
//!
//! // You're now ready to go!
//! # }
//! ```
use std::{
cell::RefCell,
rc::Rc,
sync::{Arc, Mutex},
};
use crate::wayland::compositor::{roles::Role, CompositorToken};
use wayland_server::{
protocol::{wl_output, wl_seat, wl_shell, wl_shell_surface, wl_surface},
Display, Global,
};
mod wl_handlers;
/// Metadata associated with the `wl_surface` role
pub struct ShellSurfaceRole {
/// Title of the surface
pub title: String,
/// Class of the surface
pub class: String,
pending_ping: u32,
}
/// A handle to a shell surface
pub struct ShellSurface<R> {
wl_surface: wl_surface::WlSurface,
shell_surface: wl_shell_surface::WlShellSurface,
token: CompositorToken<R>,
}
impl<R> ShellSurface<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Is the shell surface referred by this handle still alive?
pub fn alive(&self) -> bool {
self.shell_surface.as_ref().is_alive() && self.wl_surface.as_ref().is_alive()
}
/// Do this handle and the other one actually refer to the same shell surface?
pub fn equals(&self, other: &Self) -> bool {
self.shell_surface.as_ref().equals(&other.shell_surface.as_ref())
}
/// Access the underlying `wl_surface` of this toplevel surface
///
/// Returns `None` if the toplevel surface actually no longer exists.
pub fn get_surface(&self) -> Option<&wl_surface::WlSurface> |
/// Send a ping request to this shell surface
///
/// You'll receive the reply as a [`ShellRequest::Pong`] request
///
/// A typical use is to start a timer at the same time you send this ping
/// request, and cancel it when you receive the pong. If the timer runs
/// down to 0 before a pong is received, mark the client as unresponsive.
///
/// Fails if this shell client already has a pending ping or is already dead.
pub fn send_ping(&self, serial: u32) -> Result<(), ()> {
if !self.alive() {
return Err(());
}
let ret = self.token.with_role_data(&self.wl_surface, |data| {
if data.pending_ping == 0 {
data.pending_ping = serial;
true
} else {
false
}
});
if let Ok(true) = ret {
self.shell_surface.ping(serial);
Ok(())
} else {
Err(())
}
}
/// Send a configure event to this toplevel surface to suggest it a new configuration
pub fn send_configure(&self, size: (u32, u32), edges: wl_shell_surface::Resize) {
self.shell_surface.configure(edges, size.0 as i32, size.1 as i32)
}
/// Signal a popup surface that it has lost focus
pub fn send_popup_done(&self) {
self.shell_surface.popup_done()
}
}
/// Possible kinds of shell surface of the `wl_shell` protocol
pub enum ShellSurfaceKind {
/// Toplevel, a regular window displayed somewhere in the compositor space
Toplevel,
/// Transient, this surface has a parent surface
///
/// These are sub-windows of an application (for example a configuration window),
/// and as such should only be visible in their parent window is, and on top of it.
Transient {
/// The surface considered as parent
parent: wl_surface::WlSurface,
/// Location relative to the parent
location: (i32, i32),
/// Wether this window should be marked as inactive
inactive: bool,
},
/// Fullscreen surface, covering an entire output
Fullscreen {
/// Method used for fullscreen
method: wl_shell_surface::FullscreenMethod,
/// Framerate (relevant only for driver fullscreen)
framerate: u32,
/// Requested output if any
output: Option<wl_output::WlOutput>,
},
/// A popup surface
///
/// Short-lived surface, typically referred as "tooltips" in many
/// contexts.
Popup {
/// The parent surface of this popup
parent: wl_surface::WlSurface,
/// The serial of the input event triggering the creation of this
/// popup
serial: u32,
/// Wether this popup should be marked as inactive
inactive: bool,
/// Location of the popup relative to its parent
location: (i32, i32),
/// Seat associated this the input that triggered the creation of the
/// popup. Used to define when the "popup done" event is sent.
seat: wl_seat::WlSeat,
},
/// A maximized surface
///
/// Like a toplevel surface, but as big as possible on a single output
/// while keeping any relevant desktop-environment interface visible.
Maximized {
/// Requested output for maximization
output: Option<wl_output::WlOutput>,
},
}
/// A request triggered by a `wl_shell_surface`
pub enum ShellRequest<R> {
/// A new shell surface was created
///
/// by default it has no kind and this should not be displayed
NewShellSurface {
/// The created surface
surface: ShellSurface<R>,
},
/// A pong event
///
/// The surface responded to its pending ping. If you receive this
/// event, smithay has already checked that the responded serial was valid.
Pong {
/// The surface that sent the pong
surface: ShellSurface<R>,
},
/// Start of an interactive move
///
/// The surface requests that an interactive move is started on it
Move {
/// The surface requesting the move
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the move
serial: u32,
/// Seat associated with the move
seat: wl_seat::WlSeat,
},
/// Start of an interactive resize
///
/// The surface requests that an interactive resize is started on it
Resize {
/// The surface requesting the resize
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the resize
serial: u32,
/// Seat associated with the resize
seat: wl_seat::WlSeat,
/// Direction of the resize
edges: wl_shell_surface::Resize,
},
/// The surface changed its kind
SetKind {
/// The surface
surface: ShellSurface<R>,
/// Its new kind
kind: ShellSurfaceKind,
},
}
/// Shell global state
///
/// This state allows you to retrieve a list of surfaces
/// currently known to the shell global.
pub struct ShellState<R> {
known_surfaces: Vec<ShellSurface<R>>,
}
impl<R> ShellState<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Cleans the internal surface storage by removing all dead surfaces
pub(crate) fn cleanup_surfaces(&mut self) {
self.known_surfaces.retain(|s| s.alive());
}
/// Access all the shell surfaces known by this handler
pub fn surfaces(&self) -> &[ShellSurface<R>] {
&self.known_surfaces[..]
}
}
/// Create a new `wl_shell` global
pub fn wl_shell_init<R, L, Impl>(
display: &mut Display,
ctoken: CompositorToken<R>,
implementation: Impl,
logger: L,
) -> (Arc<Mutex<ShellState<R>>>, Global<wl_shell::WlShell>)
where
R: Role<ShellSurfaceRole> + 'static,
L: Into<Option<::slog::Logger>>,
Impl: FnMut(ShellRequest<R>) + 'static,
{
let _log = crate::slog_or_stdlog(logger);
let implementation = Rc::new(RefCell::new(implementation));
let state = Arc::new(Mutex::new(ShellState {
known_surfaces: Vec::new(),
}));
let state2 = state.clone();
let global = display.create_global(1, move |shell, _version| {
self::wl_handlers::implement_shell(shell, ctoken, implementation.clone(), state2.clone());
});
(state, global)
}
| {
if self.alive() {
Some(&self.wl_surface)
} else {
None
}
} | identifier_body |
mod.rs | //! Utilities for handling shell surfaces with the `wl_shell` protocol
//!
//! This module provides automatic handling of shell surfaces objects, by being registered
//! as a global handler for `wl_shell`. This protocol is deprecated in favor of `xdg_shell`,
//! thus this module is provided as a compatibility layer with older clients. As a consequence,
//! you can as a compositor-writer decide to only support its functionality in a best-effort
//! maneer: as this global is part of the core protocol, you are still required to provide
//! some support for it.
//!
//! ## Why use this implementation
//!
//! This implementation can track for you the various shell surfaces defined by the
//! clients by handling the `wl_shell` protocol.
//!
//! It allows you to easily access a list of all shell surfaces defined by your clients
//! access their associated metadata and underlying `wl_surface`s.
//!
//! This handler only handles the protocol exchanges with the client to present you the
//! information in a coherent and relatively easy to use manner. All the actual drawing
//! and positioning logic of windows is out of its scope.
//!
//! ## How to use it
//!
//! ### Initialization
//!
//! To initialize this handler, simple use the [`wl_shell_init`](::wayland::shell::legacy::wl_shell_init)
//! function provided in this module. You will need to provide it the [`CompositorToken`](::wayland::compositor::CompositorToken)
//! you retrieved from an instantiation of the compositor handler provided by smithay.
//!
//! ```no_run
//! # extern crate wayland_server;
//! # #[macro_use] extern crate smithay;
//! # extern crate wayland_protocols;
//! #
//! use smithay::wayland::compositor::roles::*;
//! use smithay::wayland::compositor::CompositorToken;
//! use smithay::wayland::shell::legacy::{wl_shell_init, ShellSurfaceRole, ShellRequest};
//! # use wayland_server::protocol::{wl_seat, wl_output};
//!
//! // define the roles type. You need to integrate the XdgSurface role:
//! define_roles!(MyRoles =>
//! [ShellSurface, ShellSurfaceRole]
//! );
//!
//! # fn main() {
//! # let mut event_loop = wayland_server::calloop::EventLoop::<()>::new().unwrap();
//! # let mut display = wayland_server::Display::new(event_loop.handle());
//! # let (compositor_token, _, _) = smithay::wayland::compositor::compositor_init::<MyRoles, _, _>(
//! # &mut display,
//! # |_, _, _| {},
//! # None
//! # );
//! let (shell_state, _) = wl_shell_init(
//! &mut display,
//! // token from the compositor implementation
//! compositor_token,
//! // your implementation
//! |event: ShellRequest<_>| { /* ... */ },
//! None // put a logger if you want
//! );
//!
//! // You're now ready to go!
//! # }
//! ```
use std::{
cell::RefCell,
rc::Rc,
sync::{Arc, Mutex},
};
use crate::wayland::compositor::{roles::Role, CompositorToken};
use wayland_server::{
protocol::{wl_output, wl_seat, wl_shell, wl_shell_surface, wl_surface},
Display, Global,
};
mod wl_handlers;
/// Metadata associated with the `wl_surface` role
pub struct ShellSurfaceRole {
/// Title of the surface
pub title: String,
/// Class of the surface
pub class: String,
pending_ping: u32,
}
/// A handle to a shell surface
pub struct | <R> {
wl_surface: wl_surface::WlSurface,
shell_surface: wl_shell_surface::WlShellSurface,
token: CompositorToken<R>,
}
impl<R> ShellSurface<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Is the shell surface referred by this handle still alive?
pub fn alive(&self) -> bool {
self.shell_surface.as_ref().is_alive() && self.wl_surface.as_ref().is_alive()
}
/// Do this handle and the other one actually refer to the same shell surface?
pub fn equals(&self, other: &Self) -> bool {
self.shell_surface.as_ref().equals(&other.shell_surface.as_ref())
}
/// Access the underlying `wl_surface` of this toplevel surface
///
/// Returns `None` if the toplevel surface actually no longer exists.
pub fn get_surface(&self) -> Option<&wl_surface::WlSurface> {
if self.alive() {
Some(&self.wl_surface)
} else {
None
}
}
/// Send a ping request to this shell surface
///
/// You'll receive the reply as a [`ShellRequest::Pong`] request
///
/// A typical use is to start a timer at the same time you send this ping
/// request, and cancel it when you receive the pong. If the timer runs
/// down to 0 before a pong is received, mark the client as unresponsive.
///
/// Fails if this shell client already has a pending ping or is already dead.
pub fn send_ping(&self, serial: u32) -> Result<(), ()> {
if !self.alive() {
return Err(());
}
let ret = self.token.with_role_data(&self.wl_surface, |data| {
if data.pending_ping == 0 {
data.pending_ping = serial;
true
} else {
false
}
});
if let Ok(true) = ret {
self.shell_surface.ping(serial);
Ok(())
} else {
Err(())
}
}
/// Send a configure event to this toplevel surface to suggest it a new configuration
pub fn send_configure(&self, size: (u32, u32), edges: wl_shell_surface::Resize) {
self.shell_surface.configure(edges, size.0 as i32, size.1 as i32)
}
/// Signal a popup surface that it has lost focus
pub fn send_popup_done(&self) {
self.shell_surface.popup_done()
}
}
/// Possible kinds of shell surface of the `wl_shell` protocol
pub enum ShellSurfaceKind {
/// Toplevel, a regular window displayed somewhere in the compositor space
Toplevel,
/// Transient, this surface has a parent surface
///
/// These are sub-windows of an application (for example a configuration window),
/// and as such should only be visible in their parent window is, and on top of it.
Transient {
/// The surface considered as parent
parent: wl_surface::WlSurface,
/// Location relative to the parent
location: (i32, i32),
/// Wether this window should be marked as inactive
inactive: bool,
},
/// Fullscreen surface, covering an entire output
Fullscreen {
/// Method used for fullscreen
method: wl_shell_surface::FullscreenMethod,
/// Framerate (relevant only for driver fullscreen)
framerate: u32,
/// Requested output if any
output: Option<wl_output::WlOutput>,
},
/// A popup surface
///
/// Short-lived surface, typically referred as "tooltips" in many
/// contexts.
Popup {
/// The parent surface of this popup
parent: wl_surface::WlSurface,
/// The serial of the input event triggering the creation of this
/// popup
serial: u32,
/// Wether this popup should be marked as inactive
inactive: bool,
/// Location of the popup relative to its parent
location: (i32, i32),
/// Seat associated this the input that triggered the creation of the
/// popup. Used to define when the "popup done" event is sent.
seat: wl_seat::WlSeat,
},
/// A maximized surface
///
/// Like a toplevel surface, but as big as possible on a single output
/// while keeping any relevant desktop-environment interface visible.
Maximized {
/// Requested output for maximization
output: Option<wl_output::WlOutput>,
},
}
/// A request triggered by a `wl_shell_surface`
pub enum ShellRequest<R> {
/// A new shell surface was created
///
/// by default it has no kind and this should not be displayed
NewShellSurface {
/// The created surface
surface: ShellSurface<R>,
},
/// A pong event
///
/// The surface responded to its pending ping. If you receive this
/// event, smithay has already checked that the responded serial was valid.
Pong {
/// The surface that sent the pong
surface: ShellSurface<R>,
},
/// Start of an interactive move
///
/// The surface requests that an interactive move is started on it
Move {
/// The surface requesting the move
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the move
serial: u32,
/// Seat associated with the move
seat: wl_seat::WlSeat,
},
/// Start of an interactive resize
///
/// The surface requests that an interactive resize is started on it
Resize {
/// The surface requesting the resize
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the resize
serial: u32,
/// Seat associated with the resize
seat: wl_seat::WlSeat,
/// Direction of the resize
edges: wl_shell_surface::Resize,
},
/// The surface changed its kind
SetKind {
/// The surface
surface: ShellSurface<R>,
/// Its new kind
kind: ShellSurfaceKind,
},
}
/// Shell global state
///
/// This state allows you to retrieve a list of surfaces
/// currently known to the shell global.
pub struct ShellState<R> {
known_surfaces: Vec<ShellSurface<R>>,
}
impl<R> ShellState<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Cleans the internal surface storage by removing all dead surfaces
pub(crate) fn cleanup_surfaces(&mut self) {
self.known_surfaces.retain(|s| s.alive());
}
/// Access all the shell surfaces known by this handler
pub fn surfaces(&self) -> &[ShellSurface<R>] {
&self.known_surfaces[..]
}
}
/// Create a new `wl_shell` global
pub fn wl_shell_init<R, L, Impl>(
display: &mut Display,
ctoken: CompositorToken<R>,
implementation: Impl,
logger: L,
) -> (Arc<Mutex<ShellState<R>>>, Global<wl_shell::WlShell>)
where
R: Role<ShellSurfaceRole> + 'static,
L: Into<Option<::slog::Logger>>,
Impl: FnMut(ShellRequest<R>) + 'static,
{
let _log = crate::slog_or_stdlog(logger);
let implementation = Rc::new(RefCell::new(implementation));
let state = Arc::new(Mutex::new(ShellState {
known_surfaces: Vec::new(),
}));
let state2 = state.clone();
let global = display.create_global(1, move |shell, _version| {
self::wl_handlers::implement_shell(shell, ctoken, implementation.clone(), state2.clone());
});
(state, global)
}
| ShellSurface | identifier_name |
mod.rs | //! Utilities for handling shell surfaces with the `wl_shell` protocol
//!
//! This module provides automatic handling of shell surfaces objects, by being registered
//! as a global handler for `wl_shell`. This protocol is deprecated in favor of `xdg_shell`,
//! thus this module is provided as a compatibility layer with older clients. As a consequence,
//! you can as a compositor-writer decide to only support its functionality in a best-effort
//! maneer: as this global is part of the core protocol, you are still required to provide
//! some support for it.
//!
//! ## Why use this implementation
//!
//! This implementation can track for you the various shell surfaces defined by the
//! clients by handling the `wl_shell` protocol.
//!
//! It allows you to easily access a list of all shell surfaces defined by your clients
//! access their associated metadata and underlying `wl_surface`s.
//!
//! This handler only handles the protocol exchanges with the client to present you the
//! information in a coherent and relatively easy to use manner. All the actual drawing
//! and positioning logic of windows is out of its scope.
//!
//! ## How to use it
//!
//! ### Initialization
//!
//! To initialize this handler, simple use the [`wl_shell_init`](::wayland::shell::legacy::wl_shell_init)
//! function provided in this module. You will need to provide it the [`CompositorToken`](::wayland::compositor::CompositorToken)
//! you retrieved from an instantiation of the compositor handler provided by smithay.
//!
//! ```no_run
//! # extern crate wayland_server;
//! # #[macro_use] extern crate smithay;
//! # extern crate wayland_protocols;
//! #
//! use smithay::wayland::compositor::roles::*;
//! use smithay::wayland::compositor::CompositorToken;
//! use smithay::wayland::shell::legacy::{wl_shell_init, ShellSurfaceRole, ShellRequest};
//! # use wayland_server::protocol::{wl_seat, wl_output};
//!
//! // define the roles type. You need to integrate the XdgSurface role:
//! define_roles!(MyRoles =>
//! [ShellSurface, ShellSurfaceRole]
//! );
//!
//! # fn main() {
//! # let mut event_loop = wayland_server::calloop::EventLoop::<()>::new().unwrap();
//! # let mut display = wayland_server::Display::new(event_loop.handle());
//! # let (compositor_token, _, _) = smithay::wayland::compositor::compositor_init::<MyRoles, _, _>(
//! # &mut display,
//! # |_, _, _| {},
//! # None
//! # );
//! let (shell_state, _) = wl_shell_init(
//! &mut display,
//! // token from the compositor implementation
//! compositor_token,
//! // your implementation
//! |event: ShellRequest<_>| { /* ... */ },
//! None // put a logger if you want
//! );
//!
//! // You're now ready to go!
//! # }
//! ```
use std::{
cell::RefCell,
rc::Rc,
sync::{Arc, Mutex},
};
use crate::wayland::compositor::{roles::Role, CompositorToken};
use wayland_server::{
protocol::{wl_output, wl_seat, wl_shell, wl_shell_surface, wl_surface},
Display, Global,
};
mod wl_handlers;
/// Metadata associated with the `wl_surface` role
pub struct ShellSurfaceRole {
/// Title of the surface
pub title: String,
/// Class of the surface
pub class: String,
pending_ping: u32,
}
/// A handle to a shell surface
pub struct ShellSurface<R> {
wl_surface: wl_surface::WlSurface,
shell_surface: wl_shell_surface::WlShellSurface,
token: CompositorToken<R>,
}
impl<R> ShellSurface<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Is the shell surface referred by this handle still alive?
pub fn alive(&self) -> bool {
self.shell_surface.as_ref().is_alive() && self.wl_surface.as_ref().is_alive()
}
/// Do this handle and the other one actually refer to the same shell surface?
pub fn equals(&self, other: &Self) -> bool {
self.shell_surface.as_ref().equals(&other.shell_surface.as_ref())
}
/// Access the underlying `wl_surface` of this toplevel surface
///
/// Returns `None` if the toplevel surface actually no longer exists.
pub fn get_surface(&self) -> Option<&wl_surface::WlSurface> {
if self.alive() {
Some(&self.wl_surface)
} else {
None
}
}
/// Send a ping request to this shell surface
///
/// You'll receive the reply as a [`ShellRequest::Pong`] request
///
/// A typical use is to start a timer at the same time you send this ping
/// request, and cancel it when you receive the pong. If the timer runs
/// down to 0 before a pong is received, mark the client as unresponsive.
///
/// Fails if this shell client already has a pending ping or is already dead.
pub fn send_ping(&self, serial: u32) -> Result<(), ()> {
if !self.alive() {
return Err(());
}
let ret = self.token.with_role_data(&self.wl_surface, |data| {
if data.pending_ping == 0 {
data.pending_ping = serial;
true
} else {
false
}
});
if let Ok(true) = ret | else {
Err(())
}
}
/// Send a configure event to this toplevel surface to suggest it a new configuration
pub fn send_configure(&self, size: (u32, u32), edges: wl_shell_surface::Resize) {
self.shell_surface.configure(edges, size.0 as i32, size.1 as i32)
}
/// Signal a popup surface that it has lost focus
pub fn send_popup_done(&self) {
self.shell_surface.popup_done()
}
}
/// Possible kinds of shell surface of the `wl_shell` protocol
pub enum ShellSurfaceKind {
/// Toplevel, a regular window displayed somewhere in the compositor space
Toplevel,
/// Transient, this surface has a parent surface
///
/// These are sub-windows of an application (for example a configuration window),
/// and as such should only be visible in their parent window is, and on top of it.
Transient {
/// The surface considered as parent
parent: wl_surface::WlSurface,
/// Location relative to the parent
location: (i32, i32),
/// Wether this window should be marked as inactive
inactive: bool,
},
/// Fullscreen surface, covering an entire output
Fullscreen {
/// Method used for fullscreen
method: wl_shell_surface::FullscreenMethod,
/// Framerate (relevant only for driver fullscreen)
framerate: u32,
/// Requested output if any
output: Option<wl_output::WlOutput>,
},
/// A popup surface
///
/// Short-lived surface, typically referred as "tooltips" in many
/// contexts.
Popup {
/// The parent surface of this popup
parent: wl_surface::WlSurface,
/// The serial of the input event triggering the creation of this
/// popup
serial: u32,
/// Wether this popup should be marked as inactive
inactive: bool,
/// Location of the popup relative to its parent
location: (i32, i32),
/// Seat associated this the input that triggered the creation of the
/// popup. Used to define when the "popup done" event is sent.
seat: wl_seat::WlSeat,
},
/// A maximized surface
///
/// Like a toplevel surface, but as big as possible on a single output
/// while keeping any relevant desktop-environment interface visible.
Maximized {
/// Requested output for maximization
output: Option<wl_output::WlOutput>,
},
}
/// A request triggered by a `wl_shell_surface`
pub enum ShellRequest<R> {
/// A new shell surface was created
///
/// by default it has no kind and this should not be displayed
NewShellSurface {
/// The created surface
surface: ShellSurface<R>,
},
/// A pong event
///
/// The surface responded to its pending ping. If you receive this
/// event, smithay has already checked that the responded serial was valid.
Pong {
/// The surface that sent the pong
surface: ShellSurface<R>,
},
/// Start of an interactive move
///
/// The surface requests that an interactive move is started on it
Move {
/// The surface requesting the move
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the move
serial: u32,
/// Seat associated with the move
seat: wl_seat::WlSeat,
},
/// Start of an interactive resize
///
/// The surface requests that an interactive resize is started on it
Resize {
/// The surface requesting the resize
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the resize
serial: u32,
/// Seat associated with the resize
seat: wl_seat::WlSeat,
/// Direction of the resize
edges: wl_shell_surface::Resize,
},
/// The surface changed its kind
SetKind {
/// The surface
surface: ShellSurface<R>,
/// Its new kind
kind: ShellSurfaceKind,
},
}
/// Shell global state
///
/// This state allows you to retrieve a list of surfaces
/// currently known to the shell global.
pub struct ShellState<R> {
known_surfaces: Vec<ShellSurface<R>>,
}
impl<R> ShellState<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Cleans the internal surface storage by removing all dead surfaces
pub(crate) fn cleanup_surfaces(&mut self) {
self.known_surfaces.retain(|s| s.alive());
}
/// Access all the shell surfaces known by this handler
pub fn surfaces(&self) -> &[ShellSurface<R>] {
&self.known_surfaces[..]
}
}
/// Create a new `wl_shell` global
pub fn wl_shell_init<R, L, Impl>(
display: &mut Display,
ctoken: CompositorToken<R>,
implementation: Impl,
logger: L,
) -> (Arc<Mutex<ShellState<R>>>, Global<wl_shell::WlShell>)
where
R: Role<ShellSurfaceRole> + 'static,
L: Into<Option<::slog::Logger>>,
Impl: FnMut(ShellRequest<R>) + 'static,
{
let _log = crate::slog_or_stdlog(logger);
let implementation = Rc::new(RefCell::new(implementation));
let state = Arc::new(Mutex::new(ShellState {
known_surfaces: Vec::new(),
}));
let state2 = state.clone();
let global = display.create_global(1, move |shell, _version| {
self::wl_handlers::implement_shell(shell, ctoken, implementation.clone(), state2.clone());
});
(state, global)
}
| {
self.shell_surface.ping(serial);
Ok(())
} | conditional_block |
mod.rs | //! Utilities for handling shell surfaces with the `wl_shell` protocol
//!
//! This module provides automatic handling of shell surfaces objects, by being registered
//! as a global handler for `wl_shell`. This protocol is deprecated in favor of `xdg_shell`,
//! thus this module is provided as a compatibility layer with older clients. As a consequence,
//! you can as a compositor-writer decide to only support its functionality in a best-effort
//! maneer: as this global is part of the core protocol, you are still required to provide
//! some support for it.
//!
//! ## Why use this implementation
//!
//! This implementation can track for you the various shell surfaces defined by the
//! clients by handling the `wl_shell` protocol.
//!
//! It allows you to easily access a list of all shell surfaces defined by your clients
//! access their associated metadata and underlying `wl_surface`s.
//!
//! This handler only handles the protocol exchanges with the client to present you the
//! information in a coherent and relatively easy to use manner. All the actual drawing
//! and positioning logic of windows is out of its scope.
//!
//! ## How to use it
//!
//! ### Initialization
//!
//! To initialize this handler, simple use the [`wl_shell_init`](::wayland::shell::legacy::wl_shell_init)
//! function provided in this module. You will need to provide it the [`CompositorToken`](::wayland::compositor::CompositorToken)
//! you retrieved from an instantiation of the compositor handler provided by smithay.
//!
//! ```no_run
//! # extern crate wayland_server;
//! # #[macro_use] extern crate smithay;
//! # extern crate wayland_protocols;
//! #
//! use smithay::wayland::compositor::roles::*;
//! use smithay::wayland::compositor::CompositorToken;
//! use smithay::wayland::shell::legacy::{wl_shell_init, ShellSurfaceRole, ShellRequest};
//! # use wayland_server::protocol::{wl_seat, wl_output};
//!
//! // define the roles type. You need to integrate the XdgSurface role:
//! define_roles!(MyRoles =>
//! [ShellSurface, ShellSurfaceRole]
//! );
//!
//! # fn main() {
//! # let mut event_loop = wayland_server::calloop::EventLoop::<()>::new().unwrap();
//! # let mut display = wayland_server::Display::new(event_loop.handle());
//! # let (compositor_token, _, _) = smithay::wayland::compositor::compositor_init::<MyRoles, _, _>(
//! # &mut display,
//! # |_, _, _| {},
//! # None
//! # );
//! let (shell_state, _) = wl_shell_init(
//! &mut display,
//! // token from the compositor implementation
//! compositor_token,
//! // your implementation
//! |event: ShellRequest<_>| { /* ... */ },
//! None // put a logger if you want
//! );
//!
//! // You're now ready to go!
//! # }
//! ```
use std::{
cell::RefCell,
rc::Rc,
sync::{Arc, Mutex},
};
use crate::wayland::compositor::{roles::Role, CompositorToken};
use wayland_server::{
protocol::{wl_output, wl_seat, wl_shell, wl_shell_surface, wl_surface},
Display, Global,
};
mod wl_handlers;
/// Metadata associated with the `wl_surface` role
pub struct ShellSurfaceRole {
/// Title of the surface
pub title: String,
/// Class of the surface
pub class: String,
pending_ping: u32,
}
/// A handle to a shell surface
pub struct ShellSurface<R> {
wl_surface: wl_surface::WlSurface,
shell_surface: wl_shell_surface::WlShellSurface,
token: CompositorToken<R>,
}
impl<R> ShellSurface<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Is the shell surface referred by this handle still alive?
pub fn alive(&self) -> bool {
self.shell_surface.as_ref().is_alive() && self.wl_surface.as_ref().is_alive()
}
/// Do this handle and the other one actually refer to the same shell surface?
pub fn equals(&self, other: &Self) -> bool {
self.shell_surface.as_ref().equals(&other.shell_surface.as_ref())
}
/// Access the underlying `wl_surface` of this toplevel surface
///
/// Returns `None` if the toplevel surface actually no longer exists.
pub fn get_surface(&self) -> Option<&wl_surface::WlSurface> {
if self.alive() {
Some(&self.wl_surface)
} else {
None
} | ///
/// You'll receive the reply as a [`ShellRequest::Pong`] request
///
/// A typical use is to start a timer at the same time you send this ping
/// request, and cancel it when you receive the pong. If the timer runs
/// down to 0 before a pong is received, mark the client as unresponsive.
///
/// Fails if this shell client already has a pending ping or is already dead.
pub fn send_ping(&self, serial: u32) -> Result<(), ()> {
if !self.alive() {
return Err(());
}
let ret = self.token.with_role_data(&self.wl_surface, |data| {
if data.pending_ping == 0 {
data.pending_ping = serial;
true
} else {
false
}
});
if let Ok(true) = ret {
self.shell_surface.ping(serial);
Ok(())
} else {
Err(())
}
}
/// Send a configure event to this toplevel surface to suggest it a new configuration
pub fn send_configure(&self, size: (u32, u32), edges: wl_shell_surface::Resize) {
self.shell_surface.configure(edges, size.0 as i32, size.1 as i32)
}
/// Signal a popup surface that it has lost focus
pub fn send_popup_done(&self) {
self.shell_surface.popup_done()
}
}
/// Possible kinds of shell surface of the `wl_shell` protocol
pub enum ShellSurfaceKind {
/// Toplevel, a regular window displayed somewhere in the compositor space
Toplevel,
/// Transient, this surface has a parent surface
///
/// These are sub-windows of an application (for example a configuration window),
/// and as such should only be visible in their parent window is, and on top of it.
Transient {
/// The surface considered as parent
parent: wl_surface::WlSurface,
/// Location relative to the parent
location: (i32, i32),
/// Wether this window should be marked as inactive
inactive: bool,
},
/// Fullscreen surface, covering an entire output
Fullscreen {
/// Method used for fullscreen
method: wl_shell_surface::FullscreenMethod,
/// Framerate (relevant only for driver fullscreen)
framerate: u32,
/// Requested output if any
output: Option<wl_output::WlOutput>,
},
/// A popup surface
///
/// Short-lived surface, typically referred as "tooltips" in many
/// contexts.
Popup {
/// The parent surface of this popup
parent: wl_surface::WlSurface,
/// The serial of the input event triggering the creation of this
/// popup
serial: u32,
/// Wether this popup should be marked as inactive
inactive: bool,
/// Location of the popup relative to its parent
location: (i32, i32),
/// Seat associated this the input that triggered the creation of the
/// popup. Used to define when the "popup done" event is sent.
seat: wl_seat::WlSeat,
},
/// A maximized surface
///
/// Like a toplevel surface, but as big as possible on a single output
/// while keeping any relevant desktop-environment interface visible.
Maximized {
/// Requested output for maximization
output: Option<wl_output::WlOutput>,
},
}
/// A request triggered by a `wl_shell_surface`
pub enum ShellRequest<R> {
/// A new shell surface was created
///
/// by default it has no kind and this should not be displayed
NewShellSurface {
/// The created surface
surface: ShellSurface<R>,
},
/// A pong event
///
/// The surface responded to its pending ping. If you receive this
/// event, smithay has already checked that the responded serial was valid.
Pong {
/// The surface that sent the pong
surface: ShellSurface<R>,
},
/// Start of an interactive move
///
/// The surface requests that an interactive move is started on it
Move {
/// The surface requesting the move
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the move
serial: u32,
/// Seat associated with the move
seat: wl_seat::WlSeat,
},
/// Start of an interactive resize
///
/// The surface requests that an interactive resize is started on it
Resize {
/// The surface requesting the resize
surface: ShellSurface<R>,
/// Serial of the implicit grab that initiated the resize
serial: u32,
/// Seat associated with the resize
seat: wl_seat::WlSeat,
/// Direction of the resize
edges: wl_shell_surface::Resize,
},
/// The surface changed its kind
SetKind {
/// The surface
surface: ShellSurface<R>,
/// Its new kind
kind: ShellSurfaceKind,
},
}
/// Shell global state
///
/// This state allows you to retrieve a list of surfaces
/// currently known to the shell global.
pub struct ShellState<R> {
known_surfaces: Vec<ShellSurface<R>>,
}
impl<R> ShellState<R>
where
R: Role<ShellSurfaceRole> + 'static,
{
/// Cleans the internal surface storage by removing all dead surfaces
pub(crate) fn cleanup_surfaces(&mut self) {
self.known_surfaces.retain(|s| s.alive());
}
/// Access all the shell surfaces known by this handler
pub fn surfaces(&self) -> &[ShellSurface<R>] {
&self.known_surfaces[..]
}
}
/// Create a new `wl_shell` global
pub fn wl_shell_init<R, L, Impl>(
display: &mut Display,
ctoken: CompositorToken<R>,
implementation: Impl,
logger: L,
) -> (Arc<Mutex<ShellState<R>>>, Global<wl_shell::WlShell>)
where
R: Role<ShellSurfaceRole> + 'static,
L: Into<Option<::slog::Logger>>,
Impl: FnMut(ShellRequest<R>) + 'static,
{
let _log = crate::slog_or_stdlog(logger);
let implementation = Rc::new(RefCell::new(implementation));
let state = Arc::new(Mutex::new(ShellState {
known_surfaces: Vec::new(),
}));
let state2 = state.clone();
let global = display.create_global(1, move |shell, _version| {
self::wl_handlers::implement_shell(shell, ctoken, implementation.clone(), state2.clone());
});
(state, global)
} | }
/// Send a ping request to this shell surface | random_line_split |
azuremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AzureMachineReconciler reconciles a AzureMachine object
type AzureMachineReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}
func (r *AzureMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AzureMachine{}).
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureMachine")),
},
).
Watches(
&source.Kind{Type: &infrav1.AzureCluster{}},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AzureClusterToAzureMachines)},
).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
func (r *AzureMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
logger := r.Log.WithValues("namespace", req.Namespace, "azureMachine", req.Name)
// Fetch the AzureMachine VM.
azureMachine := &infrav1.AzureMachine{}
err := r.Get(ctx, req.NamespacedName, azureMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, azureMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
logger.Info("Machine Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
logger = logger.WithValues("machine", machine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)
azureCluster := &infrav1.AzureCluster{}
azureClusterName := client.ObjectKey{
Namespace: azureMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, azureClusterName, azureCluster); err != nil {
logger.Info("AzureCluster is not available yet")
return reconcile.Result{}, nil
}
logger = logger.WithValues("AzureCluster", azureCluster.Name)
// Create the cluster scope
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger,
Cluster: cluster,
AzureCluster: azureCluster,
})
if err != nil {
return reconcile.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Logger: logger,
Client: r.Client,
Cluster: cluster,
Machine: machine,
AzureCluster: azureCluster,
AzureMachine: azureMachine,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureMachine changes.
defer func() {
if err := machineScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !azureMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, machineScope, clusterScope)
}
// findVM queries the Azure APIs and retrieves the VM if it exists, returns nil otherwise.
func (r *AzureMachineReconciler) findVM(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
var vm *infrav1.VM
// If the ProviderID is populated, describe the VM using its name and resource group name.
vm, err := ams.VMIfExists(scope.GetVMID())
if err != nil {
return nil, errors.Wrapf(err, "failed to query AzureMachine VM")
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
machineScope.Info("Reconciling AzureMachine")
// If the AzureMachine is in an error state, return early.
if machineScope.AzureMachine.Status.FailureReason != nil || machineScope.AzureMachine.Status.FailureMessage != nil {
machineScope.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the AzureMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
// Register the finalizer immediately to avoid orphaning Azure resources on delete
if err := machineScope.PatchObject(); err != nil {
return reconcile.Result{}, err
}
if !machineScope.Cluster.Status.InfrastructureReady {
machineScope.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
machineScope.Info("Bootstrap data secret reference is not yet available")
return reconcile.Result{}, nil
}
// Check that the image is valid
// NOTE: this validation logic is also in the validating webhook
if machineScope.AzureMachine.Spec.Image != nil {
if errs := infrav1.ValidateImage(machineScope.AzureMachine.Spec.Image, field.NewPath("image")); len(errs) > 0 {
agg := kerrors.NewAggregate(errs.ToAggregate().Errors())
machineScope.Info("Invalid image: %s", agg.Error())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "InvalidImage", "Invalid image: %s", agg.Error())
return reconcile.Result{}, nil
}
}
if machineScope.AzureMachine.Spec.AvailabilityZone.ID != nil {
message := "AvailavilityZone is deprecated, use FailureDomain instead"
machineScope.Info(message)
r.Recorder.Eventf(machineScope.AzureCluster, corev1.EventTypeWarning, "DeprecatedField", message)
// Set FailureDomain if its not set
if machineScope.AzureMachine.Spec.FailureDomain == nil {
machineScope.V(2).Info("Failure domain not set, setting with value from AvailabilityZone.ID")
machineScope.AzureMachine.Spec.FailureDomain = machineScope.AzureMachine.Spec.AvailabilityZone.ID
}
}
ams := newAzureMachineService(machineScope, clusterScope)
// Get or create the virtual machine.
vm, err := r.getOrCreate(machineScope, ams)
if err != nil {
return reconcile.Result{}, err
}
// Make sure Spec.ProviderID is always set.
machineScope.SetProviderID(fmt.Sprintf("azure:////%s", vm.ID))
machineScope.SetAnnotation("cluster-api-provider-azure", "true")
machineScope.SetAddresses(vm.Addresses)
// Proceed to reconcile the AzureMachine state.
machineScope.SetVMState(vm.State)
switch vm.State {
case infrav1.VMStateSucceeded:
machineScope.V(2).Info("VM is running", "id", *machineScope.GetVMID())
machineScope.SetReady()
case infrav1.VMStateCreating:
machineScope.V(2).Info("VM is creating", "id", *machineScope.GetVMID())
machineScope.SetNotReady()
case infrav1.VMStateUpdating:
machineScope.V(2).Info("VM is updating", "id", *machineScope.GetVMID())
machineScope.SetNotReady()
case infrav1.VMStateDeleting:
machineScope.Info("Unexpected VM deletion", "state", vm.State, "instance-id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure VM deletion")
machineScope.SetNotReady()
case infrav1.VMStateFailed:
machineScope.SetNotReady()
machineScope.Error(errors.New("Failed to create or update VM"), "VM is in failed state", "id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "FailedVMState", "Azure VM is in failed state")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Azure VM state is %s", vm.State))
default:
machineScope.SetNotReady()
machineScope.Info("VM state is undefined", "state", vm.State, "instance-id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnhandledVMState", "Azure VM state is undefined")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Azure VM state %q is undefined", vm.State))
}
// Ensure that the tags are correct.
err = r.reconcileTags(machineScope, clusterScope, machineScope.AdditionalTags())
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to ensure tags: %+v", err)
}
return reconcile.Result{}, nil
}
func (r *AzureMachineReconciler) getOrCreate(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
vm, err := r.findVM(scope, ams)
if err != nil {
return nil, err
}
if vm == nil {
// Create a new AzureMachine VM if we couldn't find a running VM.
vm, err = ams.Create()
if err != nil {
return nil, errors.Wrapf(err, "failed to create AzureMachine VM")
}
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) |
// AzureClusterToAzureMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AzureMachines.
func (r *AzureMachineReconciler) AzureClusterToAzureMachines(o handler.MapObject) []ctrl.Request {
result := []ctrl.Request{}
c, ok := o.Object.(*infrav1.AzureCluster)
if !ok {
r.Log.Error(errors.Errorf("expected a AzureCluster but got a %T", o.Object), "failed to get AzureMachine for AzureCluster")
return nil
}
log := r.Log.WithValues("AzureCluster", c.Name, "Namespace", c.Namespace)
cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
return result
case err != nil:
log.Error(err, "failed to get owning cluster")
return result
}
labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name}
machineList := &clusterv1.MachineList{}
if err := r.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "failed to list Machines")
return nil
}
for _, m := range machineList.Items {
if m.Spec.InfrastructureRef.Name == "" {
continue
}
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
return result
}
| {
machineScope.Info("Handling deleted AzureMachine")
if err := newAzureMachineService(machineScope, clusterScope).Delete(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureCluster %s/%s", clusterScope.Namespace(), clusterScope.Name())
}
defer func() {
if reterr == nil {
// VM is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
}
}()
return reconcile.Result{}, nil
} | identifier_body |
azuremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AzureMachineReconciler reconciles a AzureMachine object
type AzureMachineReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}
func (r *AzureMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AzureMachine{}).
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureMachine")),
},
).
Watches(
&source.Kind{Type: &infrav1.AzureCluster{}},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AzureClusterToAzureMachines)},
).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
func (r *AzureMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
logger := r.Log.WithValues("namespace", req.Namespace, "azureMachine", req.Name)
// Fetch the AzureMachine VM.
azureMachine := &infrav1.AzureMachine{}
err := r.Get(ctx, req.NamespacedName, azureMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, azureMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
logger.Info("Machine Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
logger = logger.WithValues("machine", machine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)
azureCluster := &infrav1.AzureCluster{}
azureClusterName := client.ObjectKey{
Namespace: azureMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, azureClusterName, azureCluster); err != nil {
logger.Info("AzureCluster is not available yet")
return reconcile.Result{}, nil
}
logger = logger.WithValues("AzureCluster", azureCluster.Name)
// Create the cluster scope
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger,
Cluster: cluster,
AzureCluster: azureCluster,
})
if err != nil {
return reconcile.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Logger: logger,
Client: r.Client,
Cluster: cluster,
Machine: machine,
AzureCluster: azureCluster,
AzureMachine: azureMachine,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureMachine changes.
defer func() {
if err := machineScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !azureMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, machineScope, clusterScope)
}
// findVM queries the Azure APIs and retrieves the VM if it exists, returns nil otherwise.
func (r *AzureMachineReconciler) | (scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
var vm *infrav1.VM
// If the ProviderID is populated, describe the VM using its name and resource group name.
vm, err := ams.VMIfExists(scope.GetVMID())
if err != nil {
return nil, errors.Wrapf(err, "failed to query AzureMachine VM")
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
machineScope.Info("Reconciling AzureMachine")
// If the AzureMachine is in an error state, return early.
if machineScope.AzureMachine.Status.FailureReason != nil || machineScope.AzureMachine.Status.FailureMessage != nil {
machineScope.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the AzureMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
// Register the finalizer immediately to avoid orphaning Azure resources on delete
if err := machineScope.PatchObject(); err != nil {
return reconcile.Result{}, err
}
if !machineScope.Cluster.Status.InfrastructureReady {
machineScope.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
machineScope.Info("Bootstrap data secret reference is not yet available")
return reconcile.Result{}, nil
}
// Check that the image is valid
// NOTE: this validation logic is also in the validating webhook
if machineScope.AzureMachine.Spec.Image != nil {
if errs := infrav1.ValidateImage(machineScope.AzureMachine.Spec.Image, field.NewPath("image")); len(errs) > 0 {
agg := kerrors.NewAggregate(errs.ToAggregate().Errors())
machineScope.Info("Invalid image: %s", agg.Error())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "InvalidImage", "Invalid image: %s", agg.Error())
return reconcile.Result{}, nil
}
}
if machineScope.AzureMachine.Spec.AvailabilityZone.ID != nil {
message := "AvailavilityZone is deprecated, use FailureDomain instead"
machineScope.Info(message)
r.Recorder.Eventf(machineScope.AzureCluster, corev1.EventTypeWarning, "DeprecatedField", message)
// Set FailureDomain if its not set
if machineScope.AzureMachine.Spec.FailureDomain == nil {
machineScope.V(2).Info("Failure domain not set, setting with value from AvailabilityZone.ID")
machineScope.AzureMachine.Spec.FailureDomain = machineScope.AzureMachine.Spec.AvailabilityZone.ID
}
}
ams := newAzureMachineService(machineScope, clusterScope)
// Get or create the virtual machine.
vm, err := r.getOrCreate(machineScope, ams)
if err != nil {
return reconcile.Result{}, err
}
// Make sure Spec.ProviderID is always set.
machineScope.SetProviderID(fmt.Sprintf("azure:////%s", vm.ID))
machineScope.SetAnnotation("cluster-api-provider-azure", "true")
machineScope.SetAddresses(vm.Addresses)
// Proceed to reconcile the AzureMachine state.
machineScope.SetVMState(vm.State)
switch vm.State {
case infrav1.VMStateSucceeded:
machineScope.V(2).Info("VM is running", "id", *machineScope.GetVMID())
machineScope.SetReady()
case infrav1.VMStateCreating:
machineScope.V(2).Info("VM is creating", "id", *machineScope.GetVMID())
machineScope.SetNotReady()
case infrav1.VMStateUpdating:
machineScope.V(2).Info("VM is updating", "id", *machineScope.GetVMID())
machineScope.SetNotReady()
case infrav1.VMStateDeleting:
machineScope.Info("Unexpected VM deletion", "state", vm.State, "instance-id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure VM deletion")
machineScope.SetNotReady()
case infrav1.VMStateFailed:
machineScope.SetNotReady()
machineScope.Error(errors.New("Failed to create or update VM"), "VM is in failed state", "id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "FailedVMState", "Azure VM is in failed state")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Azure VM state is %s", vm.State))
default:
machineScope.SetNotReady()
machineScope.Info("VM state is undefined", "state", vm.State, "instance-id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnhandledVMState", "Azure VM state is undefined")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Azure VM state %q is undefined", vm.State))
}
// Ensure that the tags are correct.
err = r.reconcileTags(machineScope, clusterScope, machineScope.AdditionalTags())
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to ensure tags: %+v", err)
}
return reconcile.Result{}, nil
}
func (r *AzureMachineReconciler) getOrCreate(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
vm, err := r.findVM(scope, ams)
if err != nil {
return nil, err
}
if vm == nil {
// Create a new AzureMachine VM if we couldn't find a running VM.
vm, err = ams.Create()
if err != nil {
return nil, errors.Wrapf(err, "failed to create AzureMachine VM")
}
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
machineScope.Info("Handling deleted AzureMachine")
if err := newAzureMachineService(machineScope, clusterScope).Delete(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureCluster %s/%s", clusterScope.Namespace(), clusterScope.Name())
}
defer func() {
if reterr == nil {
// VM is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
}
}()
return reconcile.Result{}, nil
}
// AzureClusterToAzureMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AzureMachines.
func (r *AzureMachineReconciler) AzureClusterToAzureMachines(o handler.MapObject) []ctrl.Request {
result := []ctrl.Request{}
c, ok := o.Object.(*infrav1.AzureCluster)
if !ok {
r.Log.Error(errors.Errorf("expected a AzureCluster but got a %T", o.Object), "failed to get AzureMachine for AzureCluster")
return nil
}
log := r.Log.WithValues("AzureCluster", c.Name, "Namespace", c.Namespace)
cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
return result
case err != nil:
log.Error(err, "failed to get owning cluster")
return result
}
labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name}
machineList := &clusterv1.MachineList{}
if err := r.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "failed to list Machines")
return nil
}
for _, m := range machineList.Items {
if m.Spec.InfrastructureRef.Name == "" {
continue
}
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
return result
}
| findVM | identifier_name |
azuremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AzureMachineReconciler reconciles a AzureMachine object
type AzureMachineReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}
func (r *AzureMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AzureMachine{}).
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureMachine")),
},
).
Watches(
&source.Kind{Type: &infrav1.AzureCluster{}},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AzureClusterToAzureMachines)},
).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
func (r *AzureMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
logger := r.Log.WithValues("namespace", req.Namespace, "azureMachine", req.Name)
// Fetch the AzureMachine VM.
azureMachine := &infrav1.AzureMachine{}
err := r.Get(ctx, req.NamespacedName, azureMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, azureMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
logger.Info("Machine Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
logger = logger.WithValues("machine", machine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)
azureCluster := &infrav1.AzureCluster{}
azureClusterName := client.ObjectKey{
Namespace: azureMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, azureClusterName, azureCluster); err != nil {
logger.Info("AzureCluster is not available yet")
return reconcile.Result{}, nil
}
logger = logger.WithValues("AzureCluster", azureCluster.Name)
// Create the cluster scope
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger,
Cluster: cluster,
AzureCluster: azureCluster,
})
if err != nil {
return reconcile.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Logger: logger,
Client: r.Client,
Cluster: cluster,
Machine: machine,
AzureCluster: azureCluster,
AzureMachine: azureMachine,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureMachine changes.
defer func() {
if err := machineScope.Close(); err != nil && reterr == nil { | reterr = err
}
}()
// Handle deleted machines
if !azureMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, machineScope, clusterScope)
}
// findVM queries the Azure APIs and retrieves the VM if it exists, returns nil otherwise.
func (r *AzureMachineReconciler) findVM(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
var vm *infrav1.VM
// If the ProviderID is populated, describe the VM using its name and resource group name.
vm, err := ams.VMIfExists(scope.GetVMID())
if err != nil {
return nil, errors.Wrapf(err, "failed to query AzureMachine VM")
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
machineScope.Info("Reconciling AzureMachine")
// If the AzureMachine is in an error state, return early.
if machineScope.AzureMachine.Status.FailureReason != nil || machineScope.AzureMachine.Status.FailureMessage != nil {
machineScope.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the AzureMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
// Register the finalizer immediately to avoid orphaning Azure resources on delete
if err := machineScope.PatchObject(); err != nil {
return reconcile.Result{}, err
}
if !machineScope.Cluster.Status.InfrastructureReady {
machineScope.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
machineScope.Info("Bootstrap data secret reference is not yet available")
return reconcile.Result{}, nil
}
// Check that the image is valid
// NOTE: this validation logic is also in the validating webhook
if machineScope.AzureMachine.Spec.Image != nil {
if errs := infrav1.ValidateImage(machineScope.AzureMachine.Spec.Image, field.NewPath("image")); len(errs) > 0 {
agg := kerrors.NewAggregate(errs.ToAggregate().Errors())
machineScope.Info("Invalid image: %s", agg.Error())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "InvalidImage", "Invalid image: %s", agg.Error())
return reconcile.Result{}, nil
}
}
if machineScope.AzureMachine.Spec.AvailabilityZone.ID != nil {
message := "AvailavilityZone is deprecated, use FailureDomain instead"
machineScope.Info(message)
r.Recorder.Eventf(machineScope.AzureCluster, corev1.EventTypeWarning, "DeprecatedField", message)
// Set FailureDomain if its not set
if machineScope.AzureMachine.Spec.FailureDomain == nil {
machineScope.V(2).Info("Failure domain not set, setting with value from AvailabilityZone.ID")
machineScope.AzureMachine.Spec.FailureDomain = machineScope.AzureMachine.Spec.AvailabilityZone.ID
}
}
ams := newAzureMachineService(machineScope, clusterScope)
// Get or create the virtual machine.
vm, err := r.getOrCreate(machineScope, ams)
if err != nil {
return reconcile.Result{}, err
}
// Make sure Spec.ProviderID is always set.
machineScope.SetProviderID(fmt.Sprintf("azure:////%s", vm.ID))
machineScope.SetAnnotation("cluster-api-provider-azure", "true")
machineScope.SetAddresses(vm.Addresses)
// Proceed to reconcile the AzureMachine state.
machineScope.SetVMState(vm.State)
switch vm.State {
case infrav1.VMStateSucceeded:
machineScope.V(2).Info("VM is running", "id", *machineScope.GetVMID())
machineScope.SetReady()
case infrav1.VMStateCreating:
machineScope.V(2).Info("VM is creating", "id", *machineScope.GetVMID())
machineScope.SetNotReady()
case infrav1.VMStateUpdating:
machineScope.V(2).Info("VM is updating", "id", *machineScope.GetVMID())
machineScope.SetNotReady()
case infrav1.VMStateDeleting:
machineScope.Info("Unexpected VM deletion", "state", vm.State, "instance-id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure VM deletion")
machineScope.SetNotReady()
case infrav1.VMStateFailed:
machineScope.SetNotReady()
machineScope.Error(errors.New("Failed to create or update VM"), "VM is in failed state", "id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "FailedVMState", "Azure VM is in failed state")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Azure VM state is %s", vm.State))
default:
machineScope.SetNotReady()
machineScope.Info("VM state is undefined", "state", vm.State, "instance-id", *machineScope.GetVMID())
r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnhandledVMState", "Azure VM state is undefined")
machineScope.SetFailureReason(capierrors.UpdateMachineError)
machineScope.SetFailureMessage(errors.Errorf("Azure VM state %q is undefined", vm.State))
}
// Ensure that the tags are correct.
err = r.reconcileTags(machineScope, clusterScope, machineScope.AdditionalTags())
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to ensure tags: %+v", err)
}
return reconcile.Result{}, nil
}
func (r *AzureMachineReconciler) getOrCreate(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
vm, err := r.findVM(scope, ams)
if err != nil {
return nil, err
}
if vm == nil {
// Create a new AzureMachine VM if we couldn't find a running VM.
vm, err = ams.Create()
if err != nil {
return nil, errors.Wrapf(err, "failed to create AzureMachine VM")
}
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
machineScope.Info("Handling deleted AzureMachine")
if err := newAzureMachineService(machineScope, clusterScope).Delete(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureCluster %s/%s", clusterScope.Namespace(), clusterScope.Name())
}
defer func() {
if reterr == nil {
// VM is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
}
}()
return reconcile.Result{}, nil
}
// AzureClusterToAzureMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AzureMachines.
func (r *AzureMachineReconciler) AzureClusterToAzureMachines(o handler.MapObject) []ctrl.Request {
result := []ctrl.Request{}
c, ok := o.Object.(*infrav1.AzureCluster)
if !ok {
r.Log.Error(errors.Errorf("expected a AzureCluster but got a %T", o.Object), "failed to get AzureMachine for AzureCluster")
return nil
}
log := r.Log.WithValues("AzureCluster", c.Name, "Namespace", c.Namespace)
cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
return result
case err != nil:
log.Error(err, "failed to get owning cluster")
return result
}
labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name}
machineList := &clusterv1.MachineList{}
if err := r.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "failed to list Machines")
return nil
}
for _, m := range machineList.Items {
if m.Spec.InfrastructureRef.Name == "" {
continue
}
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
return result
} | random_line_split | |
azuremachine_controller.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"github.com/go-logr/logr"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/tools/record"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha3"
"sigs.k8s.io/cluster-api-provider-azure/cloud/scope"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AzureMachineReconciler reconciles a AzureMachine object
type AzureMachineReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}
// SetupWithManager registers the AzureMachine controller with the manager and
// wires up two additional watches: owning CAPI Machines (mapped back to their
// infrastructure AzureMachine) and AzureClusters (mapped to every AzureMachine
// belonging to that cluster).
func (r *AzureMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
	return ctrl.NewControllerManagedBy(mgr).
		WithOptions(options).
		For(&infrav1.AzureMachine{}).
		Watches(
			// Requeue the AzureMachine when its owning Machine changes.
			&source.Kind{Type: &clusterv1.Machine{}},
			&handler.EnqueueRequestsFromMapFunc{
				ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureMachine")),
			},
		).
		Watches(
			// Requeue all AzureMachines of an AzureCluster that changed.
			&source.Kind{Type: &infrav1.AzureCluster{}},
			&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AzureClusterToAzureMachines)},
		).
		Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch
func (r *AzureMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
logger := r.Log.WithValues("namespace", req.Namespace, "azureMachine", req.Name)
// Fetch the AzureMachine VM.
azureMachine := &infrav1.AzureMachine{}
err := r.Get(ctx, req.NamespacedName, azureMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, azureMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil |
logger = logger.WithValues("machine", machine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)
azureCluster := &infrav1.AzureCluster{}
azureClusterName := client.ObjectKey{
Namespace: azureMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, azureClusterName, azureCluster); err != nil {
logger.Info("AzureCluster is not available yet")
return reconcile.Result{}, nil
}
logger = logger.WithValues("AzureCluster", azureCluster.Name)
// Create the cluster scope
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger,
Cluster: cluster,
AzureCluster: azureCluster,
})
if err != nil {
return reconcile.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Logger: logger,
Client: r.Client,
Cluster: cluster,
Machine: machine,
AzureCluster: azureCluster,
AzureMachine: azureMachine,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureMachine changes.
defer func() {
if err := machineScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !azureMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, machineScope, clusterScope)
}
// findVM queries the Azure APIs and retrieves the VM if it exists, returns nil otherwise.
func (r *AzureMachineReconciler) findVM(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
	var vm *infrav1.VM
	// If the ProviderID is populated, describe the VM using its name and resource group name.
	// NOTE(review): the lookup below runs unconditionally; presumably
	// VMIfExists itself tolerates a nil/empty VM ID — confirm.
	vm, err := ams.VMIfExists(scope.GetVMID())
	if err != nil {
		return nil, errors.Wrapf(err, "failed to query AzureMachine VM")
	}
	return vm, nil
}
// reconcileNormal brings a live (non-deleted) AzureMachine to its desired
// state: ensures the finalizer, waits for cluster infrastructure and
// bootstrap data, creates or looks up the VM, then mirrors the VM state onto
// the AzureMachine status and reconciles tags.
func (r *AzureMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
	machineScope.Info("Reconciling AzureMachine")
	// If the AzureMachine is in an error state, return early.
	if machineScope.AzureMachine.Status.FailureReason != nil || machineScope.AzureMachine.Status.FailureMessage != nil {
		machineScope.Info("Error state detected, skipping reconciliation")
		return reconcile.Result{}, nil
	}
	// If the AzureMachine doesn't have our finalizer, add it.
	controllerutil.AddFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
	// Register the finalizer immediately to avoid orphaning Azure resources on delete.
	if err := machineScope.PatchObject(); err != nil {
		return reconcile.Result{}, err
	}
	if !machineScope.Cluster.Status.InfrastructureReady {
		machineScope.Info("Cluster infrastructure is not ready yet")
		return reconcile.Result{}, nil
	}
	// Make sure bootstrap data is available and populated.
	if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
		machineScope.Info("Bootstrap data secret reference is not yet available")
		return reconcile.Result{}, nil
	}
	// Check that the image is valid.
	// NOTE: this validation logic is also in the validating webhook.
	if machineScope.AzureMachine.Spec.Image != nil {
		if errs := infrav1.ValidateImage(machineScope.AzureMachine.Spec.Image, field.NewPath("image")); len(errs) > 0 {
			agg := kerrors.NewAggregate(errs.ToAggregate().Errors())
			// Fixed: logr loggers take key/value pairs, not printf verbs.
			machineScope.Info("Invalid image", "error", agg.Error())
			r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "InvalidImage", "Invalid image: %s", agg.Error())
			return reconcile.Result{}, nil
		}
	}
	if machineScope.AzureMachine.Spec.AvailabilityZone.ID != nil {
		// Fixed typo in the user-facing message ("Availavility" -> "Availability").
		message := "AvailabilityZone is deprecated, use FailureDomain instead"
		machineScope.Info(message)
		// Fixed: record the deprecation event on the AzureMachine being
		// reconciled — every other event in this function targets it, not the
		// AzureCluster.
		r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "DeprecatedField", message)
		// Set FailureDomain if it's not set.
		if machineScope.AzureMachine.Spec.FailureDomain == nil {
			machineScope.V(2).Info("Failure domain not set, setting with value from AvailabilityZone.ID")
			machineScope.AzureMachine.Spec.FailureDomain = machineScope.AzureMachine.Spec.AvailabilityZone.ID
		}
	}
	ams := newAzureMachineService(machineScope, clusterScope)
	// Get or create the virtual machine.
	vm, err := r.getOrCreate(machineScope, ams)
	if err != nil {
		return reconcile.Result{}, err
	}
	// Make sure Spec.ProviderID is always set.
	machineScope.SetProviderID(fmt.Sprintf("azure:////%s", vm.ID))
	machineScope.SetAnnotation("cluster-api-provider-azure", "true")
	machineScope.SetAddresses(vm.Addresses)
	// Proceed to reconcile the AzureMachine state.
	machineScope.SetVMState(vm.State)
	switch vm.State {
	case infrav1.VMStateSucceeded:
		machineScope.V(2).Info("VM is running", "id", *machineScope.GetVMID())
		machineScope.SetReady()
	case infrav1.VMStateCreating:
		machineScope.V(2).Info("VM is creating", "id", *machineScope.GetVMID())
		machineScope.SetNotReady()
	case infrav1.VMStateUpdating:
		machineScope.V(2).Info("VM is updating", "id", *machineScope.GetVMID())
		machineScope.SetNotReady()
	case infrav1.VMStateDeleting:
		machineScope.Info("Unexpected VM deletion", "state", vm.State, "instance-id", *machineScope.GetVMID())
		r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnexpectedVMDeletion", "Unexpected Azure VM deletion")
		machineScope.SetNotReady()
	case infrav1.VMStateFailed:
		// A failed VM is terminal: surface a failure reason/message so the
		// error-state early return above short-circuits future reconciles.
		machineScope.SetNotReady()
		machineScope.Error(errors.New("Failed to create or update VM"), "VM is in failed state", "id", *machineScope.GetVMID())
		r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "FailedVMState", "Azure VM is in failed state")
		machineScope.SetFailureReason(capierrors.UpdateMachineError)
		machineScope.SetFailureMessage(errors.Errorf("Azure VM state is %s", vm.State))
	default:
		machineScope.SetNotReady()
		machineScope.Info("VM state is undefined", "state", vm.State, "instance-id", *machineScope.GetVMID())
		r.Recorder.Eventf(machineScope.AzureMachine, corev1.EventTypeWarning, "UnhandledVMState", "Azure VM state is undefined")
		machineScope.SetFailureReason(capierrors.UpdateMachineError)
		machineScope.SetFailureMessage(errors.Errorf("Azure VM state %q is undefined", vm.State))
	}
	// Ensure that the tags are correct.
	err = r.reconcileTags(machineScope, clusterScope, machineScope.AdditionalTags())
	if err != nil {
		return reconcile.Result{}, errors.Errorf("failed to ensure tags: %+v", err)
	}
	return reconcile.Result{}, nil
}
func (r *AzureMachineReconciler) getOrCreate(scope *scope.MachineScope, ams *azureMachineService) (*infrav1.VM, error) {
vm, err := r.findVM(scope, ams)
if err != nil {
return nil, err
}
if vm == nil {
// Create a new AzureMachine VM if we couldn't find a running VM.
vm, err = ams.Create()
if err != nil {
return nil, errors.Wrapf(err, "failed to create AzureMachine VM")
}
}
return vm, nil
}
func (r *AzureMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
machineScope.Info("Handling deleted AzureMachine")
if err := newAzureMachineService(machineScope, clusterScope).Delete(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureCluster %s/%s", clusterScope.Namespace(), clusterScope.Name())
}
defer func() {
if reterr == nil {
// VM is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(machineScope.AzureMachine, infrav1.MachineFinalizer)
}
}()
return reconcile.Result{}, nil
}
// AzureClusterToAzureMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AzureMachines.
func (r *AzureMachineReconciler) AzureClusterToAzureMachines(o handler.MapObject) []ctrl.Request {
	result := []ctrl.Request{}
	c, ok := o.Object.(*infrav1.AzureCluster)
	if !ok {
		r.Log.Error(errors.Errorf("expected a AzureCluster but got a %T", o.Object), "failed to get AzureMachine for AzureCluster")
		return nil
	}
	log := r.Log.WithValues("AzureCluster", c.Name, "Namespace", c.Namespace)
	// Resolve the owning CAPI Cluster; without it we cannot select its Machines.
	cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
	switch {
	case apierrors.IsNotFound(err) || cluster == nil:
		// Owner not set yet (or already gone): nothing to enqueue.
		return result
	case err != nil:
		log.Error(err, "failed to get owning cluster")
		return result
	}
	// Machines carrying the cluster-name label belong to this cluster.
	labels := map[string]string{clusterv1.ClusterLabelName: cluster.Name}
	machineList := &clusterv1.MachineList{}
	if err := r.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
		log.Error(err, "failed to list Machines")
		return nil
	}
	for _, m := range machineList.Items {
		// Skip Machines whose infrastructure reference is not filled in yet.
		if m.Spec.InfrastructureRef.Name == "" {
			continue
		}
		name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
		result = append(result, ctrl.Request{NamespacedName: name})
	}
	return result
}
| {
logger.Info("Machine Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
} | conditional_block |
contour.py | import cv2
import numpy as np
from math import *
def appendimages(im1, im2):
    """Return a new image that appends the two images side-by-side.

    The shorter image is padded with rows of zeros at the bottom so both
    operands share the same height before horizontal concatenation.
    """
    rows1 = im1.shape[0]
    rows2 = im2.shape[0]
    # Bug fix: `zeros` was called unqualified (NameError at runtime) — it
    # lives in numpy and must be spelled np.zeros here.
    if rows1 < rows2:
        im1 = np.concatenate((im1, np.zeros((rows2 - rows1, im1.shape[1]))), axis=0)
    elif rows1 > rows2:
        im2 = np.concatenate((im2, np.zeros((rows1 - rows2, im2.shape[1]))), axis=0)
    # if none of these cases they are equal, no filling needed.
    return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
    """Return the three images stitched horizontally, left to right."""
    return np.concatenate((im1, im2, im3), axis=1)
def point_not_at_edge(x, y, img_height, img_width, threshold):
    """True when (x, y) lies farther than `threshold` from every image border."""
    away_from_origin_edges = x > threshold and y > threshold
    away_from_far_edges = (fabs(x - img_width) > threshold
                           and fabs(y - img_height) > threshold)
    return away_from_origin_edges and away_from_far_edges
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
    """True when the Manhattan distance between the two points exceeds 2*threshold."""
    manhattan = fabs(x - x_neighb) + fabs(y - y_neighb)
    return manhattan > 2 * threshold
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
    """True when (x, y) is both distinct from (x_next, y_next) (Manhattan
    distance > 2*threshold) and farther than `threshold` from every border."""
    far_apart = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold
    inside_margin = (
        x > threshold
        and y > threshold
        and fabs(x - img_width) > threshold
        and fabs(y - img_height) > threshold
    )
    return far_apart and inside_margin
'''
calculate the point on wrist of the hand
by taking the average of opposites of convexity defects to the center
'''
def find_wrist(center, contour, set_idx_convDefs):
    """Estimate the wrist point as the average of the reflections, through
    `center`, of the contour points selected by `set_idx_convDefs`."""
    n = len(set_idx_convDefs)
    picked = np.asarray([contour[i, 0, :] for i in set_idx_convDefs], dtype=float)
    # Reflect each convexity-defect point through the palm center.
    mirrored = 2.0 * np.asarray(center, dtype=float) - picked
    total = mirrored.sum(axis=0)
    return (int(total[0] / n), int(total[1] / n))
'''
simple methods to detect finger tips
by calculating the farthest points on convex hull
compared to a fixed point. This fixed point can be center or wrist
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
    """Pick candidate finger-tip indices on the convex hull.

    Hull points are ranked by distance from `fixedPoint` (farthest first);
    points within `edge_thresh` of the image border are discarded, and points
    within roughly `neighbor_thresh` (Manhattan) of an already accepted
    candidate are treated as duplicates of it.

    Returns the surviving hull indices, still ordered farthest-first.
    """
    dist_from_fixedPoint = []
    img_height, img_width = img.shape[0:2]
    hull_nbPts = hull.shape[0]
    #calculate distance to fixed Point
    # NOTE(review): `fixedPoint` is an (x, y) tuple while hull[i] is a (1, 2)
    # array — the subtraction relies on numpy broadcasting; confirm callers
    # always pass a 2-element point.
    for i in range(hull_nbPts):
        dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))
    #sort index from farthest to nearest
    max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))
    #need to eliminate same points and points at edge
    #results stored in idx_ok, the list of candidate indices of hulls
    idx_ok = []
    for i in range(hull_nbPts):
        idx = max_indx[i]
        if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
            if(len(idx_ok) == 0):
                # First surviving point is accepted unconditionally.
                idx_ok.append(idx)
            else:
                not_similar = True
                for idx_neighbor in idx_ok:
                    not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))
                    if not not_similar: #if similar break the loop
                        break
                if(not_similar):
                    idx_ok.append(idx)
    return idx_ok
def simple_preprocessing(img):
    """Binarize a BGR image: grayscale, blur, morphological open, Otsu threshold."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(gray, (5, 5), 0)
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    # Erode then dilate (open) to suppress small speckles.
    smoothed = cv2.erode(smoothed, ellipse, iterations=2)
    smoothed = cv2.dilate(smoothed, ellipse, iterations=2)
    _, bin_image = cv2.threshold(smoothed, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def simple_preprocessing2(img, backGround):
    """Binarize `img` after removing a static background frame.

    Bug fix: the original computed `gray - gray2` on uint8 arrays, which wraps
    around (e.g. 10 - 12 == 254) wherever the background is brighter than the
    frame; cv2.absdiff yields the intended magnitude of the difference.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
    gray = cv2.absdiff(gray, gray2)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def hsv_preprocessing(img):
    """Binarize a BGR image by thresholding skin-like colors in HSV space."""
    #define boundaries of HSV pixel intensities to be considered as 'skin'
    #H: 2-39 / 360 * 255 = 1-28
    #S: 0.15 - 0.9 / 1 * 255 = 38- 250
    #V: 0.2 - 0.95 / 1 * 255 =
    lower = np.array([1, 38, 51])
    upper = np.array([28, 250, 242])
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    #hsv = cv2.GaussianBlur(hsv, (5,5), 0)
    skinMask = cv2.inRange(hsv, lower, upper)
    #choosing a structure elements to apply noise-remove process
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
    # Morphological open (erode then dilate) removes small speckles.
    skinMask = cv2.erode(skinMask, kernel, iterations = 2)
    skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
    blur = cv2.GaussianBlur(skinMask, (5,5), 0)
    # Re-threshold after the blur so the output is strictly binary again.
    ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def find_contour_hull(binary_image):
    """Return the largest-area contour of `binary_image`, its convex hull as
    points, and the same hull expressed as contour indices."""
    contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Track the index of the contour with maximal area.
    best_idx = 0
    best_area = 0
    for idx, candidate in enumerate(contours):
        candidate_area = cv2.contourArea(candidate)
        if candidate_area > best_area:
            best_area = candidate_area
            best_idx = idx
    largest = contours[best_idx]
    hull = cv2.convexHull(largest)
    hull_idx = cv2.convexHull(largest, returnPoints=False)
    return largest, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
    """Return a black canvas of img's shape with the contour drawn in green
    and its convex hull drawn in red."""
    canvas = np.zeros(img.shape, np.uint8)
    for curve, color in ((cnt, (0, 255, 0)), (hull, (0, 0, 255))):
        cv2.drawContours(canvas, [curve], 0, color, 3)
    return canvas
def eliminate_background(img, backGround, thres_diff):
    """Zero out, in place, every pixel of `img` whose per-channel difference
    from `backGround` stays within `thres_diff` on ALL channels; return `img`.

    Rewritten as a vectorized numpy operation: the original triple Python loop
    cost O(h*w*d) interpreter work per frame, and `fabs(img - backGround)` on
    uint8 inputs wrapped around instead of producing a magnitude.
    """
    # Promote to a signed type so the subtraction cannot underflow.
    diff = np.abs(img.astype(np.int32) - backGround.astype(np.int32))
    background_like = (diff <= thres_diff).all(axis=2)
    img[background_like] = 0
    return img
'''
Tracking by camera
NOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
camera.read()
grabbed, backGround = camera.read()
for i in range(12):
grabbed, img = camera.read()
backGround = backGround/2 + img/2
'''
def tracking():
    """Capture frames from the default camera, segment the hand against an
    averaged background frame, annotate contour/hull/defects/wrist/finger
    tips, and dump composite frames to 'store2/imgN.jpg'. Press 'q' to stop.

    Note: two lines selecting the four deepest convexity defects were garbled
    in the source and have been reconstructed to match main() below.
    """
    camera = cv2.VideoCapture(0)
    _, img = camera.read()
    h, w, d = img.shape
    fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
    out = cv2.VideoWriter()
    success = out.open('output.avi', fourcc, 15, (3*w, h), True)
    # Let the camera settle; the last grabbed frame is the reference background.
    waitTime = 100
    for i in range(waitTime):
        _, average = camera.read()
    index_im = 0
    while True:
        grabbed, img = camera.read()
        img_diff = cv2.absdiff(img, average)
        bin_image = simple_preprocessing(img_diff)
        bin_image2 = bin_image.copy()
        cv2.imshow('binaire', bin_image2)
        cnt, hull, hull_idx = find_contour_hull(bin_image)
        drawing = draws_contour_hull(img, cnt, hull)
        # search the points between each finger by using convexity defects;
        # see the doc of opencv to understand implementation details
        convDefs = cv2.convexityDefects(cnt, hull_idx)
        dist_order = np.argsort((-1)*convDefs[:, 0, 3])
        # Reconstructed: the four deepest defects are the valleys between
        # fingers; mark each on the drawing.
        max4dist = dist_order[0:4]
        max4points = convDefs[max4dist, 0, 2]
        for i in max4points:
            cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)
        hull_nbPts = hull.shape[0]
        # find and draw center of contour from image moments
        moments = cv2.moments(cnt)
        if moments['m00'] != 0:
            cx = int(moments['m10']/moments['m00'])  # cx = M10/M00
            cy = int(moments['m01']/moments['m00'])  # cy = M01/M00
            centr = (cx, cy)
        # NOTE(review): if m00 == 0 on the very first frame, `centr` is
        # unbound below — presumably the segmented hand always has area.
        cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
        # find and draw the point representing the wrist of the hand
        wrist = find_wrist(centr, cnt, max4points)
        cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
        edge_thresh = 20
        neighbor_thresh = 20
        fixedPoint = wrist
        idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
        # Keep at most the five strongest candidates (one per finger).
        max_5hull_idx = idx_ok[0:5]
        for i in max_5hull_idx:
            cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
        frame = append_imgs(img, bin_image2, drawing)
        cv2.imwrite("store2/" + "img" + str(index_im) + ".jpg", frame)
        index_im += 1
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.release()
    out.release()
    cv2.destroyAllWindows()
def main():
    """One-shot demo: segment a hand photo, then display the contour, convex
    hull, convexity defects, centroid, estimated wrist and a finger tip."""
    image_name = "hand_in_BG5.png"
    img = cv2.imread(image_name)
    bin_image = simple_preprocessing(img)
    #bin_image = hsv_preprocessing(img)
    cv2.imshow('orig', img)
    cv2.imshow('bin', bin_image)
    cv2.waitKey(0)
    cnt, hull, hull_idx = find_contour_hull(bin_image)
    drawing = draws_contour_hull(img, cnt, hull)
    #search the points between each finger by using convexity defects
    #see the doc of opencv to understand implementation details
    convDefs = cv2.convexityDefects(cnt, hull_idx)
    dist_order = np.argsort((-1)*convDefs[:,0,3])
    # Four deepest defects = the valleys between fingers.
    max4dist = dist_order[0:4]
    max4points = convDefs[max4dist,0,2]
    for i in max4points:
        cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
    hull_nbPts = hull.shape[0]
    '''
    #draws all the points constitue the convex hull (for debugging)
    for i in range(hull_nbPts):
        cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
        cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
    '''
    #find and draw center of contour
    moments = cv2.moments(cnt)
    if moments['m00']!=0:
        cx = int(moments['m10']/moments['m00']) # cx = M10/M00
        cy = int(moments['m01']/moments['m00']) # cy = M01/M00
        centr=(cx,cy)
    # NOTE(review): if m00 == 0, `centr` is unbound here — presumably the
    # test image always yields a non-degenerate contour; confirm.
    cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
    #find and draw point represents the wrist of the hand
    wrist = find_wrist(centr, cnt, max4points)
    cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
    edge_thresh = 20
    neighbor_thresh = 20
    fixedPoint = wrist
    idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
    #print 'list of idx_ok = ', idx_ok
    # Only the single strongest candidate is drawn here (slice [0:1]).
    max_5hull_idx = idx_ok[0:1]
    #print 'first five of idx_ok = ', max_5hull_idx
    for i in max_5hull_idx:
        cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
    #print hull[i]
    #print dist_from_center
    cv2.imshow('contour and convex hull', drawing)
    k = cv2.waitKey(0)
if __name__ == "__main__":
# main()
tracking() | max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2) | random_line_split |
contour.py | import cv2
import numpy as np
from math import *
def appendimages(im1, im2):
    """Return a new image that appends the two images side-by-side.

    The shorter image is padded with rows of zeros at the bottom so both
    operands share the same height before horizontal concatenation.
    """
    rows1 = im1.shape[0]
    rows2 = im2.shape[0]
    # Bug fix: `zeros` was called unqualified (NameError at runtime) — it
    # lives in numpy and must be spelled np.zeros here.
    if rows1 < rows2:
        im1 = np.concatenate((im1, np.zeros((rows2 - rows1, im1.shape[1]))), axis=0)
    elif rows1 > rows2:
        im2 = np.concatenate((im2, np.zeros((rows1 - rows2, im2.shape[1]))), axis=0)
    # if none of these cases they are equal, no filling needed.
    return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
#buff = appendimages(im1,im2)
#return appendimages(buff,im3)
buff = np.concatenate((im1,im2), axis=1)
return np.concatenate((buff,im3), axis=1)
#check whether the point is near edge or not
def point_not_at_edge( x, y, img_height, img_width, threshold):
|
#check whether two points are too near from each other
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = (fabs(x - x_neighb) + fabs(y - y_neighb) > 2*threshold)
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = (fabs(x - x_next) + fabs(y - y_next) > 2*threshold)
no_at_edge = (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold )
return (no_same_point and no_at_edge)
'''
calculate the point on wrist of the hand
by taking the average of opposites of convexity defects to the center
'''
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2,n))
for i in range(n):
opposites[0,i] = 2*center[0] - contour[set_idx_convDefs[i], 0, 0] #calcul x
opposites[1,i] = 2*center[1] - contour[set_idx_convDefs[i], 0, 1] #calcul y
total = np.sum(opposites, axis = 1)
#print total
x = int(total[0]/n)
y = int(total[1]/n)
wrist = (x, y)
#print 'wrist = ', wrist
return wrist
'''
simple methods to detect finger tips
by calculating the farthest points on convex hull
compared to a fixed point. This fixed point can be center or wrist
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
#calculate distance to fixed Point
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))
#sort index from farthest to nearest
max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))
#need to eliminate same points and points at edge
#results stored in idx_ok, the list of candidate indices of hulls
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
if(len(idx_ok) == 0):
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))
if not not_similar: #if similar break the loop
break
if(not_similar):
idx_ok.append(idx)
return idx_ok
def simple_preprocessing(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
blur = cv2.erode(blur, kernel, iterations = 2)
blur = cv2.dilate(blur, kernel, iterations = 2)
ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def simple_preprocessing2(img, backGround):
    """Binarize `img` after removing a static background frame.

    Bug fix: the original computed `gray - gray2` on uint8 arrays, which wraps
    around (e.g. 10 - 12 == 254) wherever the background is brighter than the
    frame; cv2.absdiff yields the intended magnitude of the difference.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
    gray = cv2.absdiff(gray, gray2)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return bin_image
def hsv_preprocessing(img):
#define boundaries of HSV pixel intensities to be considered as 'skin'
#H: 2-39 / 360 * 255 = 1-28
#S: 0.15 - 0.9 / 1 * 255 = 38- 250
#V: 0.2 - 0.95 / 1 * 255 =
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#hsv = cv2.GaussianBlur(hsv, (5,5), 0)
skinMask = cv2.inRange(hsv, lower, upper)
#choosing a structure elements to apply noise-remove process
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
skinMask = cv2.erode(skinMask, kernel, iterations = 2)
skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
blur = cv2.GaussianBlur(skinMask, (5,5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
#find the contour
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#search the maximum contour in the hierachy tree of contours
max_area = 0
ci = 0
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if(area > max_area):
max_area = area
ci = i
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints = False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
#draws the image with only the contour and its convex hull
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def eliminate_background(img, backGround, thres_diff):
    """Zero out, in place, every pixel of `img` whose per-channel difference
    from `backGround` stays within `thres_diff` on ALL channels; return `img`.

    Rewritten as a vectorized numpy operation: the original triple Python loop
    cost O(h*w*d) interpreter work per frame, and `fabs(img - backGround)` on
    uint8 inputs wrapped around instead of producing a magnitude.
    """
    # Promote to a signed type so the subtraction cannot underflow.
    diff = np.abs(img.astype(np.int32) - backGround.astype(np.int32))
    background_like = (diff <= thres_diff).all(axis=2)
    img[background_like] = 0
    return img
'''
Tracking by camera
NOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
camera.read()
grabbed, backGround = camera.read()
for i in range(12):
grabbed, img = camera.read()
backGround = backGround/2 + img/2
'''
def tracking():
camera = cv2.VideoCapture(0)
_,img = camera.read()
h,w,d = img.shape
#out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))
fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
out = cv2.VideoWriter()
success = out.open('output.avi',fourcc, 15, (3*w,h), True)
waitTime = 100
for i in range(waitTime):
_, average = camera.read()
#average = np.float32(average)
index_im = 0
while True:
grabbed, img = camera.read()
#alpha = 0.01 #factor of forgetting
#cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst
img_diff = cv2.absdiff(img, average)#convert scale and do subtract these 2 images
#cv2.imshow('img_diff', img_diff)
#substract background
#img = eliminate_background(img, backGround, 20)
#bin_image = simple_preprocessing(img, backGround)
bin_image = simple_preprocessing(img_diff)
bin_image2 = bin_image.copy()
cv2.imshow('binaire', bin_image2)
# bin_image = hsv_preprocessing(img)
# cv2.imshow('orig', img)
# cv2.imshow('bin', bin_image)
# cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
#search the points between each finger by using convexity defects
#see the doc of opencv to understand implementation details
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort((-1)*convDefs[:,0,3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
hull_nbPts = hull.shape[0]
'''
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
'''
#find and draw center of contour
moments = cv2.moments(cnt)
if moments['m00']!=0:
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centr=(cx,cy)
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
#find and draw point represents the wrist of the hand
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
#print 'list of idx_ok = ', idx_ok
max_5hull_idx = idx_ok[0:5]
#print 'first five of idx_ok = ', max_5hull_idx
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
#print hull[i]
#print dist_from_center
#cv2.imshow('contour and convex hull', drawing)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
'''
print img.shape
print bin_image2.shape
print drawing.shape
'''
frame = append_imgs(img, bin_image2, drawing)
#cv2.imshow('frame', frame)
#out.write(frame)
cv2.imwrite("store2/" + "img"+str(index_im) + ".jpg", frame)
index_im += 1
if cv2.waitKey(1) & 0xFF == ord("q"):
break
camera.release()
out.release()
#self.out = None
cv2.destroyAllWindows()
def main():
image_name = "hand_in_BG5.png"
img = cv2.imread(image_name)
bin_image = simple_preprocessing(img)
#bin_image = hsv_preprocessing(img)
cv2.imshow('orig', img)
cv2.imshow('bin', bin_image)
cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
#search the points between each finger by using convexity defects
#see the doc of opencv to understand implementation details
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort((-1)*convDefs[:,0,3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
hull_nbPts = hull.shape[0]
'''
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
'''
#find and draw center of contour
moments = cv2.moments(cnt)
if moments['m00']!=0:
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centr=(cx,cy)
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
#find and draw point represents the wrist of the hand
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
#print 'list of idx_ok = ', idx_ok
max_5hull_idx = idx_ok[0:1]
#print 'first five of idx_ok = ', max_5hull_idx
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
#print hull[i]
#print dist_from_center
cv2.imshow('contour and convex hull', drawing)
k = cv2.waitKey(0)
if __name__ == "__main__":
# main()
tracking()
| no_at_edge = ( (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) )
return no_at_edge | identifier_body |
contour.py | import cv2
import numpy as np
from math import *
def appendimages(im1,im2):
""" Return a new image that appends the two images side-by-side. """
# select the image with the fewest rows and fill in enough empty rows
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1,zeros((rows2-rows1,im1.shape[1]))),axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2,zeros((rows1-rows2,im2.shape[1]))),axis=0)
# if none of these cases they are equal, no filling needed.
return np.concatenate((im1,im2), axis=1)
def append_imgs(im1, im2, im3):
#buff = appendimages(im1,im2)
#return appendimages(buff,im3)
buff = np.concatenate((im1,im2), axis=1)
return np.concatenate((buff,im3), axis=1)
#check whether the point is near edge or not
def point_not_at_edge( x, y, img_height, img_width, threshold):
no_at_edge = ( (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) )
return no_at_edge
#check whether two points are too near from each other
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = (fabs(x - x_neighb) + fabs(y - y_neighb) > 2*threshold)
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = (fabs(x - x_next) + fabs(y - y_next) > 2*threshold)
no_at_edge = (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold )
return (no_same_point and no_at_edge)
'''
calculate the point on wrist of the hand
by taking the average of opposites of convexity defects to the center
'''
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2,n))
for i in range(n):
opposites[0,i] = 2*center[0] - contour[set_idx_convDefs[i], 0, 0] #calcul x
opposites[1,i] = 2*center[1] - contour[set_idx_convDefs[i], 0, 1] #calcul y
total = np.sum(opposites, axis = 1)
#print total
x = int(total[0]/n)
y = int(total[1]/n)
wrist = (x, y)
#print 'wrist = ', wrist
return wrist
'''
simple methods to detect finger tips
by calculating the farthest points on convex hull
compared to a fixed point. This fixed point can be center or wrist
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
#calculate distance to fixed Point
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))
#sort index from farthest to nearest
max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))
#need to eliminate same points and points at edge
#results stored in idx_ok, the list of candidate indices of hulls
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
if(len(idx_ok) == 0):
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))
if not not_similar: #if similar break the loop
break
if(not_similar):
idx_ok.append(idx)
return idx_ok
def simple_preprocessing(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
blur = cv2.erode(blur, kernel, iterations = 2)
blur = cv2.dilate(blur, kernel, iterations = 2)
ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def simple_preprocessing2(img, backGround):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
gray = gray-gray2
blur = cv2.GaussianBlur(gray, (5,5), 0)
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
#blur = cv2.erode(blur, kernel, iterations = 2)
#blur = cv2.dilate(blur, kernel, iterations = 2)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def hsv_preprocessing(img):
#define boundaries of HSV pixel intensities to be considered as 'skin'
#H: 2-39 / 360 * 255 = 1-28
#S: 0.15 - 0.9 / 1 * 255 = 38- 250
#V: 0.2 - 0.95 / 1 * 255 =
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#hsv = cv2.GaussianBlur(hsv, (5,5), 0)
skinMask = cv2.inRange(hsv, lower, upper)
#choosing a structure elements to apply noise-remove process
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
skinMask = cv2.erode(skinMask, kernel, iterations = 2)
skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
blur = cv2.GaussianBlur(skinMask, (5,5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
#find the contour
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#search the maximum contour in the hierachy tree of contours
max_area = 0
ci = 0
for i in range(len(contours)):
|
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints = False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
#draws the image with only the contour and its convex hull
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def eliminate_background(img, backGround, thres_diff):
height, width, depth = img.shape
for i in range(height):
for j in range(width):
erase = True
for k in range(depth):
if(fabs(img[i,j,k] - backGround[i,j,k]) > thres_diff):
erase = False
if erase:
img[i,j,:] = 0
return img
'''
Tracking by camera
NOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
camera.read()
grabbed, backGround = camera.read()
for i in range(12):
grabbed, img = camera.read()
backGround = backGround/2 + img/2
'''
def tracking():
camera = cv2.VideoCapture(0)
_,img = camera.read()
h,w,d = img.shape
#out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))
fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
out = cv2.VideoWriter()
success = out.open('output.avi',fourcc, 15, (3*w,h), True)
waitTime = 100
for i in range(waitTime):
_, average = camera.read()
#average = np.float32(average)
index_im = 0
while True:
grabbed, img = camera.read()
#alpha = 0.01 #factor of forgetting
#cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst
img_diff = cv2.absdiff(img, average)#convert scale and do subtract these 2 images
#cv2.imshow('img_diff', img_diff)
#substract background
#img = eliminate_background(img, backGround, 20)
#bin_image = simple_preprocessing(img, backGround)
bin_image = simple_preprocessing(img_diff)
bin_image2 = bin_image.copy()
cv2.imshow('binaire', bin_image2)
# bin_image = hsv_preprocessing(img)
# cv2.imshow('orig', img)
# cv2.imshow('bin', bin_image)
# cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
#search the points between each finger by using convexity defects
#see the doc of opencv to understand implementation details
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort((-1)*convDefs[:,0,3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
hull_nbPts = hull.shape[0]
'''
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
'''
#find and draw center of contour
moments = cv2.moments(cnt)
if moments['m00']!=0:
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centr=(cx,cy)
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
#find and draw point represents the wrist of the hand
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
#print 'list of idx_ok = ', idx_ok
max_5hull_idx = idx_ok[0:5]
#print 'first five of idx_ok = ', max_5hull_idx
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
#print hull[i]
#print dist_from_center
#cv2.imshow('contour and convex hull', drawing)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
'''
print img.shape
print bin_image2.shape
print drawing.shape
'''
frame = append_imgs(img, bin_image2, drawing)
#cv2.imshow('frame', frame)
#out.write(frame)
cv2.imwrite("store2/" + "img"+str(index_im) + ".jpg", frame)
index_im += 1
if cv2.waitKey(1) & 0xFF == ord("q"):
break
camera.release()
out.release()
#self.out = None
cv2.destroyAllWindows()
def main():
image_name = "hand_in_BG5.png"
img = cv2.imread(image_name)
bin_image = simple_preprocessing(img)
#bin_image = hsv_preprocessing(img)
cv2.imshow('orig', img)
cv2.imshow('bin', bin_image)
cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
#search the points between each finger by using convexity defects
#see the doc of opencv to understand implementation details
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort((-1)*convDefs[:,0,3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
hull_nbPts = hull.shape[0]
'''
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
'''
#find and draw center of contour
moments = cv2.moments(cnt)
if moments['m00']!=0:
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centr=(cx,cy)
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
#find and draw point represents the wrist of the hand
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
#print 'list of idx_ok = ', idx_ok
max_5hull_idx = idx_ok[0:1]
#print 'first five of idx_ok = ', max_5hull_idx
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
#print hull[i]
#print dist_from_center
cv2.imshow('contour and convex hull', drawing)
k = cv2.waitKey(0)
if __name__ == "__main__":
# main()
tracking()
| cnt = contours[i]
area = cv2.contourArea(cnt)
if(area > max_area):
max_area = area
ci = i | conditional_block |
contour.py | import cv2
import numpy as np
from math import *
def appendimages(im1,im2):
""" Return a new image that appends the two images side-by-side. """
# select the image with the fewest rows and fill in enough empty rows
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1,zeros((rows2-rows1,im1.shape[1]))),axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2,zeros((rows1-rows2,im2.shape[1]))),axis=0)
# if none of these cases they are equal, no filling needed.
return np.concatenate((im1,im2), axis=1)
def append_imgs(im1, im2, im3):
#buff = appendimages(im1,im2)
#return appendimages(buff,im3)
buff = np.concatenate((im1,im2), axis=1)
return np.concatenate((buff,im3), axis=1)
#check whether the point is near edge or not
def point_not_at_edge( x, y, img_height, img_width, threshold):
no_at_edge = ( (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) )
return no_at_edge
#check whether two points are too near from each other
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = (fabs(x - x_neighb) + fabs(y - y_neighb) > 2*threshold)
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = (fabs(x - x_next) + fabs(y - y_next) > 2*threshold)
no_at_edge = (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold )
return (no_same_point and no_at_edge)
'''
calculate the point on wrist of the hand
by taking the average of opposites of convexity defects to the center
'''
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2,n))
for i in range(n):
opposites[0,i] = 2*center[0] - contour[set_idx_convDefs[i], 0, 0] #calcul x
opposites[1,i] = 2*center[1] - contour[set_idx_convDefs[i], 0, 1] #calcul y
total = np.sum(opposites, axis = 1)
#print total
x = int(total[0]/n)
y = int(total[1]/n)
wrist = (x, y)
#print 'wrist = ', wrist
return wrist
'''
simple methods to detect finger tips
by calculating the farthest points on convex hull
compared to a fixed point. This fixed point can be center or wrist
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
#calculate distance to fixed Point
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))
#sort index from farthest to nearest
max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))
#need to eliminate same points and points at edge
#results stored in idx_ok, the list of candidate indices of hulls
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):
if(len(idx_ok) == 0):
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))
if not not_similar: #if similar break the loop
break
if(not_similar):
idx_ok.append(idx)
return idx_ok
def simple_preprocessing(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
blur = cv2.erode(blur, kernel, iterations = 2)
blur = cv2.dilate(blur, kernel, iterations = 2)
ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def simple_preprocessing2(img, backGround):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
gray = gray-gray2
blur = cv2.GaussianBlur(gray, (5,5), 0)
#kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
#blur = cv2.erode(blur, kernel, iterations = 2)
#blur = cv2.dilate(blur, kernel, iterations = 2)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def hsv_preprocessing(img):
#define boundaries of HSV pixel intensities to be considered as 'skin'
#H: 2-39 / 360 * 255 = 1-28
#S: 0.15 - 0.9 / 1 * 255 = 38- 250
#V: 0.2 - 0.95 / 1 * 255 =
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#hsv = cv2.GaussianBlur(hsv, (5,5), 0)
skinMask = cv2.inRange(hsv, lower, upper)
#choosing a structure elements to apply noise-remove process
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))
skinMask = cv2.erode(skinMask, kernel, iterations = 2)
skinMask = cv2.dilate(skinMask, kernel, iterations = 2)
blur = cv2.GaussianBlur(skinMask, (5,5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
#find the contour
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#search the maximum contour in the hierachy tree of contours
max_area = 0
ci = 0
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if(area > max_area):
max_area = area
ci = i
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints = False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
#draws the image with only the contour and its convex hull
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def | (img, backGround, thres_diff):
height, width, depth = img.shape
for i in range(height):
for j in range(width):
erase = True
for k in range(depth):
if(fabs(img[i,j,k] - backGround[i,j,k]) > thres_diff):
erase = False
if erase:
img[i,j,:] = 0
return img
'''
Tracking by camera
NOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
camera.read()
grabbed, backGround = camera.read()
for i in range(12):
grabbed, img = camera.read()
backGround = backGround/2 + img/2
'''
def tracking():
camera = cv2.VideoCapture(0)
_,img = camera.read()
h,w,d = img.shape
#out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))
fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
out = cv2.VideoWriter()
success = out.open('output.avi',fourcc, 15, (3*w,h), True)
waitTime = 100
for i in range(waitTime):
_, average = camera.read()
#average = np.float32(average)
index_im = 0
while True:
grabbed, img = camera.read()
#alpha = 0.01 #factor of forgetting
#cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst
img_diff = cv2.absdiff(img, average)#convert scale and do subtract these 2 images
#cv2.imshow('img_diff', img_diff)
#substract background
#img = eliminate_background(img, backGround, 20)
#bin_image = simple_preprocessing(img, backGround)
bin_image = simple_preprocessing(img_diff)
bin_image2 = bin_image.copy()
cv2.imshow('binaire', bin_image2)
# bin_image = hsv_preprocessing(img)
# cv2.imshow('orig', img)
# cv2.imshow('bin', bin_image)
# cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
#search the points between each finger by using convexity defects
#see the doc of opencv to understand implementation details
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort((-1)*convDefs[:,0,3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
hull_nbPts = hull.shape[0]
'''
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
'''
#find and draw center of contour
moments = cv2.moments(cnt)
if moments['m00']!=0:
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centr=(cx,cy)
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
#find and draw point represents the wrist of the hand
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
#print 'list of idx_ok = ', idx_ok
max_5hull_idx = idx_ok[0:5]
#print 'first five of idx_ok = ', max_5hull_idx
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
#print hull[i]
#print dist_from_center
#cv2.imshow('contour and convex hull', drawing)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
'''
print img.shape
print bin_image2.shape
print drawing.shape
'''
frame = append_imgs(img, bin_image2, drawing)
#cv2.imshow('frame', frame)
#out.write(frame)
cv2.imwrite("store2/" + "img"+str(index_im) + ".jpg", frame)
index_im += 1
if cv2.waitKey(1) & 0xFF == ord("q"):
break
camera.release()
out.release()
#self.out = None
cv2.destroyAllWindows()
def main():
image_name = "hand_in_BG5.png"
img = cv2.imread(image_name)
bin_image = simple_preprocessing(img)
#bin_image = hsv_preprocessing(img)
cv2.imshow('orig', img)
cv2.imshow('bin', bin_image)
cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
#search the points between each finger by using convexity defects
#see the doc of opencv to understand implementation details
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort((-1)*convDefs[:,0,3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist,0,2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
hull_nbPts = hull.shape[0]
'''
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
'''
#find and draw center of contour
moments = cv2.moments(cnt)
if moments['m00']!=0:
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centr=(cx,cy)
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
#find and draw point represents the wrist of the hand
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
#print 'list of idx_ok = ', idx_ok
max_5hull_idx = idx_ok[0:1]
#print 'first five of idx_ok = ', max_5hull_idx
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
#print hull[i]
#print dist_from_center
cv2.imshow('contour and convex hull', drawing)
k = cv2.waitKey(0)
if __name__ == "__main__":
# main()
tracking()
| eliminate_background | identifier_name |
app.js | /**
*
*Filename : app.js
*@description This JS file has functions for mapping event
* Author : Logesh
**/
/*
|--------------------------------------------------------------------------
| Declare global variables
|--------------------------------------------------------------------------
*/
var objectID = "";
var totalDistance = "";
var storeLatLong = [];
let latLong = {};
/*
|--------------------------------------------------------------------------
| API KEY
|--------------------------------------------------------------------------
*/
const mapAPI = "Gamk6viBYo5tDnM2VN-lrZ4GHnI87z-k_1ipBiMWMec";
/*
|--------------------------------------------------------------------------
| Declare global Functions
|--------------------------------------------------------------------------
*/
//Loader for request & response
$(document).ajaxSend(function() {
$("#overlay").fadeIn(300);
});
//Bootstrap DatePicker
$(".date").datepicker({
format: "yyyy-mm-dd",
autoclose: true,
orientation: "bottom",
todayHighlight: true,
});
/**
* @function
* @name getCSRFtoken
* @description This is used to get CSRF token for ajax call
**/
function getCSRFtoken() {
$.ajaxSetup({
headers: {
"X-CSRF-TOKEN": jQuery('meta[name="csrf-token"]').attr("content"),
},
});
}
/**
* @function
* @name removeSingleQuotes
* @description Removes single quotes from string
*
**/
function removeSingleQuotes($value) {
var outputStr = $value.replace(/'/g, "");
return outputStr;
}
/**
* @function
* @name errorMsg
* @description Display error messages on toast
**/
function errorMsg() {
$(".alert").show();
setTimeout(function() {
$(".alert").hide();
}, 2000);
}
/**
* @function
* @name loaderFadeOut
* @description Disable loader
**/
function loaderFadeOut() {
setTimeout(function() {
$("#overlay").fadeOut(300);
}, 500);
}
/**
* @function
* @name restrictUsage
* @description restrict blocks
**/
function restrictUsage() {
$("#dateInput").css("display", "none");
$("#tbdata").hide();
$("#vcdata").empty();
$("#ajax-alert").css("display", "block");
}
/**
* @function
* @name onError
* @description Append dynamic error messages based on the API call
**/
function onError(data) {
loaderFadeOut();
var validateText = "Either API Key / Date is empty";
if (data.responseText == "") {
data.responseText = validateText;
}
$("#ajax-alert").html(data.responseText);
restrictUsage();
errorMsg();
}
/**
* @function
* @name secondsToDhms
* @description Converts DateTime to Days, Hours, Mins, Secs
*
**/
function secondsToDhms(value) {
var lastUpdate = "";
var d = new Date() - new Date(value);
var date = new Date(d);
var seconds = date.getTime() / 1000; //1440516958
seconds = Number(seconds);
var d = Math.floor(seconds / (3600 * 24));
var h = Math.floor((seconds % (3600 * 24)) / 3600);
var m = Math.floor((seconds % 3600) / 60);
var s = Math.floor(seconds % 60);
var dDisplay = d > 0 ? d + (d == 1 ? " day ago " : " days ago ") : "";
var hDisplay = h > 0 ? h + (h == 1 ? "h ago " : "h ago ") : "";
var mDisplay = m > 0 ? m + (m == 1 ? " min ago " : " mins ago ") : "";
var sDisplay = s > 0 ? s + (s == 1 ? " sec ago" : " sec ago") : "";
if (dDisplay != "") {
lastUpdate = dDisplay;
} else if (hDisplay != "") {
lastUpdate = "over " + hDisplay;
} else if (mDisplay != "") {
lastUpdate = mDisplay;
} else {
lastUpdate = sDisplay;
}
return lastUpdate;
}
/*
|--------------------------------------------------------------------------
| Map Functions
|--------------------------------------------------------------------------
*/
jQuery(document).ready(function($) {
//Ajax call for Loading Last vechile data
$("#go").click(function(e) {
getCSRFtoken();
e.preventDefault();
var formData = {
apiKey: $("#apiKey").val(),
};
var type = "POST";
var ajaxurl = "vehicles";
$.ajax({
type: type,
url: ajaxurl,
data: formData,
dataType: "json",
success: function(data) {
$("#tbdata").find("tbody").remove();
$("#tbdata").show();
plotMarkers(data);
},
error: function(data) {
onError(data);
},
}).done(function() {
loaderFadeOut();
});
});
//Select Vehicle Data for routes navigated
$(".vehicleDataTable").on("click", "tr", function() {
var currentRow = $(this).closest("tr");
var getObjectID = currentRow.find(".objectID").html();
objectID = getObjectID;
$("#dateInput").css("display", "block");
$(".selected").removeClass("selected");
$(this).addClass("selected");
});
});
/**
* @function
* @name plotMarkers
* @description This is used to plot multiple markers based on Lat/Lng
**/
function plotMarkers(data) {
var i = 0;
var tableData = [];
$.each(data, function() {
var speed = data[i].speed == null ? "0 km/h" : data[i].speed + " km/h";
tableData = {
objectName: "'" + data[i].objectName + "'",
speed: "'" + speed + "'",
lastEngineOnTime: "'" + secondsToDhms(data[i].lastEngineOnTime) + "'",
objectId: "'" + data[i].objectId + "'",
};
$("<tr>", { html: formatItem(tableData) }).appendTo($("#tbdata"));
addInfoBubble(data[i].latitude, data[i].longitude, data[i].address);
i++;
});
$("#noData").hide();
}
//Ajax call for vehicles navigated on selected date
$("#selectedDate").click(function(e) {
getCSRFtoken();
e.preventDefault();
var formData = {
date: $("#date").val(),
objectID: objectID,
apiKey: $("#apiKey").val(),
};
var type = "POST";
var ajaxurl = "directions";
$.ajax({
type: type,
url: ajaxurl,
data: formData,
dataType: "json",
success: function(data) {
var t = loopData(data);
navigateToRoute(t);
},
error: function(data) {
onError(data);
},
}).done(function() {
loaderFadeOut();
});
});
/**
* @function
* @name loopData
* @description Retrieves Latitude, Longitude and stops from API
* [Remove Duplicates Lat/ Lng]
**/
function loopData($array) {
$array.forEach(function(item, index) {
storeLatLong.push({ lat: item.Latitude, lng: item.Longitude });
});
const totalStops = $array.filter((a) => a.EngineStatus == null).length;
var filtered = storeLatLong.reduce((filtered, item) => {
if (!filtered.some(
(filteredItem) =>
JSON.stringify(filteredItem) == JSON.stringify(item)
))
filtered.push(item);
return filtered;
}, []);
let $combine = { latlng: storeLatLong, stops: totalStops };
return $combine;
}
/**
* @function
* @name navigateToRoute
* @description Append vehicles Data based on the route travelled
*
**/
function navigateToRoute(n) {
map.removeObjects(map.getObjects());
addPolylineToMap(map, n.latlng);
$("#vcdata").empty();
$("#vcdata").append(
'<tr class="child"><td>Total Distance</td><td>' +
totalDistance +
"</td></tr><tr><td>Number of stops</td><td>" +
n.stops +
"</td></tr><tr><td>Shortest possible distance</td><td>" +
totalDistance +
"</td></tr>"
);
}
/**
* @function
* @name formatItem
* @description Format vehicles List Data | **/
function formatItem(item) {
return (
"<td>" +
removeSingleQuotes(item.objectName) +
"</td> <td> " +
removeSingleQuotes(item.speed) +
" </td> <td> " +
removeSingleQuotes(item.lastEngineOnTime) +
' </td> <td class="objectID" style="display:none"> ' +
removeSingleQuotes(item.objectId) +
" </td>"
);
}
function calculateShortestRoute() {
const platform = configPlatform();
var routingService = platform.getRoutingService();
var routingParameters = {
mode: "fastest;car",
waypoint0: "geo!50.1120423728813,8.68340740740811",
waypoint1: "geo!52.5309916298853,13.3846220493377",
representation: "display",
};
routingService.calculateRoute(routingParameters, (success) => {
console.log(success);
});
}
//################################## @begins Place Markers based on Lat/ Lng ######################################################//
/**
* Creates a new marker and adds it to a group
* @param {H.map.Group} group The group holding the new marker
* @param {H.geo.Point} coordinate The location of the marker
* @param {String} html Data associated with the marker
*/
function addMarkerToGroup(group, coordinate, html) {
var marker = new H.map.Marker(coordinate);
// add custom data to the marker
marker.setData(html);
group.addObject(marker);
}
/**
* Add two markers showing the position of given Lat / Lng.
* Clicking on a marker opens an infobubble which holds HTML content related to the marker.
* @param {H.Map} map A HERE Map instance within the application
*/
function addInfoBubble(vehicleLatitude, vehicleLongitude, vehicleAddress) {
var group = new H.map.Group();
map.addObject(group);
// add 'tap' event listener, that opens info bubble, to the group
group.addEventListener(
"tap",
function(evt) {
// event target is the marker itself, group is a parent event target
// for all objects that it contains
var bubble = new H.ui.InfoBubble(evt.target.getGeometry(), {
// read custom data
content: evt.target.getData(),
});
// show info bubble
ui.addBubble(bubble);
},
false
);
addMarkerToGroup(
group, { lat: vehicleLatitude, lng: vehicleLongitude },
"<div>" + vehicleAddress + "</div>"
);
}
// initialize communication with the platform
var platform = new H.service.Platform({
apikey: mapAPI,
});
var defaultLayers = platform.createDefaultLayers();
// initialize a map - this map is centered over Europe
var map = new H.Map(
document.getElementById("map"),
defaultLayers.vector.normal.map, {
center: { lat: 58.5953, lng: 25.0136 },
zoom: 7,
pixelRatio: window.devicePixelRatio || 1,
}
);
// add a resize listener to make sure that the map occupies the whole container
window.addEventListener("resize", () => map.getViewPort().resize());
// MapEvents enables the event system
// Behavior implements default interactions for pan/zoom (also on mobile touch environments)
var behavior = new H.mapevents.Behavior(new H.mapevents.MapEvents(map));
// create default UI with layers provided by the platform
var ui = H.ui.UI.createDefault(map, defaultLayers);
// Now use the map as required...
addInfoBubble(map);
//##################################@begins Navigate Lat / Lng ###############################################################//
/**
* Adds a polyline between given Lat / Lng to the map
*
* @param {H.Map} map A HERE Map instance within the application
*/
function addPolylineToMap(map, ts) {
var lineString = new H.geo.LineString();
ts.forEach(function(t) {
lineString.pushPoint(t);
});
// Initialize a polyline with the linestring:
var polyline = new H.map.Polyline(lineString, {
style: { lineWidth: 4, strokeColor: "blue" },
});
// Add the polyline to the map:
map.addObject(polyline);
// Zoom the map to fit the rectangle:
map.getViewModel().setLookAtData({ bounds: polyline.getBoundingBox() });
var distance = getPolylineLength(polyline);
var km = distance / 1000;
totalDistance = km.toFixed(1) + " km";
}
/**
* @function
* @name getPolylineLength
* @description Get distance for the selected route
*
**/
function getPolylineLength(polyline) {
const geometry = polyline.getGeometry();
let distance = 0;
let last = geometry.extractPoint(0);
for (let i = 1; i < geometry.getPointCount(); i++) {
const point = geometry.extractPoint(i);
distance += last.distance(point);
last = point;
}
if (polyline.isClosed()) {
distance += last.distance(geometry.extractPoint(0));
}
// distance in meters
return distance;
}
// initialize communication with the platform
var platform = new H.service.Platform({
apikey: mapAPI,
});
var defaultLayers = platform.createDefaultLayers();
// add a resize listener to make sure that the map occupies the whole container
window.addEventListener("resize", () => map.getViewPort().resize());
// MapEvents enables the event system
// Behavior implements default interactions for pan/zoom (also on mobile touch environments)
var behavior = new H.mapevents.Behavior(new H.mapevents.MapEvents(map));
// Create the default UI components
var ui = H.ui.UI.createDefault(map, defaultLayers);
// Now use the map as required...
addPolylineToMap(map); | * | random_line_split |
app.js | /**
*
*Filename : app.js
*@description This JS file has functions for mapping event
* Author : Logesh
**/
/*
|--------------------------------------------------------------------------
| Declare global variables
|--------------------------------------------------------------------------
*/
var objectID = "";
var totalDistance = "";
var storeLatLong = [];
let latLong = {};
/*
|--------------------------------------------------------------------------
| API KEY
|--------------------------------------------------------------------------
*/
const mapAPI = "Gamk6viBYo5tDnM2VN-lrZ4GHnI87z-k_1ipBiMWMec";
/*
|--------------------------------------------------------------------------
| Declare global Functions
|--------------------------------------------------------------------------
*/
//Loader for request & response
$(document).ajaxSend(function() {
$("#overlay").fadeIn(300);
});
//Bootstrap DatePicker
$(".date").datepicker({
format: "yyyy-mm-dd",
autoclose: true,
orientation: "bottom",
todayHighlight: true,
});
/**
* @function
* @name getCSRFtoken
* @description This is used to get CSRF token for ajax call
**/
function getCSRFtoken() {
$.ajaxSetup({
headers: {
"X-CSRF-TOKEN": jQuery('meta[name="csrf-token"]').attr("content"),
},
});
}
/**
* @function
* @name removeSingleQuotes
* @description Removes single quotes from string
*
**/
function removeSingleQuotes($value) {
var outputStr = $value.replace(/'/g, "");
return outputStr;
}
/**
* @function
* @name errorMsg
* @description Display error messages on toast
**/
function errorMsg() {
$(".alert").show();
setTimeout(function() {
$(".alert").hide();
}, 2000);
}
/**
* @function
* @name loaderFadeOut
* @description Disable loader
**/
function loaderFadeOut() {
setTimeout(function() {
$("#overlay").fadeOut(300);
}, 500);
}
/**
* @function
* @name restrictUsage
* @description restrict blocks
**/
function restrictUsage() |
/**
* @function
* @name onError
* @description Append dynamic error messages based on the API call
**/
function onError(data) {
loaderFadeOut();
var validateText = "Either API Key / Date is empty";
if (data.responseText == "") {
data.responseText = validateText;
}
$("#ajax-alert").html(data.responseText);
restrictUsage();
errorMsg();
}
/**
* @function
* @name secondsToDhms
* @description Converts DateTime to Days, Hours, Mins, Secs
*
**/
function secondsToDhms(value) {
var lastUpdate = "";
var d = new Date() - new Date(value);
var date = new Date(d);
var seconds = date.getTime() / 1000; //1440516958
seconds = Number(seconds);
var d = Math.floor(seconds / (3600 * 24));
var h = Math.floor((seconds % (3600 * 24)) / 3600);
var m = Math.floor((seconds % 3600) / 60);
var s = Math.floor(seconds % 60);
var dDisplay = d > 0 ? d + (d == 1 ? " day ago " : " days ago ") : "";
var hDisplay = h > 0 ? h + (h == 1 ? "h ago " : "h ago ") : "";
var mDisplay = m > 0 ? m + (m == 1 ? " min ago " : " mins ago ") : "";
var sDisplay = s > 0 ? s + (s == 1 ? " sec ago" : " sec ago") : "";
if (dDisplay != "") {
lastUpdate = dDisplay;
} else if (hDisplay != "") {
lastUpdate = "over " + hDisplay;
} else if (mDisplay != "") {
lastUpdate = mDisplay;
} else {
lastUpdate = sDisplay;
}
return lastUpdate;
}
/*
|--------------------------------------------------------------------------
| Map Functions
|--------------------------------------------------------------------------
*/
jQuery(document).ready(function($) {
//Ajax call for Loading Last vechile data
$("#go").click(function(e) {
getCSRFtoken();
e.preventDefault();
var formData = {
apiKey: $("#apiKey").val(),
};
var type = "POST";
var ajaxurl = "vehicles";
$.ajax({
type: type,
url: ajaxurl,
data: formData,
dataType: "json",
success: function(data) {
$("#tbdata").find("tbody").remove();
$("#tbdata").show();
plotMarkers(data);
},
error: function(data) {
onError(data);
},
}).done(function() {
loaderFadeOut();
});
});
//Select Vehicle Data for routes navigated
$(".vehicleDataTable").on("click", "tr", function() {
var currentRow = $(this).closest("tr");
var getObjectID = currentRow.find(".objectID").html();
objectID = getObjectID;
$("#dateInput").css("display", "block");
$(".selected").removeClass("selected");
$(this).addClass("selected");
});
});
/**
* @function
* @name plotMarkers
* @description This is used to plot multiple markers based on Lat/Lng
**/
function plotMarkers(data) {
var i = 0;
var tableData = [];
$.each(data, function() {
var speed = data[i].speed == null ? "0 km/h" : data[i].speed + " km/h";
tableData = {
objectName: "'" + data[i].objectName + "'",
speed: "'" + speed + "'",
lastEngineOnTime: "'" + secondsToDhms(data[i].lastEngineOnTime) + "'",
objectId: "'" + data[i].objectId + "'",
};
$("<tr>", { html: formatItem(tableData) }).appendTo($("#tbdata"));
addInfoBubble(data[i].latitude, data[i].longitude, data[i].address);
i++;
});
$("#noData").hide();
}
//Ajax call for vehicles navigated on selected date
$("#selectedDate").click(function(e) {
getCSRFtoken();
e.preventDefault();
var formData = {
date: $("#date").val(),
objectID: objectID,
apiKey: $("#apiKey").val(),
};
var type = "POST";
var ajaxurl = "directions";
$.ajax({
type: type,
url: ajaxurl,
data: formData,
dataType: "json",
success: function(data) {
var t = loopData(data);
navigateToRoute(t);
},
error: function(data) {
onError(data);
},
}).done(function() {
loaderFadeOut();
});
});
/**
* @function
* @name loopData
* @description Retrieves Latitude, Longitude and stops from API
* [Remove Duplicates Lat/ Lng]
**/
function loopData($array) {
$array.forEach(function(item, index) {
storeLatLong.push({ lat: item.Latitude, lng: item.Longitude });
});
const totalStops = $array.filter((a) => a.EngineStatus == null).length;
var filtered = storeLatLong.reduce((filtered, item) => {
if (!filtered.some(
(filteredItem) =>
JSON.stringify(filteredItem) == JSON.stringify(item)
))
filtered.push(item);
return filtered;
}, []);
let $combine = { latlng: storeLatLong, stops: totalStops };
return $combine;
}
/**
* @function
* @name navigateToRoute
* @description Append vehicles Data based on the route travelled
*
**/
function navigateToRoute(n) {
map.removeObjects(map.getObjects());
addPolylineToMap(map, n.latlng);
$("#vcdata").empty();
$("#vcdata").append(
'<tr class="child"><td>Total Distance</td><td>' +
totalDistance +
"</td></tr><tr><td>Number of stops</td><td>" +
n.stops +
"</td></tr><tr><td>Shortest possible distance</td><td>" +
totalDistance +
"</td></tr>"
);
}
/**
* @function
* @name formatItem
* @description Format vehicles List Data
*
**/
function formatItem(item) {
return (
"<td>" +
removeSingleQuotes(item.objectName) +
"</td> <td> " +
removeSingleQuotes(item.speed) +
" </td> <td> " +
removeSingleQuotes(item.lastEngineOnTime) +
' </td> <td class="objectID" style="display:none"> ' +
removeSingleQuotes(item.objectId) +
" </td>"
);
}
function calculateShortestRoute() {
const platform = configPlatform();
var routingService = platform.getRoutingService();
var routingParameters = {
mode: "fastest;car",
waypoint0: "geo!50.1120423728813,8.68340740740811",
waypoint1: "geo!52.5309916298853,13.3846220493377",
representation: "display",
};
routingService.calculateRoute(routingParameters, (success) => {
console.log(success);
});
}
//################################## @begins Place Markers based on Lat/ Lng ######################################################//
/**
* Creates a new marker and adds it to a group
* @param {H.map.Group} group The group holding the new marker
* @param {H.geo.Point} coordinate The location of the marker
* @param {String} html Data associated with the marker
*/
function addMarkerToGroup(group, coordinate, html) {
var marker = new H.map.Marker(coordinate);
// add custom data to the marker
marker.setData(html);
group.addObject(marker);
}
/**
* Add two markers showing the position of given Lat / Lng.
* Clicking on a marker opens an infobubble which holds HTML content related to the marker.
* @param {H.Map} map A HERE Map instance within the application
*/
function addInfoBubble(vehicleLatitude, vehicleLongitude, vehicleAddress) {
var group = new H.map.Group();
map.addObject(group);
// add 'tap' event listener, that opens info bubble, to the group
group.addEventListener(
"tap",
function(evt) {
// event target is the marker itself, group is a parent event target
// for all objects that it contains
var bubble = new H.ui.InfoBubble(evt.target.getGeometry(), {
// read custom data
content: evt.target.getData(),
});
// show info bubble
ui.addBubble(bubble);
},
false
);
addMarkerToGroup(
group, { lat: vehicleLatitude, lng: vehicleLongitude },
"<div>" + vehicleAddress + "</div>"
);
}
// initialize communication with the platform
var platform = new H.service.Platform({
apikey: mapAPI,
});
var defaultLayers = platform.createDefaultLayers();
// initialize a map - this map is centered over Europe
var map = new H.Map(
document.getElementById("map"),
defaultLayers.vector.normal.map, {
center: { lat: 58.5953, lng: 25.0136 },
zoom: 7,
pixelRatio: window.devicePixelRatio || 1,
}
);
// add a resize listener to make sure that the map occupies the whole container
window.addEventListener("resize", () => map.getViewPort().resize());
// MapEvents enables the event system
// Behavior implements default interactions for pan/zoom (also on mobile touch environments)
var behavior = new H.mapevents.Behavior(new H.mapevents.MapEvents(map));
// create default UI with layers provided by the platform
var ui = H.ui.UI.createDefault(map, defaultLayers);
// Now use the map as required...
addInfoBubble(map);
//##################################@begins Navigate Lat / Lng ###############################################################//
/**
* Adds a polyline between given Lat / Lng to the map
*
* @param {H.Map} map A HERE Map instance within the application
*/
function addPolylineToMap(map, ts) {
var lineString = new H.geo.LineString();
ts.forEach(function(t) {
lineString.pushPoint(t);
});
// Initialize a polyline with the linestring:
var polyline = new H.map.Polyline(lineString, {
style: { lineWidth: 4, strokeColor: "blue" },
});
// Add the polyline to the map:
map.addObject(polyline);
// Zoom the map to fit the rectangle:
map.getViewModel().setLookAtData({ bounds: polyline.getBoundingBox() });
var distance = getPolylineLength(polyline);
var km = distance / 1000;
totalDistance = km.toFixed(1) + " km";
}
/**
* @function
* @name getPolylineLength
* @description Get distance for the selected route
*
**/
function getPolylineLength(polyline) {
const geometry = polyline.getGeometry();
let distance = 0;
let last = geometry.extractPoint(0);
for (let i = 1; i < geometry.getPointCount(); i++) {
const point = geometry.extractPoint(i);
distance += last.distance(point);
last = point;
}
if (polyline.isClosed()) {
distance += last.distance(geometry.extractPoint(0));
}
// distance in meters
return distance;
}
// initialize communication with the platform
var platform = new H.service.Platform({
apikey: mapAPI,
});
var defaultLayers = platform.createDefaultLayers();
// add a resize listener to make sure that the map occupies the whole container
window.addEventListener("resize", () => map.getViewPort().resize());
// MapEvents enables the event system
// Behavior implements default interactions for pan/zoom (also on mobile touch environments)
var behavior = new H.mapevents.Behavior(new H.mapevents.MapEvents(map));
// Create the default UI components
var ui = H.ui.UI.createDefault(map, defaultLayers);
// Now use the map as required...
addPolylineToMap(map); | {
$("#dateInput").css("display", "none");
$("#tbdata").hide();
$("#vcdata").empty();
$("#ajax-alert").css("display", "block");
} | identifier_body |
app.js | /**
*
*Filename : app.js
*@description This JS file has functions for mapping event
* Author : Logesh
**/
/*
|--------------------------------------------------------------------------
| Declare global variables
|--------------------------------------------------------------------------
*/
var objectID = "";
var totalDistance = "";
var storeLatLong = [];
let latLong = {};
/*
|--------------------------------------------------------------------------
| API KEY
|--------------------------------------------------------------------------
*/
const mapAPI = "Gamk6viBYo5tDnM2VN-lrZ4GHnI87z-k_1ipBiMWMec";
/*
|--------------------------------------------------------------------------
| Declare global Functions
|--------------------------------------------------------------------------
*/
//Loader for request & response
$(document).ajaxSend(function() {
$("#overlay").fadeIn(300);
});
//Bootstrap DatePicker
$(".date").datepicker({
format: "yyyy-mm-dd",
autoclose: true,
orientation: "bottom",
todayHighlight: true,
});
/**
* @function
* @name getCSRFtoken
* @description This is used to get CSRF token for ajax call
**/
function getCSRFtoken() {
$.ajaxSetup({
headers: {
"X-CSRF-TOKEN": jQuery('meta[name="csrf-token"]').attr("content"),
},
});
}
/**
* @function
* @name removeSingleQuotes
* @description Removes single quotes from string
*
**/
function removeSingleQuotes($value) {
var outputStr = $value.replace(/'/g, "");
return outputStr;
}
/**
* @function
* @name errorMsg
* @description Display error messages on toast
**/
function errorMsg() {
$(".alert").show();
setTimeout(function() {
$(".alert").hide();
}, 2000);
}
/**
* @function
* @name loaderFadeOut
* @description Disable loader
**/
function loaderFadeOut() {
setTimeout(function() {
$("#overlay").fadeOut(300);
}, 500);
}
/**
* @function
* @name restrictUsage
* @description restrict blocks
**/
function restrictUsage() {
$("#dateInput").css("display", "none");
$("#tbdata").hide();
$("#vcdata").empty();
$("#ajax-alert").css("display", "block");
}
/**
* @function
* @name onError
* @description Append dynamic error messages based on the API call
**/
function onError(data) {
loaderFadeOut();
var validateText = "Either API Key / Date is empty";
if (data.responseText == "") {
data.responseText = validateText;
}
$("#ajax-alert").html(data.responseText);
restrictUsage();
errorMsg();
}
/**
* @function
* @name secondsToDhms
* @description Converts DateTime to Days, Hours, Mins, Secs
*
**/
function secondsToDhms(value) {
var lastUpdate = "";
var d = new Date() - new Date(value);
var date = new Date(d);
var seconds = date.getTime() / 1000; //1440516958
seconds = Number(seconds);
var d = Math.floor(seconds / (3600 * 24));
var h = Math.floor((seconds % (3600 * 24)) / 3600);
var m = Math.floor((seconds % 3600) / 60);
var s = Math.floor(seconds % 60);
var dDisplay = d > 0 ? d + (d == 1 ? " day ago " : " days ago ") : "";
var hDisplay = h > 0 ? h + (h == 1 ? "h ago " : "h ago ") : "";
var mDisplay = m > 0 ? m + (m == 1 ? " min ago " : " mins ago ") : "";
var sDisplay = s > 0 ? s + (s == 1 ? " sec ago" : " sec ago") : "";
if (dDisplay != "") {
lastUpdate = dDisplay;
} else if (hDisplay != "") {
lastUpdate = "over " + hDisplay;
} else if (mDisplay != "") {
lastUpdate = mDisplay;
} else {
lastUpdate = sDisplay;
}
return lastUpdate;
}
/*
|--------------------------------------------------------------------------
| Map Functions
|--------------------------------------------------------------------------
*/
jQuery(document).ready(function($) {
//Ajax call for Loading Last vechile data
$("#go").click(function(e) {
getCSRFtoken();
e.preventDefault();
var formData = {
apiKey: $("#apiKey").val(),
};
var type = "POST";
var ajaxurl = "vehicles";
$.ajax({
type: type,
url: ajaxurl,
data: formData,
dataType: "json",
success: function(data) {
$("#tbdata").find("tbody").remove();
$("#tbdata").show();
plotMarkers(data);
},
error: function(data) {
onError(data);
},
}).done(function() {
loaderFadeOut();
});
});
//Select Vehicle Data for routes navigated
$(".vehicleDataTable").on("click", "tr", function() {
var currentRow = $(this).closest("tr");
var getObjectID = currentRow.find(".objectID").html();
objectID = getObjectID;
$("#dateInput").css("display", "block");
$(".selected").removeClass("selected");
$(this).addClass("selected");
});
});
/**
* @function
* @name plotMarkers
* @description This is used to plot multiple markers based on Lat/Lng
**/
function plotMarkers(data) {
var i = 0;
var tableData = [];
$.each(data, function() {
var speed = data[i].speed == null ? "0 km/h" : data[i].speed + " km/h";
tableData = {
objectName: "'" + data[i].objectName + "'",
speed: "'" + speed + "'",
lastEngineOnTime: "'" + secondsToDhms(data[i].lastEngineOnTime) + "'",
objectId: "'" + data[i].objectId + "'",
};
$("<tr>", { html: formatItem(tableData) }).appendTo($("#tbdata"));
addInfoBubble(data[i].latitude, data[i].longitude, data[i].address);
i++;
});
$("#noData").hide();
}
//Ajax call for vehicles navigated on selected date
$("#selectedDate").click(function(e) {
getCSRFtoken();
e.preventDefault();
var formData = {
date: $("#date").val(),
objectID: objectID,
apiKey: $("#apiKey").val(),
};
var type = "POST";
var ajaxurl = "directions";
$.ajax({
type: type,
url: ajaxurl,
data: formData,
dataType: "json",
success: function(data) {
var t = loopData(data);
navigateToRoute(t);
},
error: function(data) {
onError(data);
},
}).done(function() {
loaderFadeOut();
});
});
/**
* @function
* @name loopData
* @description Retrieves Latitude, Longitude and stops from API
* [Remove Duplicates Lat/ Lng]
**/
function loopData($array) {
$array.forEach(function(item, index) {
storeLatLong.push({ lat: item.Latitude, lng: item.Longitude });
});
const totalStops = $array.filter((a) => a.EngineStatus == null).length;
var filtered = storeLatLong.reduce((filtered, item) => {
if (!filtered.some(
(filteredItem) =>
JSON.stringify(filteredItem) == JSON.stringify(item)
))
filtered.push(item);
return filtered;
}, []);
let $combine = { latlng: storeLatLong, stops: totalStops };
return $combine;
}
/**
* @function
* @name navigateToRoute
* @description Append vehicles Data based on the route travelled
*
**/
function navigateToRoute(n) {
map.removeObjects(map.getObjects());
addPolylineToMap(map, n.latlng);
$("#vcdata").empty();
$("#vcdata").append(
'<tr class="child"><td>Total Distance</td><td>' +
totalDistance +
"</td></tr><tr><td>Number of stops</td><td>" +
n.stops +
"</td></tr><tr><td>Shortest possible distance</td><td>" +
totalDistance +
"</td></tr>"
);
}
/**
* @function
* @name formatItem
* @description Format vehicles List Data
*
**/
function formatItem(item) {
return (
"<td>" +
removeSingleQuotes(item.objectName) +
"</td> <td> " +
removeSingleQuotes(item.speed) +
" </td> <td> " +
removeSingleQuotes(item.lastEngineOnTime) +
' </td> <td class="objectID" style="display:none"> ' +
removeSingleQuotes(item.objectId) +
" </td>"
);
}
function calculateShortestRoute() {
const platform = configPlatform();
var routingService = platform.getRoutingService();
var routingParameters = {
mode: "fastest;car",
waypoint0: "geo!50.1120423728813,8.68340740740811",
waypoint1: "geo!52.5309916298853,13.3846220493377",
representation: "display",
};
routingService.calculateRoute(routingParameters, (success) => {
console.log(success);
});
}
//################################## @begins Place Markers based on Lat/ Lng ######################################################//
/**
* Creates a new marker and adds it to a group
* @param {H.map.Group} group The group holding the new marker
* @param {H.geo.Point} coordinate The location of the marker
* @param {String} html Data associated with the marker
*/
function addMarkerToGroup(group, coordinate, html) {
var marker = new H.map.Marker(coordinate);
// add custom data to the marker
marker.setData(html);
group.addObject(marker);
}
/**
* Add two markers showing the position of given Lat / Lng.
* Clicking on a marker opens an infobubble which holds HTML content related to the marker.
* @param {H.Map} map A HERE Map instance within the application
*/
function addInfoBubble(vehicleLatitude, vehicleLongitude, vehicleAddress) {
var group = new H.map.Group();
map.addObject(group);
// add 'tap' event listener, that opens info bubble, to the group
group.addEventListener(
"tap",
function(evt) {
// event target is the marker itself, group is a parent event target
// for all objects that it contains
var bubble = new H.ui.InfoBubble(evt.target.getGeometry(), {
// read custom data
content: evt.target.getData(),
});
// show info bubble
ui.addBubble(bubble);
},
false
);
addMarkerToGroup(
group, { lat: vehicleLatitude, lng: vehicleLongitude },
"<div>" + vehicleAddress + "</div>"
);
}
// initialize communication with the platform
var platform = new H.service.Platform({
apikey: mapAPI,
});
var defaultLayers = platform.createDefaultLayers();
// initialize a map - this map is centered over Europe
var map = new H.Map(
document.getElementById("map"),
defaultLayers.vector.normal.map, {
center: { lat: 58.5953, lng: 25.0136 },
zoom: 7,
pixelRatio: window.devicePixelRatio || 1,
}
);
// add a resize listener to make sure that the map occupies the whole container
window.addEventListener("resize", () => map.getViewPort().resize());
// MapEvents enables the event system
// Behavior implements default interactions for pan/zoom (also on mobile touch environments)
var behavior = new H.mapevents.Behavior(new H.mapevents.MapEvents(map));
// create default UI with layers provided by the platform
var ui = H.ui.UI.createDefault(map, defaultLayers);
// Now use the map as required...
addInfoBubble(map);
//##################################@begins Navigate Lat / Lng ###############################################################//
/**
* Adds a polyline between given Lat / Lng to the map
*
* @param {H.Map} map A HERE Map instance within the application
*/
function | (map, ts) {
var lineString = new H.geo.LineString();
ts.forEach(function(t) {
lineString.pushPoint(t);
});
// Initialize a polyline with the linestring:
var polyline = new H.map.Polyline(lineString, {
style: { lineWidth: 4, strokeColor: "blue" },
});
// Add the polyline to the map:
map.addObject(polyline);
// Zoom the map to fit the rectangle:
map.getViewModel().setLookAtData({ bounds: polyline.getBoundingBox() });
var distance = getPolylineLength(polyline);
var km = distance / 1000;
totalDistance = km.toFixed(1) + " km";
}
/**
* @function
* @name getPolylineLength
* @description Get distance for the selected route
*
**/
function getPolylineLength(polyline) {
const geometry = polyline.getGeometry();
let distance = 0;
let last = geometry.extractPoint(0);
for (let i = 1; i < geometry.getPointCount(); i++) {
const point = geometry.extractPoint(i);
distance += last.distance(point);
last = point;
}
if (polyline.isClosed()) {
distance += last.distance(geometry.extractPoint(0));
}
// distance in meters
return distance;
}
// initialize communication with the platform
var platform = new H.service.Platform({
apikey: mapAPI,
});
var defaultLayers = platform.createDefaultLayers();
// add a resize listener to make sure that the map occupies the whole container
window.addEventListener("resize", () => map.getViewPort().resize());
// MapEvents enables the event system
// Behavior implements default interactions for pan/zoom (also on mobile touch environments)
var behavior = new H.mapevents.Behavior(new H.mapevents.MapEvents(map));
// Create the default UI components
var ui = H.ui.UI.createDefault(map, defaultLayers);
// Now use the map as required...
addPolylineToMap(map); | addPolylineToMap | identifier_name |
controller_commitstatus.go | package controller
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/jenkins-x/jx/v2/pkg/cmd/helper"
"github.com/jenkins-x/jx/v2/pkg/kube/naming"
"github.com/jenkins-x/jx/v2/pkg/cmd/opts"
"github.com/jenkins-x/jx/v2/pkg/prow/config"
"github.com/jenkins-x/jx/v2/pkg/gits"
"github.com/jenkins-x/jx/v2/pkg/prow"
"k8s.io/client-go/kubernetes"
"github.com/jenkins-x/jx/v2/pkg/extensions"
"github.com/pkg/errors"
"github.com/jenkins-x/jx/v2/pkg/builds"
corev1 "k8s.io/api/core/v1"
jenkinsv1client "github.com/jenkins-x/jx-api/pkg/client/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
"github.com/jenkins-x/jx-logging/pkg/log"
jenkinsv1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/jx/v2/pkg/kube"
"github.com/spf13/cobra"
)
// ControllerCommitStatusOptions the options for the controller
type ControllerCommitStatusOptions struct {
ControllerOptions
}
// NewCmdControllerCommitStatus creates a command object for the "create" command
func NewCmdControllerCommitStatus(commonOpts *opts.CommonOptions) *cobra.Command {
options := &ControllerCommitStatusOptions{
ControllerOptions: ControllerOptions{
CommonOptions: commonOpts,
},
}
cmd := &cobra.Command{
Use: "commitstatus",
Short: "Updates commit status",
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
return cmd
}
// Run implements this command
func (o *ControllerCommitStatusOptions) Run() error {
// Always run in batch mode as a controller is never run interactively
o.BatchMode = true
jxClient, ns, err := o.JXClientAndDevNamespace()
if err != nil {
return err
}
kubeClient, _, err := o.KubeClientAndDevNamespace()
if err != nil {
return err
}
apisClient, err := o.ApiExtensionsClient()
if err != nil {
return err
}
err = kube.RegisterCommitStatusCRD(apisClient)
if err != nil {
return err
}
err = kube.RegisterPipelineActivityCRD(apisClient)
if err != nil {
return err
}
commitstatusListWatch := cache.NewListWatchFromClient(jxClient.JenkinsV1().RESTClient(), "commitstatuses", ns, fields.Everything())
kube.SortListWatchByName(commitstatusListWatch)
_, commitstatusController := cache.NewInformer(
commitstatusListWatch,
&jenkinsv1.CommitStatus{},
time.Minute*10,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
o.onCommitStatusObj(obj, jxClient, ns)
},
UpdateFunc: func(oldObj, newObj interface{}) {
o.onCommitStatusObj(newObj, jxClient, ns)
},
DeleteFunc: func(obj interface{}) {
},
},
)
stop := make(chan struct{})
go commitstatusController.Run(stop)
podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", ns, fields.Everything())
kube.SortListWatchByName(podListWatch)
_, podWatch := cache.NewInformer(
podListWatch,
&corev1.Pod{},
time.Minute*10,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
o.onPodObj(obj, jxClient, kubeClient, ns)
},
UpdateFunc: func(oldObj, newObj interface{}) {
o.onPodObj(newObj, jxClient, kubeClient, ns)
},
DeleteFunc: func(obj interface{}) {
},
},
)
stop = make(chan struct{})
podWatch.Run(stop)
if err != nil {
return err
}
return nil
}
func (o *ControllerCommitStatusOptions) onCommitStatusObj(obj interface{}, jxClient jenkinsv1client.Interface, ns string) {
check, ok := obj.(*jenkinsv1.CommitStatus)
if !ok {
log.Logger().Fatalf("commit status controller: unexpected type %v", obj)
} else {
err := o.onCommitStatus(check, jxClient, ns)
if err != nil {
log.Logger().Fatalf("commit status controller: %v", err)
}
}
}
// onCommitStatus reconciles one CommitStatus resource: details are grouped
// by commit SHA, the entry with the highest build number is chosen per SHA,
// and that entry is pushed to the git provider via update(). If the update
// fails, an "error" commit status is reported upstream before the original
// error is returned.
func (o *ControllerCommitStatusOptions) onCommitStatus(check *jenkinsv1.CommitStatus, jxClient jenkinsv1client.Interface, ns string) error {
	// Bucket every status detail by the commit SHA it refers to.
	groupedBySha := make(map[string][]jenkinsv1.CommitStatusDetails, 0)
	for _, v := range check.Spec.Items {
		if _, ok := groupedBySha[v.Commit.SHA]; !ok {
			groupedBySha[v.Commit.SHA] = make([]jenkinsv1.CommitStatusDetails, 0)
		}
		groupedBySha[v.Commit.SHA] = append(groupedBySha[v.Commit.SHA], v)
	}
	for _, vs := range groupedBySha {
		// Pick the detail with the highest build number. The zero-value
		// `last` has an empty activity name, which getBuildNumber maps to
		// "-1", so any real build wins the first comparison.
		var last jenkinsv1.CommitStatusDetails
		for _, v := range vs {
			lastBuildNumber, err := strconv.Atoi(getBuildNumber(last.PipelineActivity.Name))
			if err != nil {
				return err
			}
			buildNumber, err := strconv.Atoi(getBuildNumber(v.PipelineActivity.Name))
			if err != nil {
				return err
			}
			if lastBuildNumber < buildNumber {
				last = v
			}
		}
		err := o.update(&last, jxClient, ns)
		if err != nil {
			// Best effort: surface the failure on the commit itself before
			// bubbling the original update error up.
			gitProvider, gitRepoInfo, err1 := o.getGitProvider(last.Commit.GitURL)
			if err1 != nil {
				return err1
			}
			_, err1 = extensions.NotifyCommitStatus(last.Commit, "error", "", "Internal Error performing commit status updates", "", last.Context, gitProvider, gitRepoInfo)
			if err1 != nil {
				// NOTE(review): this deliberately returns the original update
				// error; the notification failure err1 is dropped here.
				return err
			}
			return err
		}
	}
	return nil
}
// onPodObj handles an informer event for a build pod. A value of any other
// type, or a processing failure, is fatal.
func (o *ControllerCommitStatusOptions) onPodObj(obj interface{}, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		log.Logger().Fatalf("pod watcher: unexpected type %v", obj)
		return
	}
	if err := o.onPod(pod, jxClient, kubeClient, ns); err != nil {
		log.Logger().Fatalf("pod watcher: %v", err)
	}
}
func (o *ControllerCommitStatusOptions) onPod(pod *corev1.Pod, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) error |
// UpsertCommitStatusCheck ensures the CommitStatus resource `name` contains
// a detail entry for the given pipeline activity, commit SHA and context:
// the resource is created when missing, a new detail is appended when none
// matches, and a single existing match is left untouched. It errors when
// name is empty or when more than one matching detail is found.
// NOTE(review): the phase parameter is unused in this function.
func (o *ControllerCommitStatusOptions) UpsertCommitStatusCheck(name string, pipelineActName string, url string, sha string, pullRequest string, context string, phase corev1.PodPhase, jxClient jenkinsv1client.Interface, ns string) error {
	if name != "" {
		status, err := jxClient.JenkinsV1().CommitStatuses(ns).Get(name, metav1.GetOptions{})
		create := false
		insert := false
		actRef := jenkinsv1.ResourceReference{}
		if err != nil {
			// Any Get error is treated as "resource does not exist yet".
			create = true
		} else {
			log.Logger().Infof("pod watcher: commit status already exists for %s", name)
		}
		// Create the activity reference
		act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
		if err == nil {
			actRef.Name = act.Name
			actRef.Kind = act.Kind
			actRef.UID = act.UID
			actRef.APIVersion = act.APIVersion
		}
		// Indexes of existing details that already describe this SHA and
		// pipeline activity.
		// NOTE(review): if the Get above failed, status could be nil here and
		// this range would panic; generated clients normally return an empty
		// object alongside the error — confirm.
		possibleStatusDetails := make([]int, 0)
		for i, v := range status.Spec.Items {
			if v.Commit.SHA == sha && v.PipelineActivity.Name == pipelineActName {
				possibleStatusDetails = append(possibleStatusDetails, i)
			}
		}
		statusDetails := jenkinsv1.CommitStatusDetails{}
		log.Logger().Debugf("pod watcher: Discovered possible status details %v", possibleStatusDetails)
		if len(possibleStatusDetails) == 1 {
			log.Logger().Debugf("CommitStatus %s for pipeline %s already exists", name, pipelineActName)
		} else if len(possibleStatusDetails) == 0 {
			insert = true
		} else {
			return fmt.Errorf("More than %d status detail for sha %s, should 1 or 0, found %v", len(possibleStatusDetails), sha, possibleStatusDetails)
		}
		if create || insert {
			// This is not the same pipeline activity the status was created for,
			// or there is no existing status, so we make a new one
			statusDetails = jenkinsv1.CommitStatusDetails{
				Checked: false,
				Commit: jenkinsv1.CommitStatusCommitReference{
					GitURL:      url,
					PullRequest: pullRequest,
					SHA:         sha,
				},
				PipelineActivity: actRef,
				Context:          context,
			}
		}
		if create {
			log.Logger().Infof("pod watcher: Creating commit status for pipeline activity %s", pipelineActName)
			status = &jenkinsv1.CommitStatus{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
					Labels: map[string]string{
						"lastCommitSha": sha,
					},
				},
				Spec: jenkinsv1.CommitStatusSpec{
					Items: []jenkinsv1.CommitStatusDetails{
						statusDetails,
					},
				},
			}
			_, err := jxClient.JenkinsV1().CommitStatuses(ns).Create(status)
			if err != nil {
				return err
			}
		} else if insert {
			status.Spec.Items = append(status.Spec.Items, statusDetails)
			log.Logger().Infof("pod watcher: Adding commit status for pipeline activity %s", pipelineActName)
			_, err := jxClient.JenkinsV1().CommitStatuses(ns).PatchUpdate(status)
			if err != nil {
				return err
			}
		} else {
			log.Logger().Debugf("pod watcher: Not updating or creating pipeline activity %s", pipelineActName)
		}
	} else {
		return errors.New("commit status controller: Must supply name")
	}
	return nil
}
// update publishes the state of statusDetails to the git provider as a
// commit status: "pending" while unchecked, "success" when every item
// passed, otherwise "failure" with a markdown table of the failing checks.
// NOTE(review): the jxClient and ns parameters are unused in this method.
func (o *ControllerCommitStatusOptions) update(statusDetails *jenkinsv1.CommitStatusDetails, jxClient jenkinsv1client.Interface, ns string) error {
	gitProvider, gitRepoInfo, err := o.getGitProvider(statusDetails.Commit.GitURL)
	if err != nil {
		return err
	}
	pass := false
	if statusDetails.Checked {
		// Build one markdown table row per failed item for the PR comment.
		var commentBuilder strings.Builder
		pass = true
		for _, c := range statusDetails.Items {
			if !c.Pass {
				pass = false
				fmt.Fprintf(&commentBuilder, "%s | %s | %s | TODO | `/test this`\n", c.Name, c.Description, statusDetails.Commit.SHA)
			}
		}
		if pass {
			_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "success", "", "Completed successfully", "", statusDetails.Context, gitProvider, gitRepoInfo)
			if err != nil {
				return err
			}
		} else {
			// The %% in the issue URL escapes a literal % for Sprintf.
			comment := fmt.Sprintf(
				"The following commit statusDetails checks **failed**, say `/retest` to rerun them all:\n"+
					"\n"+
					"Name | Description | Commit | Details | Rerun command\n"+
					"--- | --- | --- | --- | --- \n"+
					"%s\n"+
					"<details>\n"+
					"\n"+
					"Instructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n"+
					"</details>", commentBuilder.String())
			_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "failure", "", fmt.Sprintf("%s failed", statusDetails.Context), comment, statusDetails.Context, gitProvider, gitRepoInfo)
			if err != nil {
				return err
			}
		}
	} else {
		// Not all checks have completed yet — report pending.
		_, err = extensions.NotifyCommitStatus(statusDetails.Commit, "pending", "", fmt.Sprintf("Waiting for %s to complete", statusDetails.Context), "", statusDetails.Context, gitProvider, gitRepoInfo)
		if err != nil {
			return err
		}
	}
	return nil
}
// getGitProvider resolves a git provider and repository info for the given
// git URL. As a side effect it exports the first matching auth server user's
// credentials as the GIT_USERNAME and GIT_API_TOKEN environment variables so
// downstream git operations can authenticate.
func (o *ControllerCommitStatusOptions) getGitProvider(url string) (gits.GitProvider, *gits.GitRepository, error) {
	// TODO This is an epic hack to get the git stuff working
	gitInfo, err := gits.ParseGitURL(url)
	if err != nil {
		return nil, nil, err
	}
	authConfigSvc, err := o.GitAuthConfigService()
	if err != nil {
		return nil, nil, err
	}
	gitKind, err := o.GitServerKind(gitInfo)
	if err != nil {
		return nil, nil, err
	}
	// Export credentials from the first server of the right kind that has
	// at least one user configured.
	for _, server := range authConfigSvc.Config().Servers {
		if server.Kind == gitKind && len(server.Users) >= 1 {
			// Just grab the first user for now
			username := server.Users[0].Username
			apiToken := server.Users[0].ApiToken
			err = os.Setenv("GIT_USERNAME", username)
			if err != nil {
				return nil, nil, err
			}
			err = os.Setenv("GIT_API_TOKEN", apiToken)
			if err != nil {
				return nil, nil, err
			}
			break
		}
	}
	return o.CreateGitProviderForURLWithoutKind(url)
}
// getBuildNumber extracts the trailing build-number segment from a
// PipelineActivity name of the form org-repo-branch-buildNumber. An empty
// name yields the sentinel "-1" (meaning "no previous build"); a name with
// three or fewer segments yields "" (treated as a parse error by callers).
func getBuildNumber(pipelineActName string) string {
	if pipelineActName == "" {
		return "-1"
	}
	segments := strings.Split(pipelineActName, "-")
	if len(segments) <= 3 {
		return ""
	}
	return segments[len(segments)-1]
}
| {
if pod != nil {
labels := pod.Labels
if labels != nil {
buildName := labels[builds.LabelBuildName]
if buildName == "" {
buildName = labels[builds.LabelOldBuildName]
}
if buildName == "" {
buildName = labels[builds.LabelPipelineRunName]
}
if buildName != "" {
org := ""
repo := ""
pullRequest := ""
pullPullSha := ""
pullBaseSha := ""
buildNumber := ""
jxBuildNumber := ""
buildId := ""
sourceUrl := ""
branch := ""
containers, _, _ := kube.GetContainersWithStatusAndIsInit(pod)
for _, container := range containers {
for _, e := range container.Env {
switch e.Name {
case "REPO_OWNER":
org = e.Value
case "REPO_NAME":
repo = e.Value
case "PULL_NUMBER":
pullRequest = fmt.Sprintf("PR-%s", e.Value)
case "PULL_PULL_SHA":
pullPullSha = e.Value
case "PULL_BASE_SHA":
pullBaseSha = e.Value
case "JX_BUILD_NUMBER":
jxBuildNumber = e.Value
case "BUILD_NUMBER":
buildNumber = e.Value
case "BUILD_ID":
buildId = e.Value
case "SOURCE_URL":
sourceUrl = e.Value
case "PULL_BASE_REF":
branch = e.Value
}
}
}
sha := pullBaseSha
if pullRequest == "PR-" {
pullRequest = ""
} else {
sha = pullPullSha
branch = pullRequest
}
// if BUILD_ID is set, use it, otherwise if JX_BUILD_NUMBER is set, use it, otherwise use BUILD_NUMBER
if jxBuildNumber != "" {
buildNumber = jxBuildNumber
}
if buildId != "" {
buildNumber = buildId
}
pipelineActName := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, buildNumber))
// PLM TODO This is a bit of hack, we need a working build controller
// Try to add the lastCommitSha and gitUrl to the PipelineActivity
act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
if err != nil {
// An error just means the activity doesn't exist yet
log.Logger().Debugf("pod watcher: Unable to find PipelineActivity for %s", pipelineActName)
} else {
act.Spec.LastCommitSHA = sha
act.Spec.GitURL = sourceUrl
act.Spec.GitOwner = org
log.Logger().Debugf("pod watcher: Adding lastCommitSha: %s and gitUrl: %s to %s", act.Spec.LastCommitSHA, act.Spec.GitURL, pipelineActName)
_, err := jxClient.JenkinsV1().PipelineActivities(ns).PatchUpdate(act)
if err != nil {
// We can safely return this error as it will just get logged
return err
}
}
if org != "" && repo != "" && buildNumber != "" && (pullBaseSha != "" || pullPullSha != "") {
log.Logger().Debugf("pod watcher: build pod: %s, org: %s, repo: %s, buildNumber: %s, pullBaseSha: %s, pullPullSha: %s, pullRequest: %s, sourceUrl: %s", pod.Name, org, repo, buildNumber, pullBaseSha, pullPullSha, pullRequest, sourceUrl)
if sha == "" {
log.Logger().Warnf("pod watcher: No sha on %s, not upserting commit status", pod.Name)
} else {
prow := prow.Options{
KubeClient: kubeClient,
NS: ns,
}
prowConfig, _, err := prow.GetProwConfig()
if err != nil {
return errors.Wrap(err, "getting prow config")
}
contexts, err := config.GetBranchProtectionContexts(org, repo, prowConfig)
if err != nil {
return err
}
log.Logger().Debugf("pod watcher: Using contexts %v", contexts)
for _, ctx := range contexts {
if pullRequest != "" {
name := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, ctx))
err = o.UpsertCommitStatusCheck(name, pipelineActName, sourceUrl, sha, pullRequest, ctx, pod.Status.Phase, jxClient, ns)
if err != nil {
return err
}
}
}
}
}
}
}
}
return nil
} | identifier_body |
controller_commitstatus.go | package controller
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/jenkins-x/jx/v2/pkg/cmd/helper"
"github.com/jenkins-x/jx/v2/pkg/kube/naming"
"github.com/jenkins-x/jx/v2/pkg/cmd/opts"
"github.com/jenkins-x/jx/v2/pkg/prow/config"
"github.com/jenkins-x/jx/v2/pkg/gits"
"github.com/jenkins-x/jx/v2/pkg/prow"
"k8s.io/client-go/kubernetes"
"github.com/jenkins-x/jx/v2/pkg/extensions"
"github.com/pkg/errors"
"github.com/jenkins-x/jx/v2/pkg/builds"
corev1 "k8s.io/api/core/v1"
jenkinsv1client "github.com/jenkins-x/jx-api/pkg/client/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
"github.com/jenkins-x/jx-logging/pkg/log"
jenkinsv1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/jx/v2/pkg/kube"
"github.com/spf13/cobra"
)
// ControllerCommitStatusOptions the options for the controller that watches
// CommitStatus resources and build pods and mirrors their state to the git
// provider's commit statuses.
type ControllerCommitStatusOptions struct {
	ControllerOptions
}
// NewCmdControllerCommitStatus creates the cobra command that runs the
// commit status controller.
func NewCmdControllerCommitStatus(commonOpts *opts.CommonOptions) *cobra.Command {
	options := &ControllerCommitStatusOptions{
		ControllerOptions: ControllerOptions{CommonOptions: commonOpts},
	}
	return &cobra.Command{
		Use:   "commitstatus",
		Short: "Updates commit status",
		Run: func(cmd *cobra.Command, args []string) {
			options.Cmd = cmd
			options.Args = args
			helper.CheckErr(options.Run())
		},
	}
}
// Run implements this command. It registers the CommitStatus and
// PipelineActivity CRDs, then starts two informers against the dev
// namespace: one watching CommitStatus resources (on its own goroutine)
// and one watching build pods (on the calling goroutine). Since the stop
// channels are never closed, this method blocks indefinitely.
func (o *ControllerCommitStatusOptions) Run() error {
	// Always run in batch mode as a controller is never run interactively
	o.BatchMode = true
	jxClient, ns, err := o.JXClientAndDevNamespace()
	if err != nil {
		return err
	}
	kubeClient, _, err := o.KubeClientAndDevNamespace()
	if err != nil {
		return err
	}
	apisClient, err := o.ApiExtensionsClient()
	if err != nil {
		return err
	}
	if err := kube.RegisterCommitStatusCRD(apisClient); err != nil {
		return err
	}
	if err := kube.RegisterPipelineActivityCRD(apisClient); err != nil {
		return err
	}
	commitstatusListWatch := cache.NewListWatchFromClient(jxClient.JenkinsV1().RESTClient(), "commitstatuses", ns, fields.Everything())
	kube.SortListWatchByName(commitstatusListWatch)
	_, commitstatusController := cache.NewInformer(
		commitstatusListWatch,
		&jenkinsv1.CommitStatus{},
		time.Minute*10,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				o.onCommitStatusObj(obj, jxClient, ns)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				o.onCommitStatusObj(newObj, jxClient, ns)
			},
			DeleteFunc: func(obj interface{}) {
			},
		},
	)
	go commitstatusController.Run(make(chan struct{}))
	podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", ns, fields.Everything())
	kube.SortListWatchByName(podListWatch)
	_, podController := cache.NewInformer(
		podListWatch,
		&corev1.Pod{},
		time.Minute*10,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				o.onPodObj(obj, jxClient, kubeClient, ns)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				o.onPodObj(newObj, jxClient, kubeClient, ns)
			},
			DeleteFunc: func(obj interface{}) {
			},
		},
	)
	// Blocks forever: the stop channel is never closed. The trailing
	// `if err != nil` check that used to follow this call was dead code —
	// err had already been checked after its last assignment above.
	podController.Run(make(chan struct{}))
	return nil
}
// onCommitStatusObj handles an informer event for a CommitStatus resource,
// dispatching by runtime type. Unexpected types and processing errors are
// fatal.
func (o *ControllerCommitStatusOptions) onCommitStatusObj(obj interface{}, jxClient jenkinsv1client.Interface, ns string) {
	switch status := obj.(type) {
	case *jenkinsv1.CommitStatus:
		if err := o.onCommitStatus(status, jxClient, ns); err != nil {
			log.Logger().Fatalf("commit status controller: %v", err)
		}
	default:
		log.Logger().Fatalf("commit status controller: unexpected type %v", obj)
	}
}
// onCommitStatus reconciles one CommitStatus resource: details are grouped
// by commit SHA, the entry with the highest build number is chosen per SHA,
// and that entry is pushed to the git provider via update(). If the update
// fails, an "error" commit status is reported upstream before the original
// error is returned.
func (o *ControllerCommitStatusOptions) onCommitStatus(check *jenkinsv1.CommitStatus, jxClient jenkinsv1client.Interface, ns string) error {
	// Bucket every status detail by the commit SHA it refers to.
	groupedBySha := make(map[string][]jenkinsv1.CommitStatusDetails, 0)
	for _, v := range check.Spec.Items {
		if _, ok := groupedBySha[v.Commit.SHA]; !ok {
			groupedBySha[v.Commit.SHA] = make([]jenkinsv1.CommitStatusDetails, 0)
		}
		groupedBySha[v.Commit.SHA] = append(groupedBySha[v.Commit.SHA], v)
	}
	for _, vs := range groupedBySha {
		// Pick the detail with the highest build number. The zero-value
		// `last` has an empty activity name, which getBuildNumber maps to
		// "-1", so any real build wins the first comparison.
		var last jenkinsv1.CommitStatusDetails
		for _, v := range vs {
			lastBuildNumber, err := strconv.Atoi(getBuildNumber(last.PipelineActivity.Name))
			if err != nil {
				return err
			}
			buildNumber, err := strconv.Atoi(getBuildNumber(v.PipelineActivity.Name))
			if err != nil {
				return err
			}
			if lastBuildNumber < buildNumber {
				last = v
			}
		}
		err := o.update(&last, jxClient, ns)
		if err != nil {
			// Best effort: surface the failure on the commit itself before
			// bubbling the original update error up.
			gitProvider, gitRepoInfo, err1 := o.getGitProvider(last.Commit.GitURL)
			if err1 != nil {
				return err1
			}
			_, err1 = extensions.NotifyCommitStatus(last.Commit, "error", "", "Internal Error performing commit status updates", "", last.Context, gitProvider, gitRepoInfo)
			if err1 != nil {
				// NOTE(review): this deliberately returns the original update
				// error; the notification failure err1 is dropped here.
				return err
			}
			return err
		}
	}
	return nil
}
// onPodObj handles an informer event for a build pod, dispatching by
// runtime type. Unexpected types and processing errors are fatal.
func (o *ControllerCommitStatusOptions) onPodObj(obj interface{}, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) {
	switch pod := obj.(type) {
	case *corev1.Pod:
		if err := o.onPod(pod, jxClient, kubeClient, ns); err != nil {
			log.Logger().Fatalf("pod watcher: %v", err)
		}
	default:
		log.Logger().Fatalf("pod watcher: unexpected type %v", obj)
	}
}
func (o *ControllerCommitStatusOptions) onPod(pod *corev1.Pod, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) error {
if pod != nil {
labels := pod.Labels
if labels != nil {
buildName := labels[builds.LabelBuildName]
if buildName == "" {
buildName = labels[builds.LabelOldBuildName]
}
if buildName == "" {
buildName = labels[builds.LabelPipelineRunName]
}
if buildName != "" |
}
}
return nil
}
// UpsertCommitStatusCheck ensures the CommitStatus resource `name` contains
// a detail entry for the given pipeline activity, commit SHA and context:
// the resource is created when missing, a new detail is appended when none
// matches, and a single existing match is left untouched. It errors when
// name is empty or when more than one matching detail is found.
// NOTE(review): the phase parameter is unused in this function.
func (o *ControllerCommitStatusOptions) UpsertCommitStatusCheck(name string, pipelineActName string, url string, sha string, pullRequest string, context string, phase corev1.PodPhase, jxClient jenkinsv1client.Interface, ns string) error {
	if name != "" {
		status, err := jxClient.JenkinsV1().CommitStatuses(ns).Get(name, metav1.GetOptions{})
		create := false
		insert := false
		actRef := jenkinsv1.ResourceReference{}
		if err != nil {
			// Any Get error is treated as "resource does not exist yet".
			create = true
		} else {
			log.Logger().Infof("pod watcher: commit status already exists for %s", name)
		}
		// Create the activity reference
		act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
		if err == nil {
			actRef.Name = act.Name
			actRef.Kind = act.Kind
			actRef.UID = act.UID
			actRef.APIVersion = act.APIVersion
		}
		// Indexes of existing details that already describe this SHA and
		// pipeline activity.
		// NOTE(review): if the Get above failed, status could be nil here and
		// this range would panic; generated clients normally return an empty
		// object alongside the error — confirm.
		possibleStatusDetails := make([]int, 0)
		for i, v := range status.Spec.Items {
			if v.Commit.SHA == sha && v.PipelineActivity.Name == pipelineActName {
				possibleStatusDetails = append(possibleStatusDetails, i)
			}
		}
		statusDetails := jenkinsv1.CommitStatusDetails{}
		log.Logger().Debugf("pod watcher: Discovered possible status details %v", possibleStatusDetails)
		if len(possibleStatusDetails) == 1 {
			log.Logger().Debugf("CommitStatus %s for pipeline %s already exists", name, pipelineActName)
		} else if len(possibleStatusDetails) == 0 {
			insert = true
		} else {
			return fmt.Errorf("More than %d status detail for sha %s, should 1 or 0, found %v", len(possibleStatusDetails), sha, possibleStatusDetails)
		}
		if create || insert {
			// This is not the same pipeline activity the status was created for,
			// or there is no existing status, so we make a new one
			statusDetails = jenkinsv1.CommitStatusDetails{
				Checked: false,
				Commit: jenkinsv1.CommitStatusCommitReference{
					GitURL:      url,
					PullRequest: pullRequest,
					SHA:         sha,
				},
				PipelineActivity: actRef,
				Context:          context,
			}
		}
		if create {
			log.Logger().Infof("pod watcher: Creating commit status for pipeline activity %s", pipelineActName)
			status = &jenkinsv1.CommitStatus{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
					Labels: map[string]string{
						"lastCommitSha": sha,
					},
				},
				Spec: jenkinsv1.CommitStatusSpec{
					Items: []jenkinsv1.CommitStatusDetails{
						statusDetails,
					},
				},
			}
			_, err := jxClient.JenkinsV1().CommitStatuses(ns).Create(status)
			if err != nil {
				return err
			}
		} else if insert {
			status.Spec.Items = append(status.Spec.Items, statusDetails)
			log.Logger().Infof("pod watcher: Adding commit status for pipeline activity %s", pipelineActName)
			_, err := jxClient.JenkinsV1().CommitStatuses(ns).PatchUpdate(status)
			if err != nil {
				return err
			}
		} else {
			log.Logger().Debugf("pod watcher: Not updating or creating pipeline activity %s", pipelineActName)
		}
	} else {
		return errors.New("commit status controller: Must supply name")
	}
	return nil
}
// update publishes the state of statusDetails to the git provider as a
// commit status: "pending" while unchecked, "success" when every item
// passed, otherwise "failure" with a markdown table of the failing checks.
// NOTE(review): the jxClient and ns parameters are unused in this method.
func (o *ControllerCommitStatusOptions) update(statusDetails *jenkinsv1.CommitStatusDetails, jxClient jenkinsv1client.Interface, ns string) error {
	gitProvider, gitRepoInfo, err := o.getGitProvider(statusDetails.Commit.GitURL)
	if err != nil {
		return err
	}
	pass := false
	if statusDetails.Checked {
		// Build one markdown table row per failed item for the PR comment.
		var commentBuilder strings.Builder
		pass = true
		for _, c := range statusDetails.Items {
			if !c.Pass {
				pass = false
				fmt.Fprintf(&commentBuilder, "%s | %s | %s | TODO | `/test this`\n", c.Name, c.Description, statusDetails.Commit.SHA)
			}
		}
		if pass {
			_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "success", "", "Completed successfully", "", statusDetails.Context, gitProvider, gitRepoInfo)
			if err != nil {
				return err
			}
		} else {
			// The %% in the issue URL escapes a literal % for Sprintf.
			comment := fmt.Sprintf(
				"The following commit statusDetails checks **failed**, say `/retest` to rerun them all:\n"+
					"\n"+
					"Name | Description | Commit | Details | Rerun command\n"+
					"--- | --- | --- | --- | --- \n"+
					"%s\n"+
					"<details>\n"+
					"\n"+
					"Instructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n"+
					"</details>", commentBuilder.String())
			_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "failure", "", fmt.Sprintf("%s failed", statusDetails.Context), comment, statusDetails.Context, gitProvider, gitRepoInfo)
			if err != nil {
				return err
			}
		}
	} else {
		// Not all checks have completed yet — report pending.
		_, err = extensions.NotifyCommitStatus(statusDetails.Commit, "pending", "", fmt.Sprintf("Waiting for %s to complete", statusDetails.Context), "", statusDetails.Context, gitProvider, gitRepoInfo)
		if err != nil {
			return err
		}
	}
	return nil
}
// getGitProvider resolves a git provider and repository info for the given
// git URL. As a side effect it exports the first matching auth server user's
// credentials as the GIT_USERNAME and GIT_API_TOKEN environment variables so
// downstream git operations can authenticate.
func (o *ControllerCommitStatusOptions) getGitProvider(url string) (gits.GitProvider, *gits.GitRepository, error) {
	// TODO This is an epic hack to get the git stuff working
	gitInfo, err := gits.ParseGitURL(url)
	if err != nil {
		return nil, nil, err
	}
	authConfigSvc, err := o.GitAuthConfigService()
	if err != nil {
		return nil, nil, err
	}
	gitKind, err := o.GitServerKind(gitInfo)
	if err != nil {
		return nil, nil, err
	}
	// Export credentials from the first server of the right kind that has
	// at least one user configured.
	for _, server := range authConfigSvc.Config().Servers {
		if server.Kind == gitKind && len(server.Users) >= 1 {
			// Just grab the first user for now
			username := server.Users[0].Username
			apiToken := server.Users[0].ApiToken
			err = os.Setenv("GIT_USERNAME", username)
			if err != nil {
				return nil, nil, err
			}
			err = os.Setenv("GIT_API_TOKEN", apiToken)
			if err != nil {
				return nil, nil, err
			}
			break
		}
	}
	return o.CreateGitProviderForURLWithoutKind(url)
}
// getBuildNumber returns the build-number suffix of a PipelineActivity name
// shaped like org-repo-branch-buildNumber. The empty name maps to the "-1"
// sentinel; names with too few dash-separated segments map to "".
func getBuildNumber(pipelineActName string) string {
	if pipelineActName == "" {
		return "-1"
	}
	parts := strings.Split(pipelineActName, "-")
	if n := len(parts); n > 3 {
		return parts[n-1]
	}
	return ""
}
| {
org := ""
repo := ""
pullRequest := ""
pullPullSha := ""
pullBaseSha := ""
buildNumber := ""
jxBuildNumber := ""
buildId := ""
sourceUrl := ""
branch := ""
containers, _, _ := kube.GetContainersWithStatusAndIsInit(pod)
for _, container := range containers {
for _, e := range container.Env {
switch e.Name {
case "REPO_OWNER":
org = e.Value
case "REPO_NAME":
repo = e.Value
case "PULL_NUMBER":
pullRequest = fmt.Sprintf("PR-%s", e.Value)
case "PULL_PULL_SHA":
pullPullSha = e.Value
case "PULL_BASE_SHA":
pullBaseSha = e.Value
case "JX_BUILD_NUMBER":
jxBuildNumber = e.Value
case "BUILD_NUMBER":
buildNumber = e.Value
case "BUILD_ID":
buildId = e.Value
case "SOURCE_URL":
sourceUrl = e.Value
case "PULL_BASE_REF":
branch = e.Value
}
}
}
sha := pullBaseSha
if pullRequest == "PR-" {
pullRequest = ""
} else {
sha = pullPullSha
branch = pullRequest
}
// if BUILD_ID is set, use it, otherwise if JX_BUILD_NUMBER is set, use it, otherwise use BUILD_NUMBER
if jxBuildNumber != "" {
buildNumber = jxBuildNumber
}
if buildId != "" {
buildNumber = buildId
}
pipelineActName := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, buildNumber))
// PLM TODO This is a bit of hack, we need a working build controller
// Try to add the lastCommitSha and gitUrl to the PipelineActivity
act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
if err != nil {
// An error just means the activity doesn't exist yet
log.Logger().Debugf("pod watcher: Unable to find PipelineActivity for %s", pipelineActName)
} else {
act.Spec.LastCommitSHA = sha
act.Spec.GitURL = sourceUrl
act.Spec.GitOwner = org
log.Logger().Debugf("pod watcher: Adding lastCommitSha: %s and gitUrl: %s to %s", act.Spec.LastCommitSHA, act.Spec.GitURL, pipelineActName)
_, err := jxClient.JenkinsV1().PipelineActivities(ns).PatchUpdate(act)
if err != nil {
// We can safely return this error as it will just get logged
return err
}
}
if org != "" && repo != "" && buildNumber != "" && (pullBaseSha != "" || pullPullSha != "") {
log.Logger().Debugf("pod watcher: build pod: %s, org: %s, repo: %s, buildNumber: %s, pullBaseSha: %s, pullPullSha: %s, pullRequest: %s, sourceUrl: %s", pod.Name, org, repo, buildNumber, pullBaseSha, pullPullSha, pullRequest, sourceUrl)
if sha == "" {
log.Logger().Warnf("pod watcher: No sha on %s, not upserting commit status", pod.Name)
} else {
prow := prow.Options{
KubeClient: kubeClient,
NS: ns,
}
prowConfig, _, err := prow.GetProwConfig()
if err != nil {
return errors.Wrap(err, "getting prow config")
}
contexts, err := config.GetBranchProtectionContexts(org, repo, prowConfig)
if err != nil {
return err
}
log.Logger().Debugf("pod watcher: Using contexts %v", contexts)
for _, ctx := range contexts {
if pullRequest != "" {
name := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, ctx))
err = o.UpsertCommitStatusCheck(name, pipelineActName, sourceUrl, sha, pullRequest, ctx, pod.Status.Phase, jxClient, ns)
if err != nil {
return err
}
}
}
}
}
} | conditional_block |
controller_commitstatus.go | package controller
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/jenkins-x/jx/v2/pkg/cmd/helper"
"github.com/jenkins-x/jx/v2/pkg/kube/naming"
"github.com/jenkins-x/jx/v2/pkg/cmd/opts"
"github.com/jenkins-x/jx/v2/pkg/prow/config"
"github.com/jenkins-x/jx/v2/pkg/gits"
"github.com/jenkins-x/jx/v2/pkg/prow"
"k8s.io/client-go/kubernetes"
"github.com/jenkins-x/jx/v2/pkg/extensions"
"github.com/pkg/errors"
"github.com/jenkins-x/jx/v2/pkg/builds"
corev1 "k8s.io/api/core/v1"
jenkinsv1client "github.com/jenkins-x/jx-api/pkg/client/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
"github.com/jenkins-x/jx-logging/pkg/log"
jenkinsv1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/jx/v2/pkg/kube"
"github.com/spf13/cobra"
)
// ControllerCommitStatusOptions the options for the controller that watches
// CommitStatus resources and build pods and mirrors their state to the git
// provider's commit statuses.
type ControllerCommitStatusOptions struct {
	ControllerOptions
}
// NewCmdControllerCommitStatus builds the "commitstatus" cobra command that
// launches the commit status controller.
func NewCmdControllerCommitStatus(commonOpts *opts.CommonOptions) *cobra.Command {
	options := &ControllerCommitStatusOptions{
		ControllerOptions: ControllerOptions{
			CommonOptions: commonOpts,
		},
	}
	runFunc := func(cmd *cobra.Command, args []string) {
		options.Cmd = cmd
		options.Args = args
		err := options.Run()
		helper.CheckErr(err)
	}
	return &cobra.Command{
		Use:   "commitstatus",
		Short: "Updates commit status",
		Run:   runFunc,
	}
}
// Run implements this command. It registers the CommitStatus and
// PipelineActivity CRDs, then starts two informers against the dev
// namespace: one watching CommitStatus resources (on its own goroutine)
// and one watching build pods (on the calling goroutine). Since the stop
// channels are never closed, this method blocks indefinitely.
func (o *ControllerCommitStatusOptions) Run() error {
	// Always run in batch mode as a controller is never run interactively
	o.BatchMode = true
	jxClient, ns, err := o.JXClientAndDevNamespace()
	if err != nil {
		return err
	}
	kubeClient, _, err := o.KubeClientAndDevNamespace()
	if err != nil {
		return err
	}
	apisClient, err := o.ApiExtensionsClient()
	if err != nil {
		return err
	}
	if err := kube.RegisterCommitStatusCRD(apisClient); err != nil {
		return err
	}
	if err := kube.RegisterPipelineActivityCRD(apisClient); err != nil {
		return err
	}
	commitstatusListWatch := cache.NewListWatchFromClient(jxClient.JenkinsV1().RESTClient(), "commitstatuses", ns, fields.Everything())
	kube.SortListWatchByName(commitstatusListWatch)
	_, commitstatusController := cache.NewInformer(
		commitstatusListWatch,
		&jenkinsv1.CommitStatus{},
		time.Minute*10,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				o.onCommitStatusObj(obj, jxClient, ns)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				o.onCommitStatusObj(newObj, jxClient, ns)
			},
			DeleteFunc: func(obj interface{}) {
			},
		},
	)
	go commitstatusController.Run(make(chan struct{}))
	podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", ns, fields.Everything())
	kube.SortListWatchByName(podListWatch)
	_, podController := cache.NewInformer(
		podListWatch,
		&corev1.Pod{},
		time.Minute*10,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				o.onPodObj(obj, jxClient, kubeClient, ns)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				o.onPodObj(newObj, jxClient, kubeClient, ns)
			},
			DeleteFunc: func(obj interface{}) {
			},
		},
	)
	// Blocks forever: the stop channel is never closed. The trailing
	// `if err != nil` check that used to follow this call was dead code —
	// err had already been checked after its last assignment above.
	podController.Run(make(chan struct{}))
	return nil
}
// onCommitStatusObj handles an informer event for a CommitStatus resource.
// A value of any other type, or a processing error, is fatal.
func (o *ControllerCommitStatusOptions) onCommitStatusObj(obj interface{}, jxClient jenkinsv1client.Interface, ns string) {
	cs, ok := obj.(*jenkinsv1.CommitStatus)
	if ok {
		if processErr := o.onCommitStatus(cs, jxClient, ns); processErr != nil {
			log.Logger().Fatalf("commit status controller: %v", processErr)
		}
	} else {
		log.Logger().Fatalf("commit status controller: unexpected type %v", obj)
	}
}
// onCommitStatus reconciles one CommitStatus resource: details are grouped
// by commit SHA, the entry with the highest build number is chosen per SHA,
// and that entry is pushed to the git provider via update(). If the update
// fails, an "error" commit status is reported upstream before the original
// error is returned.
func (o *ControllerCommitStatusOptions) onCommitStatus(check *jenkinsv1.CommitStatus, jxClient jenkinsv1client.Interface, ns string) error {
	// Bucket every status detail by the commit SHA it refers to.
	groupedBySha := make(map[string][]jenkinsv1.CommitStatusDetails, 0)
	for _, v := range check.Spec.Items {
		if _, ok := groupedBySha[v.Commit.SHA]; !ok {
			groupedBySha[v.Commit.SHA] = make([]jenkinsv1.CommitStatusDetails, 0)
		}
		groupedBySha[v.Commit.SHA] = append(groupedBySha[v.Commit.SHA], v)
	}
	for _, vs := range groupedBySha {
		// Pick the detail with the highest build number. The zero-value
		// `last` has an empty activity name, which getBuildNumber maps to
		// "-1", so any real build wins the first comparison.
		var last jenkinsv1.CommitStatusDetails
		for _, v := range vs {
			lastBuildNumber, err := strconv.Atoi(getBuildNumber(last.PipelineActivity.Name))
			if err != nil {
				return err
			}
			buildNumber, err := strconv.Atoi(getBuildNumber(v.PipelineActivity.Name))
			if err != nil {
				return err
			}
			if lastBuildNumber < buildNumber {
				last = v
			}
		}
		err := o.update(&last, jxClient, ns)
		if err != nil {
			// Best effort: surface the failure on the commit itself before
			// bubbling the original update error up.
			gitProvider, gitRepoInfo, err1 := o.getGitProvider(last.Commit.GitURL)
			if err1 != nil {
				return err1
			}
			_, err1 = extensions.NotifyCommitStatus(last.Commit, "error", "", "Internal Error performing commit status updates", "", last.Context, gitProvider, gitRepoInfo)
			if err1 != nil {
				// NOTE(review): this deliberately returns the original update
				// error; the notification failure err1 is dropped here.
				return err
			}
			return err
		}
	}
	return nil
}
// onPodObj handles an informer event for a build pod. A value of any other
// type, or a processing error, is fatal.
func (o *ControllerCommitStatusOptions) onPodObj(obj interface{}, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		log.Logger().Fatalf("pod watcher: unexpected type %v", obj)
		return
	}
	if err := o.onPod(pod, jxClient, kubeClient, ns); err != nil {
		log.Logger().Fatalf("pod watcher: %v", err)
	}
}
// onPod reconciles a build pod: when the pod carries Jenkins X build labels,
// it harvests the prow-style environment variables from its containers to
// identify the repo, branch, pull request and commit SHA, back-fills the
// matching PipelineActivity with the SHA and git URL, and upserts a
// CommitStatus check for every required branch-protection context.
// A nil pod or a pod without build labels is ignored.
func (o *ControllerCommitStatusOptions) onPod(pod *corev1.Pod, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) error {
	if pod != nil {
		labels := pod.Labels
		if labels != nil {
			// Fall back through the known build-name labels in order.
			buildName := labels[builds.LabelBuildName]
			if buildName == "" {
				buildName = labels[builds.LabelOldBuildName]
			}
			if buildName == "" {
				buildName = labels[builds.LabelPipelineRunName]
			}
			if buildName != "" {
				org := ""
				repo := ""
				pullRequest := ""
				pullPullSha := ""
				pullBaseSha := ""
				buildNumber := ""
				jxBuildNumber := ""
				buildId := ""
				sourceUrl := ""
				branch := ""
				// Scan every container's environment for the prow-injected
				// variables that identify the build.
				containers, _, _ := kube.GetContainersWithStatusAndIsInit(pod)
				for _, container := range containers {
					for _, e := range container.Env {
						switch e.Name {
						case "REPO_OWNER":
							org = e.Value
						case "REPO_NAME":
							repo = e.Value
						case "PULL_NUMBER":
							pullRequest = fmt.Sprintf("PR-%s", e.Value)
						case "PULL_PULL_SHA":
							pullPullSha = e.Value
						case "PULL_BASE_SHA":
							pullBaseSha = e.Value
						case "JX_BUILD_NUMBER":
							jxBuildNumber = e.Value
						case "BUILD_NUMBER":
							buildNumber = e.Value
						case "BUILD_ID":
							buildId = e.Value
						case "SOURCE_URL":
							sourceUrl = e.Value
						case "PULL_BASE_REF":
							branch = e.Value
						}
					}
				}
				// "PR-" with no number means PULL_NUMBER was empty (a branch
				// build): keep the base SHA. Otherwise use the PR head SHA
				// and the PR name as the branch segment of the activity name.
				sha := pullBaseSha
				if pullRequest == "PR-" {
					pullRequest = ""
				} else {
					sha = pullPullSha
					branch = pullRequest
				}
				// if BUILD_ID is set, use it, otherwise if JX_BUILD_NUMBER is set, use it, otherwise use BUILD_NUMBER
				if jxBuildNumber != "" {
					buildNumber = jxBuildNumber
				}
				if buildId != "" {
					buildNumber = buildId
				}
				pipelineActName := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, buildNumber))
				// PLM TODO This is a bit of hack, we need a working build controller
				// Try to add the lastCommitSha and gitUrl to the PipelineActivity
				act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
				if err != nil {
					// An error just means the activity doesn't exist yet
					log.Logger().Debugf("pod watcher: Unable to find PipelineActivity for %s", pipelineActName)
				} else {
					act.Spec.LastCommitSHA = sha
					act.Spec.GitURL = sourceUrl
					act.Spec.GitOwner = org
					log.Logger().Debugf("pod watcher: Adding lastCommitSha: %s and gitUrl: %s to %s", act.Spec.LastCommitSHA, act.Spec.GitURL, pipelineActName)
					_, err := jxClient.JenkinsV1().PipelineActivities(ns).PatchUpdate(act)
					if err != nil {
						// We can safely return this error as it will just get logged
						return err
					}
				}
				if org != "" && repo != "" && buildNumber != "" && (pullBaseSha != "" || pullPullSha != "") {
					log.Logger().Debugf("pod watcher: build pod: %s, org: %s, repo: %s, buildNumber: %s, pullBaseSha: %s, pullPullSha: %s, pullRequest: %s, sourceUrl: %s", pod.Name, org, repo, buildNumber, pullBaseSha, pullPullSha, pullRequest, sourceUrl)
					if sha == "" {
						log.Logger().Warnf("pod watcher: No sha on %s, not upserting commit status", pod.Name)
					} else {
						// Look up the branch-protection contexts required by
						// prow and upsert a commit status check for each one
						// (only for pull request builds).
						prow := prow.Options{
							KubeClient: kubeClient,
							NS:         ns,
						}
						prowConfig, _, err := prow.GetProwConfig()
						if err != nil {
							return errors.Wrap(err, "getting prow config")
						}
						contexts, err := config.GetBranchProtectionContexts(org, repo, prowConfig)
						if err != nil {
							return err
						}
						log.Logger().Debugf("pod watcher: Using contexts %v", contexts)
						for _, ctx := range contexts {
							if pullRequest != "" {
								name := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, ctx))
								err = o.UpsertCommitStatusCheck(name, pipelineActName, sourceUrl, sha, pullRequest, ctx, pod.Status.Phase, jxClient, ns)
								if err != nil {
									return err
								}
							}
						}
					}
				}
			}
		}
	}
	return nil
}
func (o *ControllerCommitStatusOptions) UpsertCommitStatusCheck(name string, pipelineActName string, url string, sha string, pullRequest string, context string, phase corev1.PodPhase, jxClient jenkinsv1client.Interface, ns string) error {
if name != "" {
status, err := jxClient.JenkinsV1().CommitStatuses(ns).Get(name, metav1.GetOptions{})
create := false
insert := false
actRef := jenkinsv1.ResourceReference{}
if err != nil {
create = true
} else {
log.Logger().Infof("pod watcher: commit status already exists for %s", name)
}
// Create the activity reference
act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
if err == nil {
actRef.Name = act.Name
actRef.Kind = act.Kind
actRef.UID = act.UID
actRef.APIVersion = act.APIVersion
}
possibleStatusDetails := make([]int, 0)
for i, v := range status.Spec.Items {
if v.Commit.SHA == sha && v.PipelineActivity.Name == pipelineActName {
possibleStatusDetails = append(possibleStatusDetails, i)
}
}
statusDetails := jenkinsv1.CommitStatusDetails{}
log.Logger().Debugf("pod watcher: Discovered possible status details %v", possibleStatusDetails)
if len(possibleStatusDetails) == 1 {
log.Logger().Debugf("CommitStatus %s for pipeline %s already exists", name, pipelineActName)
} else if len(possibleStatusDetails) == 0 {
insert = true
} else {
return fmt.Errorf("More than %d status detail for sha %s, should 1 or 0, found %v", len(possibleStatusDetails), sha, possibleStatusDetails)
}
if create || insert {
// This is not the same pipeline activity the status was created for,
// or there is no existing status, so we make a new one
statusDetails = jenkinsv1.CommitStatusDetails{
Checked: false,
Commit: jenkinsv1.CommitStatusCommitReference{
GitURL: url,
PullRequest: pullRequest,
SHA: sha,
},
PipelineActivity: actRef,
Context: context,
}
}
if create {
log.Logger().Infof("pod watcher: Creating commit status for pipeline activity %s", pipelineActName)
status = &jenkinsv1.CommitStatus{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{ | statusDetails,
},
},
}
_, err := jxClient.JenkinsV1().CommitStatuses(ns).Create(status)
if err != nil {
return err
}
} else if insert {
status.Spec.Items = append(status.Spec.Items, statusDetails)
log.Logger().Infof("pod watcher: Adding commit status for pipeline activity %s", pipelineActName)
_, err := jxClient.JenkinsV1().CommitStatuses(ns).PatchUpdate(status)
if err != nil {
return err
}
} else {
log.Logger().Debugf("pod watcher: Not updating or creating pipeline activity %s", pipelineActName)
}
} else {
return errors.New("commit status controller: Must supply name")
}
return nil
}
func (o *ControllerCommitStatusOptions) update(statusDetails *jenkinsv1.CommitStatusDetails, jxClient jenkinsv1client.Interface, ns string) error {
gitProvider, gitRepoInfo, err := o.getGitProvider(statusDetails.Commit.GitURL)
if err != nil {
return err
}
pass := false
if statusDetails.Checked {
var commentBuilder strings.Builder
pass = true
for _, c := range statusDetails.Items {
if !c.Pass {
pass = false
fmt.Fprintf(&commentBuilder, "%s | %s | %s | TODO | `/test this`\n", c.Name, c.Description, statusDetails.Commit.SHA)
}
}
if pass {
_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "success", "", "Completed successfully", "", statusDetails.Context, gitProvider, gitRepoInfo)
if err != nil {
return err
}
} else {
comment := fmt.Sprintf(
"The following commit statusDetails checks **failed**, say `/retest` to rerun them all:\n"+
"\n"+
"Name | Description | Commit | Details | Rerun command\n"+
"--- | --- | --- | --- | --- \n"+
"%s\n"+
"<details>\n"+
"\n"+
"Instructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n"+
"</details>", commentBuilder.String())
_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "failure", "", fmt.Sprintf("%s failed", statusDetails.Context), comment, statusDetails.Context, gitProvider, gitRepoInfo)
if err != nil {
return err
}
}
} else {
_, err = extensions.NotifyCommitStatus(statusDetails.Commit, "pending", "", fmt.Sprintf("Waiting for %s to complete", statusDetails.Context), "", statusDetails.Context, gitProvider, gitRepoInfo)
if err != nil {
return err
}
}
return nil
}
func (o *ControllerCommitStatusOptions) getGitProvider(url string) (gits.GitProvider, *gits.GitRepository, error) {
// TODO This is an epic hack to get the git stuff working
gitInfo, err := gits.ParseGitURL(url)
if err != nil {
return nil, nil, err
}
authConfigSvc, err := o.GitAuthConfigService()
if err != nil {
return nil, nil, err
}
gitKind, err := o.GitServerKind(gitInfo)
if err != nil {
return nil, nil, err
}
for _, server := range authConfigSvc.Config().Servers {
if server.Kind == gitKind && len(server.Users) >= 1 {
// Just grab the first user for now
username := server.Users[0].Username
apiToken := server.Users[0].ApiToken
err = os.Setenv("GIT_USERNAME", username)
if err != nil {
return nil, nil, err
}
err = os.Setenv("GIT_API_TOKEN", apiToken)
if err != nil {
return nil, nil, err
}
break
}
}
return o.CreateGitProviderForURLWithoutKind(url)
}
func getBuildNumber(pipelineActName string) string {
if pipelineActName == "" {
return "-1"
}
pipelineParts := strings.Split(pipelineActName, "-")
if len(pipelineParts) > 3 {
return pipelineParts[len(pipelineParts)-1]
} else {
return ""
}
} | "lastCommitSha": sha,
},
},
Spec: jenkinsv1.CommitStatusSpec{
Items: []jenkinsv1.CommitStatusDetails{ | random_line_split |
controller_commitstatus.go | package controller
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/jenkins-x/jx/v2/pkg/cmd/helper"
"github.com/jenkins-x/jx/v2/pkg/kube/naming"
"github.com/jenkins-x/jx/v2/pkg/cmd/opts"
"github.com/jenkins-x/jx/v2/pkg/prow/config"
"github.com/jenkins-x/jx/v2/pkg/gits"
"github.com/jenkins-x/jx/v2/pkg/prow"
"k8s.io/client-go/kubernetes"
"github.com/jenkins-x/jx/v2/pkg/extensions"
"github.com/pkg/errors"
"github.com/jenkins-x/jx/v2/pkg/builds"
corev1 "k8s.io/api/core/v1"
jenkinsv1client "github.com/jenkins-x/jx-api/pkg/client/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/tools/cache"
"github.com/jenkins-x/jx-logging/pkg/log"
jenkinsv1 "github.com/jenkins-x/jx-api/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/jx/v2/pkg/kube"
"github.com/spf13/cobra"
)
// ControllerCommitStatusOptions the options for the controller
type ControllerCommitStatusOptions struct {
ControllerOptions
}
// NewCmdControllerCommitStatus creates a command object for the "create" command
func NewCmdControllerCommitStatus(commonOpts *opts.CommonOptions) *cobra.Command {
options := &ControllerCommitStatusOptions{
ControllerOptions: ControllerOptions{
CommonOptions: commonOpts,
},
}
cmd := &cobra.Command{
Use: "commitstatus",
Short: "Updates commit status",
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
return cmd
}
// Run implements this command
func (o *ControllerCommitStatusOptions) Run() error {
// Always run in batch mode as a controller is never run interactively
o.BatchMode = true
jxClient, ns, err := o.JXClientAndDevNamespace()
if err != nil {
return err
}
kubeClient, _, err := o.KubeClientAndDevNamespace()
if err != nil {
return err
}
apisClient, err := o.ApiExtensionsClient()
if err != nil {
return err
}
err = kube.RegisterCommitStatusCRD(apisClient)
if err != nil {
return err
}
err = kube.RegisterPipelineActivityCRD(apisClient)
if err != nil {
return err
}
commitstatusListWatch := cache.NewListWatchFromClient(jxClient.JenkinsV1().RESTClient(), "commitstatuses", ns, fields.Everything())
kube.SortListWatchByName(commitstatusListWatch)
_, commitstatusController := cache.NewInformer(
commitstatusListWatch,
&jenkinsv1.CommitStatus{},
time.Minute*10,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
o.onCommitStatusObj(obj, jxClient, ns)
},
UpdateFunc: func(oldObj, newObj interface{}) {
o.onCommitStatusObj(newObj, jxClient, ns)
},
DeleteFunc: func(obj interface{}) {
},
},
)
stop := make(chan struct{})
go commitstatusController.Run(stop)
podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", ns, fields.Everything())
kube.SortListWatchByName(podListWatch)
_, podWatch := cache.NewInformer(
podListWatch,
&corev1.Pod{},
time.Minute*10,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
o.onPodObj(obj, jxClient, kubeClient, ns)
},
UpdateFunc: func(oldObj, newObj interface{}) {
o.onPodObj(newObj, jxClient, kubeClient, ns)
},
DeleteFunc: func(obj interface{}) {
},
},
)
stop = make(chan struct{})
podWatch.Run(stop)
if err != nil {
return err
}
return nil
}
func (o *ControllerCommitStatusOptions) onCommitStatusObj(obj interface{}, jxClient jenkinsv1client.Interface, ns string) {
check, ok := obj.(*jenkinsv1.CommitStatus)
if !ok {
log.Logger().Fatalf("commit status controller: unexpected type %v", obj)
} else {
err := o.onCommitStatus(check, jxClient, ns)
if err != nil {
log.Logger().Fatalf("commit status controller: %v", err)
}
}
}
func (o *ControllerCommitStatusOptions) onCommitStatus(check *jenkinsv1.CommitStatus, jxClient jenkinsv1client.Interface, ns string) error {
groupedBySha := make(map[string][]jenkinsv1.CommitStatusDetails, 0)
for _, v := range check.Spec.Items {
if _, ok := groupedBySha[v.Commit.SHA]; !ok {
groupedBySha[v.Commit.SHA] = make([]jenkinsv1.CommitStatusDetails, 0)
}
groupedBySha[v.Commit.SHA] = append(groupedBySha[v.Commit.SHA], v)
}
for _, vs := range groupedBySha {
var last jenkinsv1.CommitStatusDetails
for _, v := range vs {
lastBuildNumber, err := strconv.Atoi(getBuildNumber(last.PipelineActivity.Name))
if err != nil {
return err
}
buildNumber, err := strconv.Atoi(getBuildNumber(v.PipelineActivity.Name))
if err != nil {
return err
}
if lastBuildNumber < buildNumber {
last = v
}
}
err := o.update(&last, jxClient, ns)
if err != nil {
gitProvider, gitRepoInfo, err1 := o.getGitProvider(last.Commit.GitURL)
if err1 != nil {
return err1
}
_, err1 = extensions.NotifyCommitStatus(last.Commit, "error", "", "Internal Error performing commit status updates", "", last.Context, gitProvider, gitRepoInfo)
if err1 != nil {
return err
}
return err
}
}
return nil
}
func (o *ControllerCommitStatusOptions) onPodObj(obj interface{}, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) {
check, ok := obj.(*corev1.Pod)
if !ok {
log.Logger().Fatalf("pod watcher: unexpected type %v", obj)
} else {
err := o.onPod(check, jxClient, kubeClient, ns)
if err != nil {
log.Logger().Fatalf("pod watcher: %v", err)
}
}
}
func (o *ControllerCommitStatusOptions) onPod(pod *corev1.Pod, jxClient jenkinsv1client.Interface, kubeClient kubernetes.Interface, ns string) error {
if pod != nil {
labels := pod.Labels
if labels != nil {
buildName := labels[builds.LabelBuildName]
if buildName == "" {
buildName = labels[builds.LabelOldBuildName]
}
if buildName == "" {
buildName = labels[builds.LabelPipelineRunName]
}
if buildName != "" {
org := ""
repo := ""
pullRequest := ""
pullPullSha := ""
pullBaseSha := ""
buildNumber := ""
jxBuildNumber := ""
buildId := ""
sourceUrl := ""
branch := ""
containers, _, _ := kube.GetContainersWithStatusAndIsInit(pod)
for _, container := range containers {
for _, e := range container.Env {
switch e.Name {
case "REPO_OWNER":
org = e.Value
case "REPO_NAME":
repo = e.Value
case "PULL_NUMBER":
pullRequest = fmt.Sprintf("PR-%s", e.Value)
case "PULL_PULL_SHA":
pullPullSha = e.Value
case "PULL_BASE_SHA":
pullBaseSha = e.Value
case "JX_BUILD_NUMBER":
jxBuildNumber = e.Value
case "BUILD_NUMBER":
buildNumber = e.Value
case "BUILD_ID":
buildId = e.Value
case "SOURCE_URL":
sourceUrl = e.Value
case "PULL_BASE_REF":
branch = e.Value
}
}
}
sha := pullBaseSha
if pullRequest == "PR-" {
pullRequest = ""
} else {
sha = pullPullSha
branch = pullRequest
}
// if BUILD_ID is set, use it, otherwise if JX_BUILD_NUMBER is set, use it, otherwise use BUILD_NUMBER
if jxBuildNumber != "" {
buildNumber = jxBuildNumber
}
if buildId != "" {
buildNumber = buildId
}
pipelineActName := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, buildNumber))
// PLM TODO This is a bit of hack, we need a working build controller
// Try to add the lastCommitSha and gitUrl to the PipelineActivity
act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
if err != nil {
// An error just means the activity doesn't exist yet
log.Logger().Debugf("pod watcher: Unable to find PipelineActivity for %s", pipelineActName)
} else {
act.Spec.LastCommitSHA = sha
act.Spec.GitURL = sourceUrl
act.Spec.GitOwner = org
log.Logger().Debugf("pod watcher: Adding lastCommitSha: %s and gitUrl: %s to %s", act.Spec.LastCommitSHA, act.Spec.GitURL, pipelineActName)
_, err := jxClient.JenkinsV1().PipelineActivities(ns).PatchUpdate(act)
if err != nil {
// We can safely return this error as it will just get logged
return err
}
}
if org != "" && repo != "" && buildNumber != "" && (pullBaseSha != "" || pullPullSha != "") {
log.Logger().Debugf("pod watcher: build pod: %s, org: %s, repo: %s, buildNumber: %s, pullBaseSha: %s, pullPullSha: %s, pullRequest: %s, sourceUrl: %s", pod.Name, org, repo, buildNumber, pullBaseSha, pullPullSha, pullRequest, sourceUrl)
if sha == "" {
log.Logger().Warnf("pod watcher: No sha on %s, not upserting commit status", pod.Name)
} else {
prow := prow.Options{
KubeClient: kubeClient,
NS: ns,
}
prowConfig, _, err := prow.GetProwConfig()
if err != nil {
return errors.Wrap(err, "getting prow config")
}
contexts, err := config.GetBranchProtectionContexts(org, repo, prowConfig)
if err != nil {
return err
}
log.Logger().Debugf("pod watcher: Using contexts %v", contexts)
for _, ctx := range contexts {
if pullRequest != "" {
name := naming.ToValidName(fmt.Sprintf("%s-%s-%s-%s", org, repo, branch, ctx))
err = o.UpsertCommitStatusCheck(name, pipelineActName, sourceUrl, sha, pullRequest, ctx, pod.Status.Phase, jxClient, ns)
if err != nil {
return err
}
}
}
}
}
}
}
}
return nil
}
func (o *ControllerCommitStatusOptions) UpsertCommitStatusCheck(name string, pipelineActName string, url string, sha string, pullRequest string, context string, phase corev1.PodPhase, jxClient jenkinsv1client.Interface, ns string) error {
if name != "" {
status, err := jxClient.JenkinsV1().CommitStatuses(ns).Get(name, metav1.GetOptions{})
create := false
insert := false
actRef := jenkinsv1.ResourceReference{}
if err != nil {
create = true
} else {
log.Logger().Infof("pod watcher: commit status already exists for %s", name)
}
// Create the activity reference
act, err := jxClient.JenkinsV1().PipelineActivities(ns).Get(pipelineActName, metav1.GetOptions{})
if err == nil {
actRef.Name = act.Name
actRef.Kind = act.Kind
actRef.UID = act.UID
actRef.APIVersion = act.APIVersion
}
possibleStatusDetails := make([]int, 0)
for i, v := range status.Spec.Items {
if v.Commit.SHA == sha && v.PipelineActivity.Name == pipelineActName {
possibleStatusDetails = append(possibleStatusDetails, i)
}
}
statusDetails := jenkinsv1.CommitStatusDetails{}
log.Logger().Debugf("pod watcher: Discovered possible status details %v", possibleStatusDetails)
if len(possibleStatusDetails) == 1 {
log.Logger().Debugf("CommitStatus %s for pipeline %s already exists", name, pipelineActName)
} else if len(possibleStatusDetails) == 0 {
insert = true
} else {
return fmt.Errorf("More than %d status detail for sha %s, should 1 or 0, found %v", len(possibleStatusDetails), sha, possibleStatusDetails)
}
if create || insert {
// This is not the same pipeline activity the status was created for,
// or there is no existing status, so we make a new one
statusDetails = jenkinsv1.CommitStatusDetails{
Checked: false,
Commit: jenkinsv1.CommitStatusCommitReference{
GitURL: url,
PullRequest: pullRequest,
SHA: sha,
},
PipelineActivity: actRef,
Context: context,
}
}
if create {
log.Logger().Infof("pod watcher: Creating commit status for pipeline activity %s", pipelineActName)
status = &jenkinsv1.CommitStatus{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
"lastCommitSha": sha,
},
},
Spec: jenkinsv1.CommitStatusSpec{
Items: []jenkinsv1.CommitStatusDetails{
statusDetails,
},
},
}
_, err := jxClient.JenkinsV1().CommitStatuses(ns).Create(status)
if err != nil {
return err
}
} else if insert {
status.Spec.Items = append(status.Spec.Items, statusDetails)
log.Logger().Infof("pod watcher: Adding commit status for pipeline activity %s", pipelineActName)
_, err := jxClient.JenkinsV1().CommitStatuses(ns).PatchUpdate(status)
if err != nil {
return err
}
} else {
log.Logger().Debugf("pod watcher: Not updating or creating pipeline activity %s", pipelineActName)
}
} else {
return errors.New("commit status controller: Must supply name")
}
return nil
}
func (o *ControllerCommitStatusOptions) update(statusDetails *jenkinsv1.CommitStatusDetails, jxClient jenkinsv1client.Interface, ns string) error {
gitProvider, gitRepoInfo, err := o.getGitProvider(statusDetails.Commit.GitURL)
if err != nil {
return err
}
pass := false
if statusDetails.Checked {
var commentBuilder strings.Builder
pass = true
for _, c := range statusDetails.Items {
if !c.Pass {
pass = false
fmt.Fprintf(&commentBuilder, "%s | %s | %s | TODO | `/test this`\n", c.Name, c.Description, statusDetails.Commit.SHA)
}
}
if pass {
_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "success", "", "Completed successfully", "", statusDetails.Context, gitProvider, gitRepoInfo)
if err != nil {
return err
}
} else {
comment := fmt.Sprintf(
"The following commit statusDetails checks **failed**, say `/retest` to rerun them all:\n"+
"\n"+
"Name | Description | Commit | Details | Rerun command\n"+
"--- | --- | --- | --- | --- \n"+
"%s\n"+
"<details>\n"+
"\n"+
"Instructions for interacting with me using PR comments are available [here](https://git.k8s.io/community/contributors/guide/pull-requests.md). If you have questions or suggestions related to my behavior, please file an issue against the [kubernetes/test-infra](https://github.com/kubernetes/test-infra/issues/new?title=Prow%%20issue:) repository. I understand the commands that are listed [here](https://go.k8s.io/bot-commands).\n"+
"</details>", commentBuilder.String())
_, err := extensions.NotifyCommitStatus(statusDetails.Commit, "failure", "", fmt.Sprintf("%s failed", statusDetails.Context), comment, statusDetails.Context, gitProvider, gitRepoInfo)
if err != nil {
return err
}
}
} else {
_, err = extensions.NotifyCommitStatus(statusDetails.Commit, "pending", "", fmt.Sprintf("Waiting for %s to complete", statusDetails.Context), "", statusDetails.Context, gitProvider, gitRepoInfo)
if err != nil {
return err
}
}
return nil
}
func (o *ControllerCommitStatusOptions) getGitProvider(url string) (gits.GitProvider, *gits.GitRepository, error) {
// TODO This is an epic hack to get the git stuff working
gitInfo, err := gits.ParseGitURL(url)
if err != nil {
return nil, nil, err
}
authConfigSvc, err := o.GitAuthConfigService()
if err != nil {
return nil, nil, err
}
gitKind, err := o.GitServerKind(gitInfo)
if err != nil {
return nil, nil, err
}
for _, server := range authConfigSvc.Config().Servers {
if server.Kind == gitKind && len(server.Users) >= 1 {
// Just grab the first user for now
username := server.Users[0].Username
apiToken := server.Users[0].ApiToken
err = os.Setenv("GIT_USERNAME", username)
if err != nil {
return nil, nil, err
}
err = os.Setenv("GIT_API_TOKEN", apiToken)
if err != nil {
return nil, nil, err
}
break
}
}
return o.CreateGitProviderForURLWithoutKind(url)
}
func | (pipelineActName string) string {
if pipelineActName == "" {
return "-1"
}
pipelineParts := strings.Split(pipelineActName, "-")
if len(pipelineParts) > 3 {
return pipelineParts[len(pipelineParts)-1]
} else {
return ""
}
}
| getBuildNumber | identifier_name |
touyou_case.py | # -*- coding: utf-8 -*-
# @Time : 2020/3/7 13:30
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : qq_msg.py
# @Software: PyCharm
# @content : qq相关信息
import sys, os
from pyspark import SparkConf
from pyspark.sql import SparkSession
# from pyspark.sql import functions as fun
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
import time, copy, re, math
from datetime import datetime, timedelta,date
import json
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf-8')
#warehouse_location = '/user/hive/warehouse/'
conf=SparkConf().set('spark.driver.maxResultSize', '20g')
conf.set('spark.yarn.am.cores', 5)
conf.set('spark.executor.memory', '20g')
conf.set('spark.executor.instances', 40)
conf.set('spark.executor.cores', 8)
conf.set('spark.executor.extraJavaOptions', '-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+UseG1GC')
#conf.set("spark.sql.warehouse.dir", warehouse_location)
spark = SparkSession \
.builder \
.config(conf=conf) \
.enableHiveSupport() \
.getOrCreate()
path_prefix = '/phoebus/_fileservice/users/slmp/shulianmingpin/midfile/qq'
root_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'
def read_parquet(path):
'''读 parquet'''
df = spark.read.parquet(os.path.join(path_prefix,'parquet/',path))
return df
def write_parq |
'''写 parquet'''
df.write.mode("overwrite").parquet(path=os.path.join(path_prefix,'parquet/',path))
def write_orc(df, path,mode='overwrite'):
df.write.format('orc').mode(mode).save(path)
logger.info('write success')
def add_save_path(tablename, cp='', root='extenddir'):
'''
保存hive外表文件,分区默认为cp=2020,root为extenddir
:param tablename:
:param cp:
:return:
'''
if cp:
tmp_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp={}'
return tmp_path.format(root, tablename.lower(), cp)
return '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp=2020'.format(root, tablename.lower())
def read_orc(path):
try:
df = spark.read.orc(path)
if df.take(1):
return df
except:
return None
def read_csv(file,schema):
path= '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'+file+'.csv'
return spark.read.csv(path, schema=schema,header=None)
def exists_travel_zjhm(df,zjhms,start_key='sfzh1',end_key='sfzh2'):
''' 双向一度 任一节点满足 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all where (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0
or (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0 '''%(start_key,end_key)
res = spark.sql(sql)
return res
def inner_df_zjhm(df, zjhms, all_key='sfzh',find_key='zjhm'):
''' 是否存在 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all inner join zjhms on all.%s=zjhms.%s''' % (all_key,find_key)
res = spark.sql(sql)
return res
def find_one_degree_call():
phone_schema = StructType([
StructField("phone", StringType(), True)
])
phones = read_csv('phone',phone_schema)
edge_groupcall_detail = read_orc(add_save_path('edge_groupcall_detail'))
phones.createOrReplaceTempView('phones')
edge_groupcall_detail.createOrReplaceTempView('call')
sql = ''' select /*+ BROADCAST (phones)*/call.* from call where (select count(1) as num from phones where call.start_phone = phones.phone) != 0
or (select count(1) as num from phones where call.end_phone = phones.phone) != 0 '''
res1 = spark.sql(sql)
one_degree_phones = (res1.selectExpr('start_phone phone').union(res1.selectExpr('end_phone phone'))).subtract(phones)
one_degree_phones.createOrReplaceTempView('other_phone')
sql = '''
select /*+ BROADCAST (other_phone)*/ a.* from call a
inner join
other_phone b
inner join
other_phone c
on a.start_phone=b.phone and a.end_phone=c.phone
'''
res2 = spark.sql(sql)
res = res1.unionAll(res2)
write_orc(res,root_path+'edge_groupcall_detail/cp=2020',)
call_detail = read_orc(root_path+'edge_groupcall_detail/cp=2020')
call_detail.createOrReplaceTempView('call_detail')
sql = '''
select start_phone,end_phone,min(start_time) start_time,
max(end_time) end_time, sum(call_duration) call_total_duration ,
count(1) call_total_times from call_detail
group by start_phone, end_phone
'''
df = spark.sql(sql)
write_orc(df,root_path+'edge_groupcall/cp=2020',)
def find_all_phone_dw():
cps = ['2020051800', '2020051900', '2020052000', '2020052100', '2020052200',
'2020052300', '2020052400', '2020052500', '2020052600', '2020052700']
call = read_orc(root_path+'edge_groupcall/cp=2020')
phones = (call.selectExpr('start_phone phone').union(call.selectExpr('end_phone phone'))).distinct()
phones.unpersist()
phones.createOrReplaceTempView('phones')
for cp in cps:
df = read_orc(add_save_path('bbd_dw_detail_tmp',cp=cp))
df.createOrReplaceTempView('dw')
sql = ''' select /*+ BROADCAST (phones)*/ dw.* from dw inner join phones on dw.phone=phones.phone '''
res = spark.sql(sql)
write_orc(res,root_path+'case_one_degree_dw/cp=%s'%cp)
logger.info('%s dw down'%cp)
phones.unpersist()
# smz = read_orc(add_save_path('edge_person_smz_phone_top'))
#
# smz_res = smz.join(phones,phones['phone']==smz['end_phone'],'inner') \
# .selectExpr('start_person','end_phone','0 start_time','0 end_time')
#
# write_orc(smz_res, root_path + 'edge_person_smz_phone/cp=2020', )
def vertex_case_info():
cols = ['ajbh asjbh','ajmc','0 asjfskssj','0 asjfsjssj','"" asjly','""ajlb','0 fxasjsj','"" fxasjdd_dzmc',
'jyaq','"" ajbs','0 larq']
df = spark.read.csv(root_path+'vertex_case.csv',header=True, sep='\t').selectExpr(*cols)
res = df.drop_duplicates(['asjbh'])
write_orc(res,root_path+'vertex_case/cp=2020')
## 人节点
p1 = read_orc(add_save_path('vertex_person'))
p_smz = read_orc(root_path+'edge_person_smz_phone/cp=2020').selectExpr('start_person')
p_links = read_orc(root_path+'edge_case_link_person/cp=2020').selectExpr('sfzh start_person')
p_airline1 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_airline2 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_train1 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_train2 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_house1 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh1 start_person')
p_house2 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh2 start_person')
p_com = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('start_person')
dfs = [p_smz,p_links,p_airline1,p_airline2,p_train1,p_train2,p_house1,p_house2,p_com]
all_p = reduce(lambda a,b:a.unionAll(b),filter(lambda a:a,dfs))
p2 = all_p.distinct()
p = p2.join(p1, p2.start_person == p1.zjhm, 'left') \
.selectExpr('start_person zjhm', 'zjlx', 'gj', 'xm', 'ywxm', 'zym', 'xb', 'csrq', 'mz', 'jg',
'whcd', 'hyzk', 'zzmm', 'hkszdxz', 'sjjzxz').na.fill('')
res = p.drop_duplicates(['zjhm'])
write_orc(res, root_path+'vertex_person/cp=2020')
## 公司节点
coms = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('end_company').distinct()
all_com = read_orc(add_save_path('vertex_company'))
res = inner_df_zjhm(all_com,coms,all_key='company',find_key='end_company')
write_orc(res,root_path+'vertex_company/cp=2020')
def edge_info():
## 案件关联人
col1 = ['ajbh','zjhm sfzh','0 start_time','0 end_time']
df1 = spark.read.csv(root_path+'case_link_person.csv',header=True, sep='\t').selectExpr(*col1)
write_orc(df1,root_path+'edge_case_link_person/cp=2020')
## 案件关联电话
col2 = ['ajbh', 'phone', '0 start_time', '0 end_time']
df2 = spark.read.csv(root_path + 'case_link_phone.csv', header=True, sep='\t').selectExpr(*col2)
write_orc(df2, root_path + 'edge_case_link_phone/cp=2020')
def person_do_something():
zjhms = read_orc(root_path +'edge_person_smz_phone/cp=2020').select('start_person zjhm').distinct()
## 同出行信息,同房
airline_travel = read_orc(add_save_path('edge_person_with_airline_travel'))
trainline_travel = read_orc(add_save_path('edge_person_with_trainline_travel'))
same_hotel_house = read_orc(add_save_path('edge_same_hotel_house'))
write_orc(exists_travel_zjhm(airline_travel,zjhms),root_path+'edge_person_with_airline_travel/cp=2020')
write_orc(exists_travel_zjhm(trainline_travel,zjhms),root_path+'edge_person_with_trainline_travel/cp=2020')
write_orc(exists_travel_zjhm(same_hotel_house,zjhms),root_path+'edge_same_hotel_house/cp=2020')
## 工商信息
legal_com = read_orc(add_save_path('edge_person_legal_com'))
write_orc(inner_df_zjhm(legal_com,zjhms,all_key='start_person'),root_path+'edge_person_legal_com/cp=2020')
if __name__ == "__main__":
logger.info('========================start time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
# find_one_degree_call()
find_all_phone_dw()
# edge_info()
# vertex_case_info()
# person_do_something()
logger.info('========================end time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))) | uet(df,path): | identifier_name |
touyou_case.py | # -*- coding: utf-8 -*-
# @Time : 2020/3/7 13:30
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : qq_msg.py
# @Software: PyCharm
# @content : qq相关信息
import sys, os
from pyspark import SparkConf
from pyspark.sql import SparkSession
# from pyspark.sql import functions as fun
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
import time, copy, re, math
from datetime import datetime, timedelta,date
import json
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf-8')
#warehouse_location = '/user/hive/warehouse/'
conf=SparkConf().set('spark.driver.maxResultSize', '20g')
conf.set('spark.yarn.am.cores', 5)
conf.set('spark.executor.memory', '20g')
conf.set('spark.executor.instances', 40)
conf.set('spark.executor.cores', 8)
conf.set('spark.executor.extraJavaOptions', '-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+UseG1GC')
#conf.set("spark.sql.warehouse.dir", warehouse_location)
spark = SparkSession \
.builder \
.config(conf=conf) \
.enableHiveSupport() \
.getOrCreate()
path_prefix = '/phoebus/_fileservice/users/slmp/shulianmingpin/midfile/qq'
root_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'
def read_parquet(path):
'''读 parquet'''
df = spark.read.parquet(os.path.join(path_prefix,'parquet/',path))
return df
def write_parquet(df,path):
'''写 parquet'''
df.write.mode("overwrite").parquet(path=os.path.join(path_prefix,'parquet/',path))
def write_orc(df, path,mode='overwrite'):
df.write.format('orc').mode(mode).save(path)
logger.info('write success')
def add_save_path(tablename, cp='', root='extenddir'):
'''
保存hive外表文件,分区默认为cp=2020,root为extenddir
:param tablename:
:param cp:
:return:
'''
if cp:
tmp_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp={}'
return tmp_path.format(root, tablename.lower(), cp)
return '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp=2020'.format(root, tablename.lower())
def read_orc(path):
try:
df = spark.read.orc(path)
if df.take(1):
return df
except:
return None
def read_csv(file,schema):
path= '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'+file+'.csv'
return spark.read.csv(path, schema=schema,header=None)
def exists_travel_zjhm(df,zjhms,start_key='sfzh1',end_key='sfzh2'):
''' 双向一度 任一节点满足 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all where (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0
or (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0 '''%(start_key,end_key)
res = spark.sql(sql)
return res
def inner_df_zjhm(df, zjhms, all_key='sfzh',find_key='zjhm'):
''' 是否存在 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all inner join zjhms on all.%s=zjhms.%s''' % (all_key,find_key)
res = spark.sql(sql)
return res
def find_one_degree_call():
phone_schema = StructType([
StructField("phone", StringType(), True)
])
phones = read_csv('phone',phone_schema)
edge_groupcall_detail = read_orc(add_save_path('edge_groupcall_detail'))
phones.createOrReplaceTempView('phones')
edge_groupcall_detail.createOrReplaceTempView('call')
sql = ''' select /*+ BROADCAST (phones)*/call.* from call where (select count(1) as num from phones where call.start_phone = phones.phone) != 0
or (select count(1) as num from phones where call.end_phone = phones.phone) != 0 '''
res1 = spark.sql(sql)
one_degree_phones = (res1.selectExpr('start_phone phone').union(res1.selectExpr('end_phone phone'))).subtract(phones)
one_degree_phones.createOrReplaceTempView('other_phone')
sql = '''
select /*+ BROADCAST (other_phone)*/ a.* from call a
inner join
other_phone b
inner join
other_phone c
on a.start_phone=b.phone and a.end_phone=c.phone
'''
res2 = spark.sql(sql)
res = res1.unionAll(res2)
write_orc(res,root_path+'edge_groupcall_detail/cp=2020',)
call_detail = read_orc(root_path+'edge_groupcall_detail/cp=2020')
call_detail.createOrReplaceTempView('call_detail')
sql = '''
select start_phone,end_phone,min(start_time) start_time,
max(end_time) end_time, sum(call_duration) call_total_duration ,
count(1) call_total_times from call_detail
group by start_phone, end_phone
'''
df = spark.sql(sql)
write_orc(df,root_path+'edge_groupcall/cp=2020',)
def find_all_phone_dw():
cps = ['2020051800', '2020051900', '2020052000', '2020052100', '2020052200',
'2020052300', '2020052400', '2020052500', '2020052600', '2020052700']
call = read_orc(root_path+'edge_groupcall/cp=2020')
phones = (call.selectExpr('start_phone phone').union(call.selectExpr('end_phone phone'))).distinct()
phones.unpersist()
phones.createOrReplaceTempView('phones')
for cp in cps:
df = read_orc(add_save_path('bbd_dw_detail_tmp',cp=cp))
df.createOrReplaceTempView('dw')
sql = ''' select /*+ BROADCAST (phones)*/ dw.* from dw inner join phones on dw.phone=phones.phone '''
res = spark.sql(sql)
write_orc(res,root_path+'case_one_degree_dw/cp=%s'%cp)
logger.info('%s dw down'%cp)
phones.unpersist()
# smz = read_orc(add_save_path('edge_person_smz_phone_top'))
#
# smz_res = smz.join(phones,phones['phone']==smz['end_phone'],'inner') \
# .selectExpr('start_person','end_phone','0 start_time','0 end_time')
#
# write_orc(smz_res, root_path + 'edge_person_smz_phone/cp=2020', )
def vertex_case_info():
cols = ['ajbh asjbh','ajmc','0 asjfskssj','0 asjfsjssj','"" asjly','""ajlb','0 fxasjsj','"" fxasjdd_dzmc',
'jyaq','"" ajbs','0 larq']
df = spark.read.csv(root_path+'vertex_case.csv',header=True, sep='\t').selectExpr(*cols)
res = df.drop_duplicates(['asjbh'])
write_orc(res,root_path+'vertex_case/cp=2020')
## 人节点
p1 = read_orc(add_save_path('vertex_person'))
p_smz = read_orc(root_path+'edge_person_smz_phone/cp=2020').selectExpr('start_person')
p_links = read_orc(root_path+'edge_case_link_person/cp=2020').selectExpr('sfzh start_person')
p_airline1 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_airline2 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_train1 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_train2 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_house1 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh1 start_person')
p_house2 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh2 start_person')
p_com = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('start_person')
dfs = [p_smz,p_links,p_airline1,p_airline2,p_train1,p_train2,p_house1,p_house2,p_com]
all_p = reduce(lambda a,b:a.unionAll(b),filter(lambda a:a,dfs))
p2 = all_p.distinct()
p = p2.join(p1, p2.start_person == p1.zjhm, 'left') \
.selectExpr('start_person zjhm', 'zjlx', 'gj', 'xm', 'ywxm', 'zym', 'xb', 'csrq', 'mz', 'jg',
'whcd', 'hyzk', 'zzmm', 'hkszdxz', 'sjjzxz').na.fill('')
res = p.drop_duplicates(['zjhm'])
write_orc(res, root_path+'vertex_person/cp=2020')
## 公司节点
coms = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('end_company').distinct()
all_com = read_orc(add_save_path('vertex_company'))
res = inner_df_zjhm(all_com,coms,all_key='company',find_key='end_company')
write_orc(res,root_path+'vertex_company/cp=2020')
def edge_info():
## 案件关联人
col1 = ['ajbh','zjhm sfzh','0 start_time','0 end_time']
df1 = spark.read.csv(root_path+'case_link_person.csv',header=True, sep='\t').selectExpr(*col1)
write_orc(df1,root_path+'edge_case_link_person/cp=2020')
## 案件关联电话
col2 = ['ajbh', 'phone', '0 start_time', '0 end_time']
df2 = spark.read.csv(root_path + 'case_link_phone.csv', header=True, sep='\t').selectExpr(*col2)
write_orc(df2, root_path + 'edge_case_link_phone/cp=2020')
def person_do_something():
zjhms = read_orc(root_path +'edge_person_smz_phone/cp=2020').select('start_person zjhm').distinct()
## 同出行信息,同房
airline_travel = read_orc(add_save_path('edge_person_with_airline_travel'))
trainline_travel = read_orc(add_save_path('edge_person_with_trainline_travel'))
same_hotel_house = read_orc(add_save_path('edge_same_hotel_house'))
write_orc(exists_travel_zjhm(airline_travel,zjhms),root_path+'edge_person_with_airline_travel/cp=2020') |
## 工商信息
legal_com = read_orc(add_save_path('edge_person_legal_com'))
write_orc(inner_df_zjhm(legal_com,zjhms,all_key='start_person'),root_path+'edge_person_legal_com/cp=2020')
if __name__ == "__main__":
logger.info('========================start time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
# find_one_degree_call()
find_all_phone_dw()
# edge_info()
# vertex_case_info()
# person_do_something()
logger.info('========================end time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))) | write_orc(exists_travel_zjhm(trainline_travel,zjhms),root_path+'edge_person_with_trainline_travel/cp=2020')
write_orc(exists_travel_zjhm(same_hotel_house,zjhms),root_path+'edge_same_hotel_house/cp=2020') | random_line_split |
touyou_case.py | # -*- coding: utf-8 -*-
# @Time : 2020/3/7 13:30
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : qq_msg.py
# @Software: PyCharm
# @content : qq相关信息
import sys, os
from pyspark import SparkConf
from pyspark.sql import SparkSession
# from pyspark.sql import functions as fun
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
import time, copy, re, math
from datetime import datetime, timedelta,date
import json
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf-8')
#warehouse_location = '/user/hive/warehouse/'
conf=SparkConf().set('spark.driver.maxResultSize', '20g')
conf.set('spark.yarn.am.cores', 5)
conf.set('spark.executor.memory', '20g')
conf.set('spark.executor.instances', 40)
conf.set('spark.executor.cores', 8)
conf.set('spark.executor.extraJavaOptions', '-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+UseG1GC')
#conf.set("spark.sql.warehouse.dir", warehouse_location)
spark = SparkSession \
.builder \
.config(conf=conf) \
.enableHiveSupport() \
.getOrCreate()
path_prefix = '/phoebus/_fileservice/users/slmp/shulianmingpin/midfile/qq'
root_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'
def read_parquet(path):
'''读 parquet'''
df = spark.read.parquet(os.path.join(path_prefix,'parquet/',path))
return df
def write_parquet(df,path):
'''写 parquet'''
df.write.mode("overwrite").parquet(path=os.path.join(path_prefix,'parquet/',path))
def write_orc(df, path,mode='overwrite'):
df.write.format('orc').mode(mode).save(path)
logger.info('write success')
def add_save_path(tablename, cp='', root='extenddir'):
'''
保存hive外表文件,分区默认为cp=2020,root为extenddir
:param tablename:
:param cp:
:return:
'''
if cp:
tmp_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp={}'
return tmp_path.format(root, tablename.lower(), cp)
return '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp=2020'.format(root, tablename.lower())
def read_orc(path):
try:
df = spark.read.orc(path)
if df.take(1):
return df
except:
return None
def read_csv(file,schema):
path= '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'+file+'.csv'
return spark.read.csv(path, schema=schema,header=None)
def exists_travel_zjhm(df,zjhms,start_key='sfzh1',end_key='sfzh2'):
''' 双向一度 任一节点满足 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all where (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0
or (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0 '''%(start_key,end_key)
res = spark.sql(sql)
return res
def inner_df_zjhm(df, zjhms, all_key='sfzh',find_key='zjhm'):
''' 是否存在 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all inner join zjhms on all.%s=zjhms.%s''' % (all_key,find_key)
res = spark.sql(sql)
return res
def find_one_degree_call():
phone_schema = StructType([
StructField("phone", StringType(), True)
])
phones = read_csv('phone',phone_schema)
edge_groupcall_detail = read_orc(add_save_path('edge_groupcall_detail'))
phones.createOrReplaceTempView('phones')
edge_groupcall_detail.createOrReplaceTempView('call')
sql = ''' select /*+ BROADCAST (phones)*/call.* from call where (select count(1) as num from phones where call.start_phone = phones.phone) != 0
or (select count(1) as num from phones where call.end_phone = phones.phone) != 0 '''
res1 = spark.sql(sql)
one_degree_phones = (res1.selectExpr('start_phone phone').union(res1.selectExpr('end_phone phone'))).subtract(phones)
one_degree_phones.createOrReplaceTempView('other_phone')
sql = '''
select /*+ BROADCAST (other_phone)*/ a.* from call a
inner join
other_phone b
inner join
other_phone c
on a.start_phone=b.phone and a.end_phone=c.phone
'''
res2 = spark.sql(sql)
res = res1.unionAll(res2)
write_orc(res,root_path+'edge_groupcall_detail/cp=2020',)
call_detail = read_orc(root_path+'edge_groupcall_detail/cp=2020')
call_detail.createOrReplaceTempView('call_detail')
sql = '''
select start_phone,end_phone,min(start_time) start_time,
max(end_time) end_time, sum(call_duration) call_total_duration ,
count(1) call_total_times from call_detail
group by start_phone, end_phone
'''
df = spark.sql(sql)
write_orc(df,root_path+'edge_groupcall/cp=2020',)
def find_all_phone_dw():
cps = ['2020051800', '2020051900', '2020052000', '2020052100', '2020052200',
'2020052300', '2020052400', '2020052500', '2020052600', '2020052700']
call = read_orc(root_path+'edge_groupcall/cp=2020')
phones = (call.selectExpr('start_phone phone').union(call.selectExpr('end_phone phone'))).distinct()
phones.unpersist()
phones.createOrReplaceTempView('phones')
for cp in cps:
df = read_orc(add_save_path('bbd_dw_detail_tmp',cp=cp))
df.c | rson_smz_phone_top'))
#
# smz_res = smz.join(phones,phones['phone']==smz['end_phone'],'inner') \
# .selectExpr('start_person','end_phone','0 start_time','0 end_time')
#
# write_orc(smz_res, root_path + 'edge_person_smz_phone/cp=2020', )
def vertex_case_info():
cols = ['ajbh asjbh','ajmc','0 asjfskssj','0 asjfsjssj','"" asjly','""ajlb','0 fxasjsj','"" fxasjdd_dzmc',
'jyaq','"" ajbs','0 larq']
df = spark.read.csv(root_path+'vertex_case.csv',header=True, sep='\t').selectExpr(*cols)
res = df.drop_duplicates(['asjbh'])
write_orc(res,root_path+'vertex_case/cp=2020')
## 人节点
p1 = read_orc(add_save_path('vertex_person'))
p_smz = read_orc(root_path+'edge_person_smz_phone/cp=2020').selectExpr('start_person')
p_links = read_orc(root_path+'edge_case_link_person/cp=2020').selectExpr('sfzh start_person')
p_airline1 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_airline2 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_train1 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_train2 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_house1 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh1 start_person')
p_house2 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh2 start_person')
p_com = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('start_person')
dfs = [p_smz,p_links,p_airline1,p_airline2,p_train1,p_train2,p_house1,p_house2,p_com]
all_p = reduce(lambda a,b:a.unionAll(b),filter(lambda a:a,dfs))
p2 = all_p.distinct()
p = p2.join(p1, p2.start_person == p1.zjhm, 'left') \
.selectExpr('start_person zjhm', 'zjlx', 'gj', 'xm', 'ywxm', 'zym', 'xb', 'csrq', 'mz', 'jg',
'whcd', 'hyzk', 'zzmm', 'hkszdxz', 'sjjzxz').na.fill('')
res = p.drop_duplicates(['zjhm'])
write_orc(res, root_path+'vertex_person/cp=2020')
## 公司节点
coms = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('end_company').distinct()
all_com = read_orc(add_save_path('vertex_company'))
res = inner_df_zjhm(all_com,coms,all_key='company',find_key='end_company')
write_orc(res,root_path+'vertex_company/cp=2020')
def edge_info():
## 案件关联人
col1 = ['ajbh','zjhm sfzh','0 start_time','0 end_time']
df1 = spark.read.csv(root_path+'case_link_person.csv',header=True, sep='\t').selectExpr(*col1)
write_orc(df1,root_path+'edge_case_link_person/cp=2020')
## 案件关联电话
col2 = ['ajbh', 'phone', '0 start_time', '0 end_time']
df2 = spark.read.csv(root_path + 'case_link_phone.csv', header=True, sep='\t').selectExpr(*col2)
write_orc(df2, root_path + 'edge_case_link_phone/cp=2020')
def person_do_something():
zjhms = read_orc(root_path +'edge_person_smz_phone/cp=2020').select('start_person zjhm').distinct()
## 同出行信息,同房
airline_travel = read_orc(add_save_path('edge_person_with_airline_travel'))
trainline_travel = read_orc(add_save_path('edge_person_with_trainline_travel'))
same_hotel_house = read_orc(add_save_path('edge_same_hotel_house'))
write_orc(exists_travel_zjhm(airline_travel,zjhms),root_path+'edge_person_with_airline_travel/cp=2020')
write_orc(exists_travel_zjhm(trainline_travel,zjhms),root_path+'edge_person_with_trainline_travel/cp=2020')
write_orc(exists_travel_zjhm(same_hotel_house,zjhms),root_path+'edge_same_hotel_house/cp=2020')
## 工商信息
legal_com = read_orc(add_save_path('edge_person_legal_com'))
write_orc(inner_df_zjhm(legal_com,zjhms,all_key='start_person'),root_path+'edge_person_legal_com/cp=2020')
if __name__ == "__main__":
logger.info('========================start time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
# find_one_degree_call()
find_all_phone_dw()
# edge_info()
# vertex_case_info()
# person_do_something()
logger.info('========================end time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))) | reateOrReplaceTempView('dw')
sql = ''' select /*+ BROADCAST (phones)*/ dw.* from dw inner join phones on dw.phone=phones.phone '''
res = spark.sql(sql)
write_orc(res,root_path+'case_one_degree_dw/cp=%s'%cp)
logger.info('%s dw down'%cp)
phones.unpersist()
# smz = read_orc(add_save_path('edge_pe | conditional_block |
touyou_case.py | # -*- coding: utf-8 -*-
# @Time : 2020/3/7 13:30
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : qq_msg.py
# @Software: PyCharm
# @content : qq相关信息
import sys, os
from pyspark import SparkConf
from pyspark.sql import SparkSession
# from pyspark.sql import functions as fun
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql.window import Window
import time, copy, re, math
from datetime import datetime, timedelta,date
import json
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
reload(sys)
sys.setdefaultencoding('utf-8')
#warehouse_location = '/user/hive/warehouse/'
conf=SparkConf().set('spark.driver.maxResultSize', '20g')
conf.set('spark.yarn.am.cores', 5)
conf.set('spark.executor.memory', '20g')
conf.set('spark.executor.instances', 40)
conf.set('spark.executor.cores', 8)
conf.set('spark.executor.extraJavaOptions', '-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+UseG1GC')
#conf.set("spark.sql.warehouse.dir", warehouse_location)
spark = SparkSession \
.builder \
.config(conf=conf) \
.enableHiveSupport() \
.getOrCreate()
path_prefix = '/phoebus/_fileservice/users/slmp/shulianmingpin/midfile/qq'
root_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'
def read_parquet(path):
'''读 parquet'''
df = spark.read.parquet(os.path.join(path_prefix,'parquet/',path))
return df
def write_parquet(df,path):
'''写 parquet'''
df.write.mode("overwrite").parquet(path=os.path.join(path_prefix,'parquet/',path))
def write_orc(df, path,mode='overwrite'):
df.write.format('orc').mode(mode).save(path)
logger.info('write success')
def add_save_path(tablename, cp='', root='extenddir'):
'''
保存hive外表文件,分区默认为cp=2020,root为extenddir
:param tablename:
:param cp:
:return:
'''
if cp:
tmp_path = '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp={}'
return tmp_path.format(root, tablename.lower(), cp)
return '/phoebus/_fileservice/users/slmp/shulianmingpin/{}/{}/cp=2020'.format(root, tablename.lower())
def read_orc(path):
try:
df = spark.read.orc(path)
if df.take(1):
return df
except:
return None
def read_csv(file,schema):
path= '/phoebus/_fileservice/users/slmp/shulianmingpin/wa_data/touyou_case/'+file+'.csv'
return spark.read.csv(path, schema=schema,header=None)
def exists_travel_zjhm(df,zjhms,start_key='sfzh1',end_key='sfzh2'):
''' 双向一度 任一节点满足 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all where (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0
or (select count(1) as num from zjhms where all.%s = zjhms.zjhm) != 0 '''%(start_key,end_key)
res = spark.sql(sql)
return res
def inner_df_zjhm(df, zjhms, all_key='sfzh',find_key='zjhm'):
''' 是否存在 '''
df.createOrReplaceTempView('all')
zjhms.createOrReplaceTempView('zjhms')
sql = ''' select /*+ BROADCAST (zjhms)*/all.* from all inner join zjhms on all.%s=zjhms.%s''' % (all_key,find_key)
res = spark.sql(sql)
return res
def find_one_degree_call():
phone_schema = StructType([
StructField("phone", StringType(), True)
])
phones = read_csv('phone',phone_schema)
edge_groupcall_detail = read_orc(add_save_path('edge_groupcall_detail'))
phones.createOrReplaceTempView('phones')
edge_groupcall_detail.createOrReplaceTempView('call')
sql = ''' select /*+ BROADCAST (phones)*/call.* from call where (select count(1) as num from phones where call.start_phone = phones.phone) != 0
or (select count(1) as num from phones where call.end_phone = phones.phone) != 0 '''
res1 = spark.sql(sql)
one_degree_phones = (res1.selectExpr('start_phone phone').union(res1.selectExpr('end_phone phone'))).subtract(phones)
one_degree_phones.createOrReplaceTempView('other_phone')
sql = '''
select /*+ BROADCAST (other_phone)*/ a.* from call a
inner join
other_phone b
inner join
other_phone c
on a.start_phone=b.phone and a.end_phone=c.phone
'''
res2 = spark.sql(sql)
res = res1.unionAll(res2)
write_orc(res,root_path+'edge_groupcall_detail/cp=2020',)
call_detail = read_orc(root_path+'edge_groupcall_detail/cp=2020')
call_detail.createOrReplaceTempView('call_detail')
sql = '''
select start_phone,end_phone,min(start_time) start_time,
max(end_time) end_time, sum(call_duration) call_total_duration ,
count(1) call_total_times from call_detail
group by start_phone, end_phone
'''
df = spark.sql(sql)
write_orc(df,root_path+'edge_groupcall/cp=2020',)
def find_all_phone_dw():
cps = ['2020051800', '2020051900', '2020052000', '2020052100', '2020052200',
'2020052300', '2020052400', '2020052500', '2020052600', '2020052700']
call = read_orc(root_path+'edge_groupcall/cp=2020')
phones = (call.selectExpr('start_phone phone').union(call.selectExpr('end_phone phone'))).distinct()
phones.unpersist()
phones.createOrReplaceTempView('phones')
for cp in cps:
df = read_orc(add_save_path('bbd_dw_detail_tmp',cp=cp))
df.createOrReplaceTempView('dw')
sql = ''' select /*+ BROADCAST (phones)*/ dw.* from dw inner join phones on dw.phone=phones.phone '''
res = spark.sql(sql)
write_orc(res,root_path+'case_one_degree_dw/cp=%s'%cp)
logger.info('%s dw down'%cp)
phones.unpersist()
# smz = read_orc(add_save_path('edge_person_smz_phone_top'))
#
# smz_res = smz.join(phones,phones['phone']==smz['end_phone'],'inner') \
# .selectExpr('start_person','end_phone','0 start_time','0 end_time')
#
# write_orc(smz_res, root_path + 'edge_person_smz_phone/cp=2020', )
def vertex_case_info():
cols = ['ajbh asjbh','ajmc','0 asjfskssj','0 asjfsjssj','"" asjly','""ajlb','0 fxasjsj','"" fxasjdd_dzmc',
'jyaq','"" ajbs','0 larq']
df = spark.read.csv(root_path+'vertex_case.csv',header=True, sep='\t').selectExpr(*cols)
res = df.drop_duplicates(['asjbh'])
write_orc(res,root_path+'vertex_case/cp=2020')
## 人节点
p1 = read_orc(add_save_path('vertex_person'))
p_smz = read_orc(root_path+'edge_person_smz_phone/cp=2020').selectExpr('start_person')
p_links = read_orc(root_path+'edge_case_link_person/cp=2020').selectExpr('sfzh start_person')
p_airline1 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_airline2 = read_orc(root_path+'edge_person_with_airline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_train1 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh1 start_person')
p_train2 = read_orc(root_path+'edge_person_with_trainline_travel/cp=2020').selectExpr('sfzh2 start_person')
p_house1 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh1 start_person')
p_house2 = read_orc(root_path+'edge_same_hotel_house/cp=2020').selectExpr('sfzh2 start_person')
p_com = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('start_person')
dfs = [p_smz,p_links,p_airline1,p_airline2,p_train1,p_train2,p_house1,p_house2,p_com]
all_p = reduce(lambda a,b:a.unionAll(b),filter(lambda a:a,dfs))
p2 = all_p.distinct()
p = p2.join(p1, p2.start_person == p1.zjhm, 'left') \
.selectExpr('start_person zjhm', 'zjlx', 'gj', 'xm', 'ywxm', 'zym', 'xb', 'csrq', 'mz', 'jg',
'whcd', 'hyzk', 'zzmm', 'hkszdxz', 'sjjzxz').na.fill('')
res = p.drop_duplicates(['zjhm'])
write_orc(res, root_path+'vertex_person/cp=2020')
## 公司节点
coms = read_orc(root_path+'edge_person_legal_com/cp=2020').selectExpr('end_company').distinct()
all_com = read_orc(add_save_path('vertex_company'))
res = inner_df_zjhm(all_com,coms,all_key='company',find_key='end_company')
write_orc(res,root_path+'vertex_company/cp=2020')
def edge_info():
## 案件关联人
col1 = ['ajbh','zjhm sfzh','0 start_time','0 end_time']
df1 = spark.read.csv(root_path+'case_link_person.csv',header=True, sep='\t').selectExpr(*col1)
write_orc(df1,root_path+'edge_case_link_person/cp=2020')
## 案件关联电话
col2 = ['ajbh', 'phone', '0 start_time', '0 end_time']
df2 = spark.read.csv(root_path + 'case_link_phone.csv', header=True, sep='\t').selectExpr(*col2)
write_orc(df2, root_path + 'edge_case_link_phone/cp=2020')
def person_do_something():
zjhms = read_orc(root_path +'edge_person_smz_phone/cp=2020').select('start_person zjhm').distinct()
| strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
# find_one_degree_call()
find_all_phone_dw()
# edge_info()
# vertex_case_info()
# person_do_something()
logger.info('========================end time:%s==========================' % (
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))) | ## 同出行信息,同房
airline_travel = read_orc(add_save_path('edge_person_with_airline_travel'))
trainline_travel = read_orc(add_save_path('edge_person_with_trainline_travel'))
same_hotel_house = read_orc(add_save_path('edge_same_hotel_house'))
write_orc(exists_travel_zjhm(airline_travel,zjhms),root_path+'edge_person_with_airline_travel/cp=2020')
write_orc(exists_travel_zjhm(trainline_travel,zjhms),root_path+'edge_person_with_trainline_travel/cp=2020')
write_orc(exists_travel_zjhm(same_hotel_house,zjhms),root_path+'edge_same_hotel_house/cp=2020')
## 工商信息
legal_com = read_orc(add_save_path('edge_person_legal_com'))
write_orc(inner_df_zjhm(legal_com,zjhms,all_key='start_person'),root_path+'edge_person_legal_com/cp=2020')
if __name__ == "__main__":
logger.info('========================start time:%s==========================' % (
time. | identifier_body |
gui_imageviewer.py | # Image viewer
import wx
import sys
import io
import urllib.request, urllib.parse, urllib.error
from urllib.request import Request, urlopen
import asyncio
from io import StringIO
# from pydbg import dbg
from gui.coord_utils import ZoomInfo
from typing import List, Set, Dict, Tuple, Optional
from media import images
from gui.settings import PRO_EDITION
from generate_code.gen_plantuml import plant_uml_create_png_and_return_image_url_async
from dialogs.DialogPlantUmlText import DialogPlantUmlText
from common.dialog_dir_path import dialog_path_pyinstaller_push, dialog_path_pyinstaller_pop
from common.messages import *
import datetime
from app.settings import CancelRefreshPlantUmlEvent, EVT_CANCEL_REFRESH_PLANTUML_EVENT
from common.url_to_data import url_to_data
import logging
from common.logger import config_log
log = logging.getLogger(__name__)
config_log(log)
ALLOW_DRAWING = True
DEFAULT_IMAGE_SIZE = (21, 21) # used to be 2000, 2000 for some reason
BMP_EXTRA_MARGIN = 20 # margin for plantuml images to allow scrolling them fully into view
unregistered = not PRO_EDITION
class ImageViewer(wx.ScrolledWindow):
def __init__(self, parent, id=-1, size=wx.DefaultSize):
wx.ScrolledWindow.__init__(self, parent, id, (0, 0), size=size, style=wx.SUNKEN_BORDER)
self.lines = []
self.maxWidth, self.maxHeight = DEFAULT_IMAGE_SIZE
self.x = self.y = 0
self.curLine = []
self.drawing = False
self.SetBackgroundColour("WHITE") # for areas of the frame not covered by the bmp
# TODO these areas don't get refreshed properly when scrolling when pen marks are around, we only refresh bmp area and when bmp area < client window we get artifacts.
bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
self.bmp = bmp
self.bmp_transparent_ori = None
self.SetVirtualSize((self.maxWidth, self.maxHeight))
self.SetScrollRate(1, 1) # set the ScrollRate to 1 in order for panning to work nicely
self.zoomscale = 1.0
self.clear_whole_window = False
if ALLOW_DRAWING:
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftButtonEvent)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftButtonEvent)
self.Bind(wx.EVT_MOTION, self.OnLeftButtonEvent)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonEvent)
# self.Bind(wx.EVT_IDLE, self.OnIdle) # ANDY HACK
self.Bind(wx.EVT_PAINT, self.OnPaint)
# self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelScroll)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMove)
self.Bind(wx.EVT_SIZE, self.OnResize)
self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
self.Bind(wx.EVT_KEY_UP, self.onKeyUp)
self.Bind(wx.EVT_CHAR, self.onKeyChar) # 2019 added
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonMenu)
self.was_dragging = False # True if dragging map
self.move_dx = 0 # drag delta values
self.move_dy = 0
self.last_drag_x = None # previous drag position
self.last_drag_y = None
self.SetScrollbars(1, 1, int(self.GetVirtualSize()[0]), int(self.GetVirtualSize()[1]))
self.mywheelscroll = 0
self.popupmenu = None
# self.repaint_needed = False
self.working = False # key press re-entrancy protection
self.plantuml_text = ""
# Image fetching states and flags
self.error_msg = "" # message to display on big screen, when there is an error
self.fetching_msg = "" # message to display on big screen, use as flag for when working
self.fetching_started_time = None
@property
def working_fetching(self): # stop multiple plant uml refreshes
return self.fetching_msg != "" # if there is a message, that's a flag that we are http-ing
@property
def time_taken_fetching(self) -> float:
raw_diff = datetime.datetime.utcnow() - self.fetching_started_time
return raw_diff.total_seconds()
def clear(self):
self.error_msg = PLANTUML_VIEW_INITAL_HELP
self.fetching_msg = ""
self.bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
self.bmp_transparent_ori = None
self.plantuml_text = ""
self.lines = []
self.Refresh()
def clear_cos_connection_error(self, msg=""):
self.clear()
self.error_msg = PLANTUML_VIEW_INTERNET_FAIL % msg
# print(plant_uml_create_png_and_return_image_url.cache_info())
plant_uml_create_png_and_return_image_url_async.cache_clear()
url_to_data.cache_clear()
self.Refresh()
def user_aborted(self):
self.error_msg = PLANTUML_VIEW_USER_ABORT
plant_uml_create_png_and_return_image_url_async.cache_clear()
url_to_data.cache_clear()
self.Refresh()
def render_in_progress(self, rendering: bool, frame):
|
    async def ViewImage(self, thefile="", url=""):
        """Loads url or file and sets .bmp and .bmp_transparent_ori, the img is discarded.

        Args:
            thefile: path of an image on disk (takes precedence over url)
            url: HTTP url returning raw image bytes, fetched via url_to_data

        NOTE(review): if both arguments are empty, neither `img` nor `bmp` is
        bound and the size update below raises UnboundLocalError — callers
        must always pass exactly one of them.
        """
        self.error_msg = None
        if thefile:
            img = wx.Image(thefile, wx.BITMAP_TYPE_ANY)
            bmp = wx.Bitmap(img) # ANDY added 2019
        elif url:
            # print(url_to_data.cache_info())
            try:
                # url_to_data presumably returns (bytes, http_status) — verify against its module
                data, status = await url_to_data(url)
                log.info(f"(2nd, image grabbing) Response from plant_uml_server status_code {status}")
            except asyncio.TimeoutError as e: # there is no string repr of this exception
                self.clear_cos_connection_error(msg="(timeout)")
                url_to_data.cache_clear() # so if retry you won't get the same error
                log.error("TimeoutError getting plantuml IMAGE")
                return
            if status != 200:
                self.clear_cos_connection_error(msg=f"(bad response {status})")
                log.error(f"Error getting plantuml IMAGE, (bad response {status})")
                return
            stream = io.BytesIO(data)
            img = wx.Image(stream)
            bmp = wx.Bitmap(img)
            # try:
            #     bmp = img.ConvertToBitmap()
            # except Exception as e:
            #     print(e)
            #     return
        self.maxWidth, self.maxHeight = bmp.GetWidth(), bmp.GetHeight()
        self.maxHeight += BMP_EXTRA_MARGIN # stop bitmaps getting slightly clipped
        # dbg(bmp)
        # ANDY bmp.HasAlpha() does not work, since wx.Image has this method but wx.Bitmap
        # does not. But Bitmaps have some alpha channel concepts in them too...?
        # Render bmp to a second white bmp to remove transparency effects
        # if False and bmp.HasAlpha():
        if img.HasAlpha():
            # Keep the transparent original around (for "preserve transparency" save),
            # and flatten a copy onto an opaque background for on-screen display.
            self.bmp_transparent_ori = bmp
            bmp2 = wx.Bitmap(bmp.GetWidth(), bmp.GetHeight())
            dc = wx.MemoryDC()
            dc.SelectObject(bmp2)
            dc.Clear()
            dc.DrawBitmap(bmp, 0, 0, True)
            dc.SelectObject(wx.NullBitmap)
            self.bmp = bmp2
        else:
            self.bmp_transparent_ori = None
            self.bmp = bmp
# def OnIdle(self, event):
# """Idle Handler."""
# if self.working:
# dbg("re-entrancy avoided")
# return
# self.working = True
# if self.repaint_needed:
# dbg("repaint needed mate")
# self.Refresh()
# self.Update() # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
# wx.SafeYield() # Needed on Mac to see result if in a compute loop.
# self.repaint_needed = False
# self.working = 0
    def _CreateNewWhiteBmp(self, width, height, wantdc=False):
        """Create a bitmap filled with the window background colour.

        Returns (bmp, dc) in BOTH cases; when wantdc is False the dc has been
        deselected from the bitmap and should not be drawn on. Callers unpack
        the tuple either way.
        """
        bmp = wx.Bitmap(width, height)
        # Could simply return here, but bitmap would be black (or a bit random, under linux)
        dc = wx.MemoryDC()
        dc.SelectObject(bmp)
        dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
        dc.Clear()
        # dbg(wantdc)
        if wantdc: # just in case want to continue drawing
            return bmp, dc
        else:
            dc.SelectObject(wx.NullBitmap)
            return bmp, dc
    def OnHandleSaveImage(self, event):
        """Menu handler: save displayed image as PNG (implemented in the pro edition only)."""
        pass # pro feature
    def OnHandleSaveImagePreserveTransparencies(self, event):
        """Menu handler: save image keeping alpha channel (pro edition only)."""
        pass # pro feature
    def OnHandleSaveImageInclDoodles(self, event):
        """Menu handler: save image including pen doodles (pro edition only)."""
        pass # pro feature
    def OnHandleQuickLoadImage(self, event):
        """Debug menu handler: load a hard-coded image from disk.

        NOTE(review): FILE is not defined in this module's visible scope, and
        ViewImage is an async coroutine — calling it without await creates a
        coroutine that never runs. TODO confirm how this is scheduled.
        """
        self.ViewImage(FILE)
    def OnHandleQuickLoadFromYumlUrl(self, event):
        """Debug menu handler: load a sample class diagram from yuml.me.

        NOTE(review): ViewImage is async and is called here without await —
        the coroutine is never executed. TODO confirm scheduling.
        """
        baseUrl = "http://yuml.me/diagram/dir:lr;scruffy/class/"
        yuml_txt = (
            "[Customer]+1->*[Order],[Order]++1-items >*[LineItem],[Order]-0..1>[PaymentMethod]"
        )
        url = baseUrl + urllib.parse.quote(yuml_txt)
        self.ViewImage(url=url)
    def OnRightButtonMenu(self, event): # Menu
        """Build and show the right-click context menu.

        SHIFT+right-click is skipped so it reaches the pen-doodle handler.
        Pro-only items are shown with a 'pro' badge bitmap and disabled via
        OnPro_update when unregistered. ALT+right-click adds debug items.
        """
        if event.ShiftDown():
            event.Skip()
            return
        """
        Accelerator tables need unique ids, whereas direct menuitem binding with Bind(...source=menuitem)
        doesn't care about ids and can thus use wx.ID_ANY (which is always -1)
        Use wx.NewIdRef() if you want a real fresh id.
        """
        x, y = event.GetPosition()
        frame = self.GetTopLevelParent()
        image = images.pro.GetBitmap() if unregistered else None
        if self.popupmenu:
            self.popupmenu.Destroy() # wx.Menu objects need to be explicitly destroyed (e.g. menu.Destroy()) in this situation. Otherwise, they will rack up the USER Objects count on Windows; eventually crashing a program when USER Objects is maxed out. -- U. Artie Eoff http://wiki.wxpython.org/index.cgi/PopupMenuOnRightClick
        self.popupmenu = wx.Menu() # Create a menu
        if event.AltDown():
            # Debug menu items
            item = self.popupmenu.Append(wx.ID_ANY, "Load Image...")
            frame.Bind(wx.EVT_MENU, self.OnHandleFileLoad, item)
            item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Disk")
            frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadImage, item)
            item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Yuml Url")
            frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadFromYumlUrl, item)
            self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG...")
        frame.Bind(wx.EVT_MENU, self.OnHandleSaveImage, item)
        frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
        if image:
            item.SetBitmap(image)
        if self.bmp_transparent_ori:
            # Only offer the transparency-preserving save when a transparent original exists
            item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (preserve transparent areas)")
            frame.Bind(wx.EVT_MENU, self.OnHandleSaveImagePreserveTransparencies, item)
            frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
            if image:
                item.SetBitmap(image)
        item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (incl. pen doodles)")
        frame.Bind(wx.EVT_MENU, self.OnHandleSaveImageInclDoodles, item)
        frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
        if image:
            item.SetBitmap(image)
        self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "Clear pen doodles (SHIFT drag to create)\tE")
        frame.Bind(wx.EVT_MENU, self.OnClearPenLines, item)
        self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "View PlantUML markup...")
        frame.Bind(wx.EVT_MENU, self.OnViewPlantUmlMarkup, item)
        self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "Cancel")
        frame.PopupMenu(self.popupmenu, wx.Point(x, y))
    def OnPro_update(self, event):
        """EVT_UPDATE_UI handler: disable pro-only menu items when unregistered."""
        event.Enable(not unregistered)
    def OnViewPlantUmlMarkup(self, event, alt_down=False):
        """Show the current PlantUML markup in a modal dialog.

        ALT held at invocation also prints the markup to stdout.
        NOTE(review): the alt_down parameter is unused — ALT is read live via
        wx.GetMouseState(); confirm whether the parameter can be retired.
        """
        mouse_state: wx.MouseState = wx.GetMouseState()
        if mouse_state.AltDown():
            print(self.plantuml_text)
        def display_dialog(txt_plantuml):
            """
            Display the PlantUML markup in a read-only (when unregistered) dialog.
            Args:
                txt_plantuml: PlantUML markup string to show
            """
            class EditDialog(DialogPlantUmlText):
                # Custom dialog built via wxformbuilder - subclass it first, to hook up event handlers
                def OnClassNameEnter(self, event):
                    self.EndModal(wx.ID_OK)
            # change cwd so that dialog can find the 'pro' image jpg which is relative to dialogs/
            # when deployed via pyinstaller, this path is a bit tricky to find, so use this func.
            # ('self' here is the ImageViewer, captured from the enclosing method; 'dir' shadows the builtin)
            dir = dialog_path_pyinstaller_push(frame = self)
            try:
                dialog = EditDialog(None)
                dialog.txt_plantuml.Value = txt_plantuml
                dialog.txt_plantuml.SetFocus()
                dialog.txt_plantuml.Enable(not unregistered)
                dialog.ShowModal()
                # dialog.Show()
                dialog.Destroy()
            finally:
                dialog_path_pyinstaller_pop()
        display_dialog(self.plantuml_text)
        # wx.MessageBox(f"PRO mode lets you copy the PlantUML text to the clipboard\n\n{self.plantuml_text}")
    def OnHandleFileLoad(self, event):
        """Menu handler: pick an image file via a file dialog and display it.

        NOTE(review): ViewImage is async and is called without await — the
        coroutine is never executed. TODO confirm scheduling.
        """
        frame = self.GetTopLevelParent()
        wildcard = (
            "Images (*.png; *.jpeg; *.jpg; *.bmp)|*.png;*.jpeg;*.jpg;*.bmp|" "All files (*.*)|*.*"
        )
        dlg = wx.FileDialog(
            parent=frame,
            message="choose",
            defaultDir=".",
            defaultFile="",
            wildcard=wildcard,
            style=wx.FD_OPEN,
            pos=wx.DefaultPosition,
        )
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
            self.ViewImage(filename)
    def SetScrollRateSmart(self, newstep=None, printinfo=False):
        """Change the scroll unit without the scroll position visibly changing.

        Args:
            newstep: new scroll step in pixels, or None to only print diagnostics
            printinfo: dump before/after scroll state to stdout

        There is a slight jump when going from small scroll step e.g. 1 to large e.g. 20
        because the resolution isn't the same (scroll step might be 3 out of 10 instead of the
        more precise 30 out of 100). I couldn't get rid of this jump, even by fiddling with
        the virtual size - might just have to live with it.
        """
        oldstep = self.GetScrollPixelsPerUnit()[0]
        oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
        oldscrolly = self.GetScrollPos(wx.VERTICAL)
        oldvirtx = self.GetVirtualSize()[0]
        oldvirty = self.GetVirtualSize()[1]
        # rot = event.GetWheelRotation()
        if printinfo:
            print(f"\nIN step {oldstep} newstep {newstep} old scroll {oldscrollx}, {oldscrolly} virt {oldvirtx}, {oldvirty}")
        if newstep is not None:
            if oldstep == newstep:
                if printinfo:
                    print(f"Nothing to do, step of {newstep} already set.")
            else:
                # Scale the scroll position by the step ratio so the view doesn't move
                q = newstep / oldstep # min(1, newstep)
                newscrollx = int(oldscrollx / q)
                newscrolly = int(oldscrolly / q)
                # newvirtx = oldvirtx / q
                # newvirty = oldvirty / q
                # Aha - image size * step => virtual bounds
                newvirtx = int(self.maxWidth / newstep * self.zoomscale)
                newvirty = int(self.maxHeight / newstep * self.zoomscale)
                if printinfo:
                    print(f"OUT step {newstep} new scroll {newscrollx}, {newscrolly} virt {newvirtx}, {newvirty} q {q}")
                self.SetScrollbars(
                    int(newstep), int(newstep),
                    int(newvirtx), int(newvirty), # new virtual size
                    int(newscrollx), int(newscrolly), # new scroll positions
                    noRefresh=True)
                # self.Refresh()
        if printinfo:
            print(self.GetVirtualSize())
def onKeyChar(self, event):
if event.GetKeyCode() >= 256:
event.Skip()
return
if self.working:
event.Skip()
return
self.working = True
keycode = chr(event.GetKeyCode())
# print("imgkeycode", keycode)
if keycode == "a":
self.SetScrollRateSmart(newstep=None, printinfo=True)
# elif keycode in ["1", "2", "3", "4", "5", "6", "7", "8"]:
# todisplay = ord(keycode) - ord("1")
# self.snapshot_mgr.Restore(todisplay) # snapshot 1 becomes 0 as a param
# self.mega_refresh()
elif keycode == "d":
self.SetScrollRateSmart(newstep=20, printinfo=True)
elif keycode == "s":
self.SetScrollRateSmart(newstep=1, printinfo=True)
elif keycode == "e":
self.clear_pen_lines()
self.working = False
    def onKeyPress(self, event): # ANDY
        """EVT_KEY_DOWN handler: SHIFT shows the pencil cursor, ESC aborts a
        pending PlantUML fetch or clears/restores the on-screen message."""
        keycode = event.GetKeyCode()
        # dbg(keycode)
        if event.ShiftDown():
            self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
        if keycode == wx.WXK_ESCAPE:
            if self.working_fetching:
                # Abort the in-flight render: tell the frame to cancel and show abort msg
                frame = self.GetTopLevelParent()
                frame.SetStatusText("ESC key detected: PlantUML render Aborted")
                wx.PostEvent(frame, CancelRefreshPlantUmlEvent())
                self.user_aborted()
            else:
                if self.plantuml_text:
                    self.error_msg = "" # clear any annoying error message, so can see bmp
                    self.Refresh()
                else:
                    self.error_msg = PLANTUML_VIEW_INITAL_HELP
        # if self.working:
        #     event.Skip()
        #     return
        # self.working = True
        #
        # keycode = event.GetKeyCode()  # http://www.wxpython.org/docs/api/wx.KeyEvent-class.html
        # self.working = False
        event.Skip()
    def onKeyUp(self, event): # ANDY
        """EVT_KEY_UP handler: restore the default cursor (ends pencil mode)."""
        self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
        event.Skip()
    def getWidth(self):
        """Width of the displayed bitmap in pixels (unzoomed)."""
        return self.maxWidth
    def getHeight(self):
        """Height of the displayed bitmap in pixels (unzoomed, incl. BMP_EXTRA_MARGIN)."""
        return self.maxHeight
    def OnErase(self, event): # ANDY
        """Intentionally empty — presumably suppresses background-erase flicker.
        NOTE(review): the EVT_ERASE_BACKGROUND Bind is commented out in __init__,
        so this handler appears to be currently unused; confirm before removing."""
        pass
def OnWheelScroll(self, event):
## This is an example of what to do for the EVT_MOUSEWHEEL event,
## but since wx.ScrolledWindow does this already it's not
## necessary to do it ourselves.
#
# ANDY
# But since I set the ScrollRate to 1
# in order for panning to work nicely
# scrolling is too slow. So invoke this code!!
#
# dbg(f"OnWheelScroll {self.GetScrollPixelsPerUnit()}")
if event.ControlDown():
event.Skip()
return
# Version 1 - too jumpy
# self.SetScrollRate(20, 20)
# Version 2 - nicer, but need a common routine callable from multiple places
#
# oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
# oldscrolly = self.GetScrollPos(wx.VERTICAL)
# # dbg(oldscrollx)
# # dbg(oldscrolly)
#
# # How to adjust ? take into account the 1 to 20 factor, as well as the zoom level
# delta = event.GetWheelDelta()
# rot = event.GetWheelRotation()
# # dbg(delta)
# # dbg(rot)
# # if rot > 0:
# if self.GetScrollPixelsPerUnit()[0] != 20:
# dbg(self.GetScrollPixelsPerUnit())
#
# # dbg(oldscrollx)
# # dbg(oldscrolly)
# oldscrollx = oldscrollx /20#- (1 * rot) / 20 * self.zoomscale
# oldscrolly = oldscrolly /20#- (1 * rot) / 20 * self.zoomscale
# # oldscrollx = oldscrollx + 20 * rot / self.zoomscale
# # oldscrolly = oldscrolly + 20 * rot / self.zoomscale
# # dbg(oldscrollx)
# # dbg(oldscrolly)
#
# self.SetScrollbars(
# 20, 20, # each scroll unit is 1 pixel, meaning scroll units match client coord units
# self.GetVirtualSize()[0] / 20, self.GetVirtualSize()[1] / 20, # new virtual size
# oldscrollx, oldscrolly, # new scroll positions
# noRefresh=True
# )
# Version 3
self.SetScrollRateSmart(20)
# Old version 0 - complex and buggy and jumpy
#
# delta = event.GetWheelDelta()
# rot = event.GetWheelRotation()
# linesPer = event.GetLinesPerAction()
# # print delta, rot, linesPer
# linesPer *= 20 # ANDY trick to override the small ScrollRate
# ws = self.mywheelscroll
# ws = ws + rot
# lines = ws / delta
# ws = ws - lines * delta
# self.mywheelscroll = ws
# if lines != 0:
# lines = lines * linesPer
# vsx, vsy = self.GetViewStart()
# scrollTo = vsy - lines
# self.Scroll(-1, scrollTo)
event.Skip()
    def OnResize(self, event): # ANDY interesting - GetVirtualSize grows when resize frame
        """EVT_SIZE handler: if the client area now exceeds the picture,
        schedule a full background clear on the next paint to avoid artifacts."""
        self.DebugSizez("resize")
        if self.NeedToClear() and self.IsShownOnScreen():
            self.clear_whole_window = True
            self.Refresh()
    def CalcVirtSize(self):
        """Return (w, h) of the zoomed picture — the intended virtual size,
        independent of wx's auto-grown GetVirtualSize()."""
        # VirtualSize is essentially the visible picture
        return (self.maxWidth * self.zoomscale, self.maxHeight * self.zoomscale)
def NeedToClear(self):
# Since VirtualSize auto grows when resize frame, can't rely on it to know if client area is bigger than visible pic.
# Need to rederive the original VirtualSize set when zoom calculated rather than relying on calls to self.GetVirtualSize()
return (
self.GetClientSize()[0] > self.CalcVirtSize()[0]
or self.GetClientSize()[1] > self.CalcVirtSize()[1]
)
    def DebugSizez(self, fromwheremsg):
        """Dump sizing diagnostics to stdout.

        Currently DISABLED via the unconditional `return` below — remove it
        to re-enable the debug output.
        """
        return
        if self.NeedToClear():
            msg = "!!!!!!! "
        else:
            msg = "! "
        print(
            msg
            + "(%s) visible %d NeedToClear %s GetVirtualSize %d getWidth %d GetClientSize %d self.GetViewStart() %d self.maxWidth %d "
            % (
                fromwheremsg,
                self.IsShownOnScreen(),
                self.NeedToClear(),
                self.GetVirtualSize()[0],
                self.getWidth(),
                self.GetClientSize()[0],
                self.GetViewStart()[0],
                self.maxWidth,
            )
        )
    def OnPaint(self, event): # ANDY
        """EVT_PAINT handler: draw the whole window through a scrolled, zoomed DC."""
        dc = wx.PaintDC(self)
        self.PrepareDC(dc)
        dc.SetUserScale(self.zoomscale, self.zoomscale)
        # since we're not buffering in this case, we have to
        # paint the whole window, potentially very time consuming.
        self.DoDrawing(dc)
    def Redraw(self, dc):
        """Redraw everything onto an externally-prepared DC (delegates to DoDrawing)."""
        self.DoDrawing(dc)
    def OnLeftDown(self, event): # ANDY some PAN ideas from http://code.google.com/p/pyslip/
        """Left mouse button down. Prepare for possible drag.

        SHIFT+click is skipped so the pen-doodle handler gets it instead.
        """
        if event.ShiftDown():
            event.Skip()
            return
        click_posn = event.GetPosition()
        self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
        # remember the drag anchor; OnMove pans relative to this
        (self.last_drag_x, self.last_drag_y) = click_posn
        event.Skip()
    def OnLeftUp(self, event): # ANDY PAN
        """Left mouse button up: end a pan drag and restore the cursor."""
        if event.ShiftDown():
            event.Skip()
            return
        self.last_drag_x = self.last_drag_y = None
        self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
        # turn off drag
        self.was_dragging = False
        # force PAINT event to remove selection box (if required)
        # self.Update()
        event.Skip()
def OnMove(self, event): # ANDY PAN
"""Handle a mouse move (map drag).
event the mouse move event
"""
if event.ShiftDown():
event.Skip()
return
# for windows, set focus onto pyslip window
# linux seems to do this automatically
if sys.platform == "win32" and self.FindFocus() != self:
self.SetFocus()
# get current mouse position
(x, y) = event.GetPosition()
# from common.architecture_support import whoscalling2
# dbg(whoscalling2())
# self.RaiseMousePositionEvent((x, y))
if event.Dragging() and event.LeftIsDown():
# are we doing box select?
if not self.last_drag_x is None:
# no, just a map drag
self.was_dragging = True
dx = self.last_drag_x - x
dy = self.last_drag_y - y
# dx /= 20
# dy /= 20
# dbg(dx)
# dbg(dy)
# print "PAN %d %d" % (dx, dy)
# print self.GetViewStart()
currx, curry = self.GetViewStart()
self.Scroll(
currx + dx, curry + dy
) # Note The positions are in scroll units, not pixels, so to convert to pixels you will have to multiply by the number of pixels per scroll increment. If either parameter is -1, that position will be ignored (no change in that direction).
# print "Scroll pan %d %d" % (currx+dx, curry+dy)
# adjust remembered X,Y
self.last_drag_x = x
self.last_drag_y = y
# redraw client area
self.Update()
    def DoDrawing(self, dc, printing=False):
        """Paint everything: optional background clear, the bitmap, pen doodles,
        then any error or fetching message on top.

        NOTE(review): the `printing` parameter is unused here — presumably kept
        for signature compatibility with a print path; confirm.
        """
        # dbg(f"DoDrawing {len(self.curLine)}")
        if self.clear_whole_window:
            # one-shot flag set by OnResize when client area exceeds the picture
            dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
            dc.Clear()
            self.clear_whole_window = False
        if self.bmp:
            dc.DrawBitmap(self.bmp, 0, 0, False) # false means don't use mask
        # dc.SetTextForeground('BLUE')
        # text = "UML via Pynsource and PlantUML"
        # dc.DrawText(text, 2, 2)
        self.DrawSavedLines(dc)
        if self.error_msg:
            dc.DrawText(self.error_msg, 2, 2)
        # Only show the fetching message after half a second, to avoid flicker
        if self.fetching_msg and self.time_taken_fetching > 0.5:
            """
            Text is never drawn with the current pen. It's drawn with the current
            text color. Try
            dc.SetTextForeground((255,255,0))
            This is a historical implementation detail in Windows GDI. The pen is
            used for lines, the brush is used for fills, and text had its own
            attributes.
            """
            # dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
            # dc.SetBrush(wx.Brush("RED"))
            # dc.SetTextForeground((204, 0, 0)) # red
            # dc.SetTextForeground((204, 102, 0)) # dark orange
            dc.SetTextForeground((255, 255, 255)) # white
            dc.SetTextBackground((0, 0, 0)) # black
            dc.SetBackgroundMode(wx.SOLID)
            dc.DrawText(self.fetching_msg, 2, 2)
def DrawSavedLines(self, dc): # PEN DRAWING
dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
for line in self.lines:
for coords in line:
dc.DrawLine(*map(int, coords))
    def OnClearPenLines(self, event):
        """Menu handler: erase all pen doodles."""
        self.clear_pen_lines()
    def clear_pen_lines(self):
        """Drop all stored doodle strokes and repaint."""
        self.lines = []
        self.Refresh()
    def SetXY(self, event): # PEN DRAWING
        """Record the event position (image coords) as the current pen position."""
        self.x, self.y = self.ConvertEventCoords(event)
def ConvertEventCoords(self, event): # PEN DRAWING
newpos = self.CalcUnscrolledPosition(event.GetX(), event.GetY())
newpos = (
newpos[0] * self.GetScaleX() / self.zoomscale,
newpos[1] * self.GetScaleY() / self.zoomscale,
)
return newpos
    def OnRightButtonEvent(self, event): # PEN DRAWING - ANDY
        """SHIFT+right-click: show pencil cursor and erase all doodles."""
        if event.ShiftDown():
            self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
            self.clear_pen_lines()
        event.Skip()
    def OnLeftButtonEvent(self, event): # PEN DRAWING
        """Pen-doodle state machine, active only while SHIFT is held.

        LeftDown starts a stroke (captures the mouse), Dragging appends
        segments and repaints, LeftUp ends the stroke and releases the mouse.
        """
        if event.ShiftDown():
            self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
            if event.LeftDown():
                # self.SetScrollRate(1, 1)  # works to slow the scrolling, but causes scroll jump to 0,0
                self.SetScrollRateSmart(1)  # smoother pan when scroll step is 1
                # dbg(f"LeftDown {self.GetScrollPixelsPerUnit()}")
                self.SetFocus()
                self.SetXY(event)
                self.curLine = []
                self.CaptureMouse()
                self.drawing = True
            elif event.Dragging() and self.drawing:
                # print("dragging.....")
                # ANDY UPDATE 2019 - Drawing to wx.ClientDC doesn't work well these days and you only
                # see the result when an on paint occurs much later - and often cannot force the paint?
                # instead, issue a Refresh() which triggers a paint, and draw there instead.
                coords = (self.x, self.y) + self.ConvertEventCoords(event)
                self.curLine.append(coords)
                self.lines.append(self.curLine)
                self.curLine = []
                self.SetXY(event)  # reset line drawing start point to current mouse pos
                self.Refresh()
                # Version 0. Old version.draw directly to a wx.ClientDC
                # dc = wx.ClientDC(self)
                # self.PrepareDC(dc)
                # dc.SetUserScale(self.zoomscale, self.zoomscale)
                #
                # dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
                # coords = (self.x, self.y) + self.ConvertEventCoords(event)
                # self.curLine.append(coords)  # For when we are not double buffering #ANDY
                # dc.DrawLine(*coords)
                # self.SetXY(event)
                #
                # Failed Hacks to try and make version 0 work.
                #
                # self.Refresh()  # ANDY added, pheonix
                # # frame = self.GetTopLevelParent()
                # # frame.Layout()  # needed when running phoenix
                # self.Update()  # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
                # wx.SafeYield()  # Needed on Mac to see result if in a compute loop.
                # self.repaint_needed = True
            elif event.LeftUp() and self.drawing:
                self.lines.append(self.curLine)
                self.curLine = []
                self.ReleaseMouse()
                self.drawing = False
        self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
        self.Refresh()  # ANDY added, pheonix
class TestFrame(wx.Frame):
    """Minimal frame hosting a single ImageViewer, for standalone testing."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ImageViewer(self)
class App(wx.App):
    """Standalone demo application wrapping TestFrame."""
    def OnInit(self):
        frame = TestFrame(None, title="Andy Image Viewer")
        frame.Show(True)
        frame.Centre()
        return True
if __name__ == "__main__":
    # Standalone demo entry point: run the image viewer in its own frame.
    app = App(0)
    app.MainLoop()
| if rendering:
self.error_msg = ""
self.fetching_msg = PLANTUML_VIEW_FETCHING_MSG
self.fetching_started_time = datetime.datetime.utcnow()
else:
self.fetching_msg = ""
# Update PlantUML view, guarding against the situation where when shutting down app
# and killing pending tasks, the window may not exist anymore
try:
self.Refresh()
wx.SafeYield() # Needed to "breathe" and refresh the UI
# print("warning use of safe yield in image viewer")
except RuntimeError:
pass # avoid error when shutting down tasks | identifier_body |
gui_imageviewer.py | # Image viewer
import wx
import sys
import io
import urllib.request, urllib.parse, urllib.error
from urllib.request import Request, urlopen
import asyncio
from io import StringIO
# from pydbg import dbg
from gui.coord_utils import ZoomInfo
from typing import List, Set, Dict, Tuple, Optional
from media import images
from gui.settings import PRO_EDITION
from generate_code.gen_plantuml import plant_uml_create_png_and_return_image_url_async
from dialogs.DialogPlantUmlText import DialogPlantUmlText
from common.dialog_dir_path import dialog_path_pyinstaller_push, dialog_path_pyinstaller_pop
from common.messages import *
import datetime
from app.settings import CancelRefreshPlantUmlEvent, EVT_CANCEL_REFRESH_PLANTUML_EVENT
from common.url_to_data import url_to_data
import logging
from common.logger import config_log
log = logging.getLogger(__name__)
config_log(log)
ALLOW_DRAWING = True  # enable SHIFT-drag pen doodling handlers
DEFAULT_IMAGE_SIZE = (21, 21) # used to be 2000, 2000 for some reason
BMP_EXTRA_MARGIN = 20 # margin for plantuml images to allow scrolling them fully into view
unregistered = not PRO_EDITION  # True in the free edition; gates pro-only menu items
class ImageViewer(wx.ScrolledWindow):
    def __init__(self, parent, id=-1, size=wx.DefaultSize):
        """Scrolled image view with panning, zooming, pen doodles and PlantUML messages."""
        wx.ScrolledWindow.__init__(self, parent, id, (0, 0), size=size, style=wx.SUNKEN_BORDER)
        self.lines = []  # saved pen-doodle strokes (lists of segment tuples)
        self.maxWidth, self.maxHeight = DEFAULT_IMAGE_SIZE
        self.x = self.y = 0  # current pen position (image coords)
        self.curLine = []
        self.drawing = False  # True while a doodle stroke is in progress
        self.SetBackgroundColour("WHITE") # for areas of the frame not covered by the bmp
        # TODO these areas don't get refreshed properly when scrolling when pen marks are around, we only refresh bmp area and when bmp area < client window we get artifacts.
        bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
        self.bmp = bmp
        self.bmp_transparent_ori = None
        self.SetVirtualSize((self.maxWidth, self.maxHeight))
        self.SetScrollRate(1, 1) # set the ScrollRate to 1 in order for panning to work nicely
        self.zoomscale = 1.0
        self.clear_whole_window = False
        # NOTE(review): EVT_LEFT_DOWN/UP and EVT_MOTION are bound twice below
        # (doodle handlers here, pan handlers further down); both run because
        # the handlers call event.Skip() — confirm intended ordering.
        if ALLOW_DRAWING:
            self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftButtonEvent)
            self.Bind(wx.EVT_LEFT_UP, self.OnLeftButtonEvent)
            self.Bind(wx.EVT_MOTION, self.OnLeftButtonEvent)
            self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonEvent)
        # self.Bind(wx.EVT_IDLE, self.OnIdle) # ANDY HACK
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        # self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelScroll)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMove)
        self.Bind(wx.EVT_SIZE, self.OnResize)
        self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
        self.Bind(wx.EVT_KEY_UP, self.onKeyUp)
        self.Bind(wx.EVT_CHAR, self.onKeyChar) # 2019 added
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonMenu)
        self.was_dragging = False # True if dragging map
        self.move_dx = 0 # drag delta values
        self.move_dy = 0
        self.last_drag_x = None # previous drag position
        self.last_drag_y = None
        self.SetScrollbars(1, 1, int(self.GetVirtualSize()[0]), int(self.GetVirtualSize()[1]))
        self.mywheelscroll = 0
        self.popupmenu = None  # context menu, destroyed/recreated on each right-click
        # self.repaint_needed = False
        self.working = False # key press re-entrancy protection
        self.plantuml_text = ""
        # Image fetching states and flags
        self.error_msg = "" # message to display on big screen, when there is an error
        self.fetching_msg = "" # message to display on big screen, use as flag for when working
        self.fetching_started_time = None  # set when a fetch begins; see time_taken_fetching
    @property
    def working_fetching(self) -> bool: # stop multiple plant uml refreshes
        """True while an HTTP fetch/render is in flight; the non-empty fetching_msg doubles as the flag."""
        return self.fetching_msg != "" # if there is a message, that's a flag that we are http-ing
    @property
    def time_taken_fetching(self) -> float:
        """Seconds elapsed since the current fetch started.

        NOTE(review): raises TypeError if read while fetching_started_time is
        still None — callers appear to gate on fetching_msg first; confirm.
        """
        raw_diff = datetime.datetime.utcnow() - self.fetching_started_time
        return raw_diff.total_seconds()
    def clear(self):
        """Reset the viewer: blank white bitmap, no markup, no doodles, initial help shown."""
        self.error_msg = PLANTUML_VIEW_INITAL_HELP
        self.fetching_msg = ""
        self.bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)  # dc unused here
        self.bmp_transparent_ori = None
        self.plantuml_text = ""
        self.lines = []
        self.Refresh()
    def clear_cos_connection_error(self, msg=""):
        """Reset the view and show a connection-failure message.

        msg is interpolated into PLANTUML_VIEW_INTERNET_FAIL via %-formatting.
        Both caches are cleared so a retry re-attempts the HTTP round trip.
        """
        self.clear()
        self.error_msg = PLANTUML_VIEW_INTERNET_FAIL % msg
        # print(plant_uml_create_png_and_return_image_url.cache_info())
        plant_uml_create_png_and_return_image_url_async.cache_clear()
        url_to_data.cache_clear()
        self.Refresh()
    def user_aborted(self):
        """Show the user-abort message and clear caches so a retry refetches."""
        self.error_msg = PLANTUML_VIEW_USER_ABORT
        plant_uml_create_png_and_return_image_url_async.cache_clear()
        url_to_data.cache_clear()
        self.Refresh()
    def render_in_progress(self, rendering: bool, frame):
        """Toggle the "fetching" on-screen state at the start/end of a render.

        NOTE(review): the `frame` parameter is unused here — presumably kept
        for caller signature compatibility; confirm before removing.
        """
        if rendering:
            self.error_msg = ""
            self.fetching_msg = PLANTUML_VIEW_FETCHING_MSG
            self.fetching_started_time = datetime.datetime.utcnow()
        else:
            self.fetching_msg = ""
        # Update PlantUML view, guarding against the situation where when shutting down app
        # and killing pending tasks, the window may not exist anymore
        try:
            self.Refresh()
            wx.SafeYield() # Needed to "breathe" and refresh the UI
            # print("warning use of safe yield in image viewer")
        except RuntimeError:
            pass # avoid error when shutting down tasks
    async def ViewImage(self, thefile="", url=""):
        """Loads url or file and sets .bmp and .bmp_transparent_ori, the img is discarded.

        Args:
            thefile: path of an image on disk (takes precedence over url)
            url: HTTP url returning raw image bytes, fetched via url_to_data

        NOTE(review): if both arguments are empty, neither `img` nor `bmp` is
        bound and the size update below raises UnboundLocalError — callers
        must always pass exactly one of them.
        """
        self.error_msg = None
        if thefile:
            img = wx.Image(thefile, wx.BITMAP_TYPE_ANY)
            bmp = wx.Bitmap(img) # ANDY added 2019
        elif url:
            # print(url_to_data.cache_info())
            try:
                # url_to_data presumably returns (bytes, http_status) — verify against its module
                data, status = await url_to_data(url)
                log.info(f"(2nd, image grabbing) Response from plant_uml_server status_code {status}")
            except asyncio.TimeoutError as e: # there is no string repr of this exception
                self.clear_cos_connection_error(msg="(timeout)")
                url_to_data.cache_clear() # so if retry you won't get the same error
                log.error("TimeoutError getting plantuml IMAGE")
                return
            if status != 200:
                self.clear_cos_connection_error(msg=f"(bad response {status})")
                log.error(f"Error getting plantuml IMAGE, (bad response {status})")
                return
            stream = io.BytesIO(data)
            img = wx.Image(stream)
            bmp = wx.Bitmap(img)
            # try:
            #     bmp = img.ConvertToBitmap()
            # except Exception as e:
            #     print(e)
            #     return
        self.maxWidth, self.maxHeight = bmp.GetWidth(), bmp.GetHeight()
        self.maxHeight += BMP_EXTRA_MARGIN # stop bitmaps getting slightly clipped
        # dbg(bmp)
        # ANDY bmp.HasAlpha() does not work, since wx.Image has this method but wx.Bitmap
        # does not. But Bitmaps have some alpha channel concepts in them too...?
        # Render bmp to a second white bmp to remove transparency effects
        # if False and bmp.HasAlpha():
        if img.HasAlpha():
            # Keep the transparent original around (for "preserve transparency" save),
            # and flatten a copy onto an opaque background for on-screen display.
            self.bmp_transparent_ori = bmp
            bmp2 = wx.Bitmap(bmp.GetWidth(), bmp.GetHeight())
            dc = wx.MemoryDC()
            dc.SelectObject(bmp2)
            dc.Clear()
            dc.DrawBitmap(bmp, 0, 0, True)
            dc.SelectObject(wx.NullBitmap)
            self.bmp = bmp2
        else:
            self.bmp_transparent_ori = None
            self.bmp = bmp
# def OnIdle(self, event):
# """Idle Handler."""
# if self.working:
# dbg("re-entrancy avoided")
# return
# self.working = True
# if self.repaint_needed:
# dbg("repaint needed mate")
# self.Refresh()
# self.Update() # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
# wx.SafeYield() # Needed on Mac to see result if in a compute loop.
# self.repaint_needed = False
# self.working = 0
    def _CreateNewWhiteBmp(self, width, height, wantdc=False):
        """Create a bitmap filled with the window background colour.

        Returns (bmp, dc) in BOTH cases; when wantdc is False the dc has been
        deselected from the bitmap and should not be drawn on. Callers unpack
        the tuple either way.
        """
        bmp = wx.Bitmap(width, height)
        # Could simply return here, but bitmap would be black (or a bit random, under linux)
        dc = wx.MemoryDC()
        dc.SelectObject(bmp)
        dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
        dc.Clear()
        # dbg(wantdc)
        if wantdc: # just in case want to continue drawing
            return bmp, dc
        else:
            dc.SelectObject(wx.NullBitmap)
            return bmp, dc
    def OnHandleSaveImage(self, event):
        """Menu handler: save displayed image as PNG (implemented in the pro edition only)."""
        pass # pro feature
    def OnHandleSaveImagePreserveTransparencies(self, event):
        """Menu handler: save image keeping alpha channel (pro edition only)."""
        pass # pro feature
    def OnHandleSaveImageInclDoodles(self, event):
        """Menu handler: save image including pen doodles (pro edition only)."""
        pass # pro feature
    def OnHandleQuickLoadImage(self, event):
        """Debug menu handler: load a hard-coded image from disk.

        NOTE(review): FILE is not defined in this module's visible scope, and
        ViewImage is an async coroutine — calling it without await creates a
        coroutine that never runs. TODO confirm how this is scheduled.
        """
        self.ViewImage(FILE)
    def OnHandleQuickLoadFromYumlUrl(self, event):
        """Debug menu handler: load a sample class diagram from yuml.me.

        NOTE(review): ViewImage is async and is called here without await —
        the coroutine is never executed. TODO confirm scheduling.
        """
        baseUrl = "http://yuml.me/diagram/dir:lr;scruffy/class/"
        yuml_txt = (
            "[Customer]+1->*[Order],[Order]++1-items >*[LineItem],[Order]-0..1>[PaymentMethod]"
        )
        url = baseUrl + urllib.parse.quote(yuml_txt)
        self.ViewImage(url=url)
    def OnRightButtonMenu(self, event): # Menu
        """Build and show the right-click context menu.

        SHIFT+right-click is skipped so it reaches the pen-doodle handler.
        Pro-only items are shown with a 'pro' badge bitmap and disabled via
        OnPro_update when unregistered. ALT+right-click adds debug items.
        """
        if event.ShiftDown():
            event.Skip()
            return
        """
        Accelerator tables need unique ids, whereas direct menuitem binding with Bind(...source=menuitem)
        doesn't care about ids and can thus use wx.ID_ANY (which is always -1)
        Use wx.NewIdRef() if you want a real fresh id.
        """
        x, y = event.GetPosition()
        frame = self.GetTopLevelParent()
        image = images.pro.GetBitmap() if unregistered else None
        if self.popupmenu:
            self.popupmenu.Destroy() # wx.Menu objects need to be explicitly destroyed (e.g. menu.Destroy()) in this situation. Otherwise, they will rack up the USER Objects count on Windows; eventually crashing a program when USER Objects is maxed out. -- U. Artie Eoff http://wiki.wxpython.org/index.cgi/PopupMenuOnRightClick
        self.popupmenu = wx.Menu() # Create a menu
        if event.AltDown():
            # Debug menu items
            item = self.popupmenu.Append(wx.ID_ANY, "Load Image...")
            frame.Bind(wx.EVT_MENU, self.OnHandleFileLoad, item)
            item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Disk")
            frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadImage, item)
            item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Yuml Url")
            frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadFromYumlUrl, item)
            self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG...")
        frame.Bind(wx.EVT_MENU, self.OnHandleSaveImage, item)
        frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
        if image:
            item.SetBitmap(image)
        if self.bmp_transparent_ori:
            # Only offer the transparency-preserving save when a transparent original exists
            item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (preserve transparent areas)")
            frame.Bind(wx.EVT_MENU, self.OnHandleSaveImagePreserveTransparencies, item)
            frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
            if image:
                item.SetBitmap(image)
        item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (incl. pen doodles)")
        frame.Bind(wx.EVT_MENU, self.OnHandleSaveImageInclDoodles, item)
        frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
        if image:
            item.SetBitmap(image)
        self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "Clear pen doodles (SHIFT drag to create)\tE")
        frame.Bind(wx.EVT_MENU, self.OnClearPenLines, item)
        self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "View PlantUML markup...")
        frame.Bind(wx.EVT_MENU, self.OnViewPlantUmlMarkup, item)
        self.popupmenu.AppendSeparator()
        item = self.popupmenu.Append(wx.ID_ANY, "Cancel")
        frame.PopupMenu(self.popupmenu, wx.Point(x, y))
    def OnPro_update(self, event):
        """EVT_UPDATE_UI handler: disable pro-only menu items when unregistered."""
        event.Enable(not unregistered)
    def OnViewPlantUmlMarkup(self, event, alt_down=False):
        """Show the current PlantUML markup in a modal dialog.

        ALT held at invocation also prints the markup to stdout.
        NOTE(review): the alt_down parameter is unused — ALT is read live via
        wx.GetMouseState(); confirm whether the parameter can be retired.
        """
        mouse_state: wx.MouseState = wx.GetMouseState()
        if mouse_state.AltDown():
            print(self.plantuml_text)
        def display_dialog(txt_plantuml):
            """
            Display the PlantUML markup in a read-only (when unregistered) dialog.
            Args:
                txt_plantuml: PlantUML markup string to show
            """
            class EditDialog(DialogPlantUmlText):
                # Custom dialog built via wxformbuilder - subclass it first, to hook up event handlers
                def OnClassNameEnter(self, event):
                    self.EndModal(wx.ID_OK)
            # change cwd so that dialog can find the 'pro' image jpg which is relative to dialogs/
            # when deployed via pyinstaller, this path is a bit tricky to find, so use this func.
            # ('self' here is the ImageViewer, captured from the enclosing method; 'dir' shadows the builtin)
            dir = dialog_path_pyinstaller_push(frame = self)
            try:
                dialog = EditDialog(None)
                dialog.txt_plantuml.Value = txt_plantuml
                dialog.txt_plantuml.SetFocus()
                dialog.txt_plantuml.Enable(not unregistered)
                dialog.ShowModal()
                # dialog.Show()
                dialog.Destroy()
            finally:
                dialog_path_pyinstaller_pop()
        display_dialog(self.plantuml_text)
        # wx.MessageBox(f"PRO mode lets you copy the PlantUML text to the clipboard\n\n{self.plantuml_text}")
def OnHandleFileLoad(self, event):
    """Prompt for an image file on disk and display it in the viewer."""
    frame = self.GetTopLevelParent()
    wildcard = (
        "Images (*.png; *.jpeg; *.jpg; *.bmp)|*.png;*.jpeg;*.jpg;*.bmp|" "All files (*.*)|*.*"
    )
    dlg = wx.FileDialog(
        parent=frame,
        message="choose",
        defaultDir=".",
        defaultFile="",
        wildcard=wildcard,
        style=wx.FD_OPEN,
        pos=wx.DefaultPosition,
    )
    try:
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
            self.ViewImage(filename)
    finally:
        # Fix: the dialog was never destroyed, leaking a native window per use.
        dlg.Destroy()
def SetScrollRateSmart(self, newstep=None, printinfo=False):
    """changing scrollwindow scroll unit without scroll pos changing - utility.

    Scales the current scroll position by old/new step ratio and rederives
    the virtual size from the image size, zoom and step, so the view does
    not visibly move when the scroll unit changes.

    There is a slight jump when going from small scroll step e.g. 1 to large e.g. 20
    because the resolution isn't the same (scroll step might be 3 out of 10 instead of the
    more precise 30 out of 100). I couldn't get rid of this jump, even by fiddling with
    the virtual size - might just have to live with it.

    Args:
        newstep: desired scroll unit in pixels; None means "just print info".
        printinfo: dump before/after metrics to stdout (debug).
    """
    # Assumes square scroll units: only the x component is inspected.
    oldstep = self.GetScrollPixelsPerUnit()[0]
    oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
    oldscrolly = self.GetScrollPos(wx.VERTICAL)
    oldvirtx = self.GetVirtualSize()[0]
    oldvirty = self.GetVirtualSize()[1]
    # rot = event.GetWheelRotation()
    if printinfo:
        print(f"\nIN step {oldstep} newstep {newstep} old scroll {oldscrollx}, {oldscrolly} virt {oldvirtx}, {oldvirty}")
    if newstep is not None:
        if oldstep == newstep:
            if printinfo:
                print(f"Nothing to do, step of {newstep} already set.")
        else:
            # Scroll positions are in scroll units, so rescale by the step ratio.
            q = newstep / oldstep  # min(1, newstep)
            newscrollx = int(oldscrollx / q)
            newscrolly = int(oldscrolly / q)
            # newvirtx = oldvirtx / q
            # newvirty = oldvirty / q
            # Aha - image size * step => virtual bounds
            newvirtx = int(self.maxWidth / newstep * self.zoomscale)
            newvirty = int(self.maxHeight / newstep * self.zoomscale)
            if printinfo:
                print(f"OUT step {newstep} new scroll {newscrollx}, {newscrolly} virt {newvirtx}, {newvirty} q {q}")
            self.SetScrollbars(
                int(newstep), int(newstep),
                int(newvirtx), int(newvirty),  # new virtual size
                int(newscrollx), int(newscrolly),  # new scroll positions
                noRefresh=True)
            # self.Refresh()
    if printinfo:
        print(self.GetVirtualSize())
def onKeyChar(self, event):
    """Character shortcuts: a=print scroll info, d/s=set scroll step 20/1, e=clear doodles."""
    if event.GetKeyCode() >= 256:
        # Not a plain character key (arrows, function keys, ...).
        event.Skip()
        return
    if self.working:
        # Re-entrancy guard: ignore keys while a previous key is being processed.
        event.Skip()
        return
    self.working = True
    try:
        keycode = chr(event.GetKeyCode())
        # print("imgkeycode", keycode)
        if keycode == "a":
            self.SetScrollRateSmart(newstep=None, printinfo=True)  # info only, no change
        # elif keycode in ["1", "2", "3", "4", "5", "6", "7", "8"]:
        #     todisplay = ord(keycode) - ord("1")
        #     self.snapshot_mgr.Restore(todisplay)  # snapshot 1 becomes 0 as a param
        #     self.mega_refresh()
        elif keycode == "d":
            self.SetScrollRateSmart(newstep=20, printinfo=True)
        elif keycode == "s":
            self.SetScrollRateSmart(newstep=1, printinfo=True)
        elif keycode == "e":
            self.clear_pen_lines()
    finally:
        # Fix: an exception in a handler previously left `working` stuck at
        # True, permanently swallowing all subsequent key events.
        self.working = False
def onKeyPress(self, event):  # ANDY
    """Key-down handler: SHIFT shows the pencil cursor, ESC aborts a render or clears the error text."""
    keycode = event.GetKeyCode()
    # dbg(keycode)
    if event.ShiftDown():
        # SHIFT enters pen-doodle mode; give immediate cursor feedback.
        self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
    if keycode == wx.WXK_ESCAPE:
        if self.working_fetching:
            # A PlantUML render is in flight - tell the frame to cancel it.
            frame = self.GetTopLevelParent()
            frame.SetStatusText("ESC key detected: PlantUML render Aborted")
            wx.PostEvent(frame, CancelRefreshPlantUmlEvent())
            self.user_aborted()
        else:
            if self.plantuml_text:
                self.error_msg = ""  # clear any annoying error message, so can see bmp
                self.Refresh()
            else:
                self.error_msg = PLANTUML_VIEW_INITAL_HELP
    # Old re-entrancy guard, kept for reference:
    # if self.working:
    #     event.Skip()
    #     return
    # self.working = True
    #
    # keycode = event.GetKeyCode()  # http://www.wxpython.org/docs/api/wx.KeyEvent-class.html
    # self.working = False
    event.Skip()
def onKeyUp(self, event):  # ANDY
    # Any key released: leave pencil-cursor (pen doodle) mode.
    self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
    event.Skip()
def getWidth(self):
    # Unzoomed bitmap width in pixels.
    return self.maxWidth
def getHeight(self):
    # Unzoomed bitmap height in pixels (includes BMP_EXTRA_MARGIN after a load).
    return self.maxHeight
def OnErase(self, event):  # ANDY
    # Intentional no-op background-erase handler.
    # NOTE(review): its Bind() call appears commented out in __init__ - confirm whether it is ever active.
    pass
def OnWheelScroll(self, event):
    """Mouse-wheel handler.

    The scroll unit is normally 1 px so that panning is smooth, which makes
    wheel scrolling painfully slow.  Bump the scroll unit to 20 (preserving
    the scroll position via SetScrollRateSmart) before letting the default
    wx.ScrolledWindow wheel handling run via event.Skip().
    CTRL+wheel is deliberately passed through untouched.
    """
    if event.ControlDown():
        event.Skip()
        return
    # Earlier experimental implementations (direct SetScrollRate, manual
    # scroll-position math, accumulated-rotation scrolling) were jumpy and
    # have been removed - see version control history if needed.
    self.SetScrollRateSmart(20)
    event.Skip()
def OnResize(self, event):  # ANDY interesting - GetVirtualSize grows when resize frame
    # If the client area has grown past the zoomed picture, the exposed
    # border must be cleared on the next paint or stale artifacts remain.
    self.DebugSizez("resize")
    if self.NeedToClear() and self.IsShownOnScreen():
        self.clear_whole_window = True
        self.Refresh()
def CalcVirtSize(self):
    """Return (w, h) of the zoomed picture - the 'true' virtual size."""
    zoom = self.zoomscale
    return (self.maxWidth * zoom, self.maxHeight * zoom)
def NeedToClear(self):
    """True when the client area extends beyond the zoomed picture.

    Since VirtualSize auto-grows when the frame is resized, we cannot rely
    on GetVirtualSize(); rederive the picture bounds via CalcVirtSize().
    """
    client_w, client_h = self.GetClientSize()
    pic_w, pic_h = self.CalcVirtSize()
    return client_w > pic_w or client_h > pic_h
def DebugSizez(self, fromwheremsg):
    # Debug dump of the various size metrics.  Deliberately disabled by the
    # early return below - delete it to re-enable the print.
    return
    if self.NeedToClear():
        msg = "!!!!!!! "
    else:
        msg = "! "
    print(
        msg
        + "(%s) visible %d NeedToClear %s GetVirtualSize %d getWidth %d GetClientSize %d self.GetViewStart() %d self.maxWidth %d "
        % (
            fromwheremsg,
            self.IsShownOnScreen(),
            self.NeedToClear(),
            self.GetVirtualSize()[0],
            self.getWidth(),
            self.GetClientSize()[0],
            self.GetViewStart()[0],
            self.maxWidth,
        )
    )
def OnPaint(self, event):  # ANDY
    # Paint handler: draw everything at the current zoom through a
    # scroll-adjusted device context.
    dc = wx.PaintDC(self)
    self.PrepareDC(dc)  # account for the current scroll position
    dc.SetUserScale(self.zoomscale, self.zoomscale)
    # since we're not buffering in this case, we have to
    # paint the whole window, potentially very time consuming.
    self.DoDrawing(dc)
def Redraw(self, dc):
    # Convenience wrapper: repaint everything onto a caller-supplied DC.
    self.DoDrawing(dc)
def OnLeftDown(self, event):  # ANDY some PAN ideas from http://code.google.com/p/pyslip/
    """Left button pressed: remember the position so a drag can pan the view."""
    if event.ShiftDown():
        # SHIFT+drag is pen-doodle mode - hand the event on.
        event.Skip()
        return
    self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
    self.last_drag_x, self.last_drag_y = event.GetPosition()
    event.Skip()
def OnLeftUp(self, event):  # ANDY PAN
    """Left button released: end any pan drag and restore the cursor."""
    if event.ShiftDown():
        event.Skip()
        return
    self.last_drag_x = None
    self.last_drag_y = None
    self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
    self.was_dragging = False  # turn off drag
    # force PAINT event to remove selection box (if required)
    # self.Update()
    event.Skip()
def | (self, event): # ANDY PAN
"""Handle a mouse move (map drag).
event the mouse move event
"""
if event.ShiftDown():
event.Skip()
return
# for windows, set focus onto pyslip window
# linux seems to do this automatically
if sys.platform == "win32" and self.FindFocus() != self:
self.SetFocus()
# get current mouse position
(x, y) = event.GetPosition()
# from common.architecture_support import whoscalling2
# dbg(whoscalling2())
# self.RaiseMousePositionEvent((x, y))
if event.Dragging() and event.LeftIsDown():
# are we doing box select?
if not self.last_drag_x is None:
# no, just a map drag
self.was_dragging = True
dx = self.last_drag_x - x
dy = self.last_drag_y - y
# dx /= 20
# dy /= 20
# dbg(dx)
# dbg(dy)
# print "PAN %d %d" % (dx, dy)
# print self.GetViewStart()
currx, curry = self.GetViewStart()
self.Scroll(
currx + dx, curry + dy
) # Note The positions are in scroll units, not pixels, so to convert to pixels you will have to multiply by the number of pixels per scroll increment. If either parameter is -1, that position will be ignored (no change in that direction).
# print "Scroll pan %d %d" % (currx+dx, curry+dy)
# adjust remembered X,Y
self.last_drag_x = x
self.last_drag_y = y
# redraw client area
self.Update()
def DoDrawing(self, dc, printing=False):
    """Paint bitmap, pen doodles, then any status/error text onto `dc` (in that order)."""
    # dbg(f"DoDrawing {len(self.curLine)}")
    if self.clear_whole_window:
        # One-shot flag set by OnResize when the client area grew past the picture.
        dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
        dc.Clear()
        self.clear_whole_window = False
    if self.bmp:
        dc.DrawBitmap(self.bmp, 0, 0, False)  # false means don't use mask
        # dc.SetTextForeground('BLUE')
        # text = "UML via Pynsource and PlantUML"
        # dc.DrawText(text, 2, 2)
    self.DrawSavedLines(dc)
    if self.error_msg:
        dc.DrawText(self.error_msg, 2, 2)
    # Only show the "fetching..." banner once the fetch has taken a while.
    if self.fetching_msg and self.time_taken_fetching > 0.5:
        """
        Text is never drawn with the current pen. It's drawn with the current
        text color. Try
        dc.SetTextForeground((255,255,0))
        This is a historical implementation detail in Windows GDI. The pen is
        used for lines, the brush is used for fills, and text had its own
        attributes.
        """
        # dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
        # dc.SetBrush(wx.Brush("RED"))
        # dc.SetTextForeground((204, 0, 0)) # red
        # dc.SetTextForeground((204, 102, 0)) # dark orange
        dc.SetTextForeground((255, 255, 255))  # white
        dc.SetTextBackground((0, 0, 0))  # black
        dc.SetBackgroundMode(wx.SOLID)  # paint the black text background too
        dc.DrawText(self.fetching_msg, 2, 2)
def DrawSavedLines(self, dc):  # PEN DRAWING
    """Replay every saved pen-doodle segment onto `dc`."""
    dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
    for doodle in self.lines:
        for segment in doodle:
            x1, y1, x2, y2 = map(int, segment)
            dc.DrawLine(x1, y1, x2, y2)
def OnClearPenLines(self, event):
    # Menu handler: delete all pen doodles.
    self.clear_pen_lines()
def clear_pen_lines(self):
    """Discard every saved pen doodle and trigger a repaint."""
    self.lines = []
    self.Refresh()
def SetXY(self, event):  # PEN DRAWING
    # Remember the current pen position in unscrolled, unzoomed image coords.
    self.x, self.y = self.ConvertEventCoords(event)
def ConvertEventCoords(self, event):  # PEN DRAWING
    """Map an event's device position to unscrolled, zoom-corrected image coords."""
    ux, uy = self.CalcUnscrolledPosition(event.GetX(), event.GetY())
    zoom = self.zoomscale
    return (ux * self.GetScaleX() / zoom, uy * self.GetScaleY() / zoom)
def OnRightButtonEvent(self, event):  # PEN DRAWING - ANDY
    # SHIFT+right-click erases all pen doodles.
    if event.ShiftDown():
        self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
        self.clear_pen_lines()
    event.Skip()
def OnLeftButtonEvent(self, event):  # PEN DRAWING
    """SHIFT + left mouse implements freehand pen doodling (down/drag/up)."""
    # NOTE(review): nesting reconstructed from a whitespace-stripped source -
    # the down/drag/up chain is taken to sit inside the ShiftDown() guard,
    # since non-SHIFT clicks are handled by the pan handlers (OnLeftDown /
    # OnLeftUp); confirm against the original file.
    if event.ShiftDown():
        self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
        if event.LeftDown():
            # self.SetScrollRate(1, 1)  # works to slow the scrolling, but causes scroll jump to 0,0
            self.SetScrollRateSmart(1)  # smoother pan when scroll step is 1
            # dbg(f"LeftDown {self.GetScrollPixelsPerUnit()}")
            self.SetFocus()
            self.SetXY(event)
            self.curLine = []
            self.CaptureMouse()
            self.drawing = True
        elif event.Dragging() and self.drawing:
            # print("dragging.....")
            # ANDY UPDATE 2019 - Drawing to wx.ClientDC doesn't work well these days and you only
            # see the result when an on paint occurs much later - and often cannot force the paint?
            # instead, issue a Refresh() which triggers a paint, and draw there instead.
            coords = (self.x, self.y) + self.ConvertEventCoords(event)
            self.curLine.append(coords)
            self.lines.append(self.curLine)
            self.curLine = []
            self.SetXY(event)  # reset line drawing start point to current mouse pos
            self.Refresh()
            # (Removed long-dead "version 0" direct wx.ClientDC drawing code
            # and its failed workaround hacks - see VCS history.)
        elif event.LeftUp() and self.drawing:
            self.lines.append(self.curLine)
            self.curLine = []
            self.ReleaseMouse()
            self.drawing = False
            self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
            self.Refresh()  # ANDY added, pheonix
class TestFrame(wx.Frame):
    """Minimal frame hosting a single ImageViewer, for manual testing."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ImageViewer(self)
class App(wx.App):
    """Stand-alone wx application bootstrapping the test frame."""

    def OnInit(self):
        frame = TestFrame(None, title="Andy Image Viewer")
        frame.Show(True)
        frame.Centre()
        return True
# Manual test entry point: run this module directly to open the viewer.
if __name__ == "__main__":
    app = App(0)
    app.MainLoop()
| OnMove | identifier_name |
gui_imageviewer.py | # Image viewer
import wx
import sys
import io
import urllib.request, urllib.parse, urllib.error
from urllib.request import Request, urlopen
import asyncio
from io import StringIO
# from pydbg import dbg
from gui.coord_utils import ZoomInfo
from typing import List, Set, Dict, Tuple, Optional
from media import images
from gui.settings import PRO_EDITION
from generate_code.gen_plantuml import plant_uml_create_png_and_return_image_url_async
from dialogs.DialogPlantUmlText import DialogPlantUmlText
from common.dialog_dir_path import dialog_path_pyinstaller_push, dialog_path_pyinstaller_pop
from common.messages import *
import datetime
from app.settings import CancelRefreshPlantUmlEvent, EVT_CANCEL_REFRESH_PLANTUML_EVENT
from common.url_to_data import url_to_data
import logging
from common.logger import config_log
log = logging.getLogger(__name__)
config_log(log)
# Module-level configuration.
ALLOW_DRAWING = True  # bind the SHIFT-drag pen-doodle mouse handlers
DEFAULT_IMAGE_SIZE = (21, 21)  # used to be 2000, 2000 for some reason
BMP_EXTRA_MARGIN = 20  # margin for plantuml images to allow scrolling them fully into view
unregistered = not PRO_EDITION  # True when running the free (non-PRO) edition
class ImageViewer(wx.ScrolledWindow):
def __init__(self, parent, id=-1, size=wx.DefaultSize):
    """Scrolled window displaying a bitmap, with pan, wheel-scroll, zoom and SHIFT-drag pen doodles."""
    wx.ScrolledWindow.__init__(self, parent, id, (0, 0), size=size, style=wx.SUNKEN_BORDER)
    self.lines = []  # saved pen-doodle line segments (list of lists of 4-tuples)
    self.maxWidth, self.maxHeight = DEFAULT_IMAGE_SIZE  # unzoomed bitmap size
    self.x = self.y = 0  # current pen position, image coords
    self.curLine = []  # segments of the doodle currently being drawn
    self.drawing = False  # True while a pen doodle is in progress
    self.SetBackgroundColour("WHITE")  # for areas of the frame not covered by the bmp
    # TODO these areas don't get refreshed properly when scrolling when pen marks are around, we only refresh bmp area and when bmp area < client window we get artifacts.
    bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
    self.bmp = bmp  # bitmap currently displayed
    self.bmp_transparent_ori = None  # alpha-preserving original (for transparent PNG save)
    self.SetVirtualSize((self.maxWidth, self.maxHeight))
    self.SetScrollRate(1, 1)  # set the ScrollRate to 1 in order for panning to work nicely
    self.zoomscale = 1.0
    self.clear_whole_window = False  # one-shot flag consumed by DoDrawing
    if ALLOW_DRAWING:
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftButtonEvent)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftButtonEvent)
        self.Bind(wx.EVT_MOTION, self.OnLeftButtonEvent)
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonEvent)
    # self.Bind(wx.EVT_IDLE, self.OnIdle) # ANDY HACK
    self.Bind(wx.EVT_PAINT, self.OnPaint)
    # self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
    self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelScroll)
    # Pan handlers are bound after the pen handlers; they Skip() when SHIFT is down.
    self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
    self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
    self.Bind(wx.EVT_MOTION, self.OnMove)
    self.Bind(wx.EVT_SIZE, self.OnResize)
    self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
    self.Bind(wx.EVT_KEY_UP, self.onKeyUp)
    self.Bind(wx.EVT_CHAR, self.onKeyChar)  # 2019 added
    self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonMenu)
    self.was_dragging = False  # True if dragging map
    self.move_dx = 0  # drag delta values
    self.move_dy = 0
    self.last_drag_x = None  # previous drag position
    self.last_drag_y = None
    self.SetScrollbars(1, 1, int(self.GetVirtualSize()[0]), int(self.GetVirtualSize()[1]))
    self.mywheelscroll = 0  # accumulated wheel rotation (legacy scroll code)
    self.popupmenu = None  # lazily (re)built by OnRightButtonMenu
    # self.repaint_needed = False
    self.working = False  # key press re-entrancy protection
    self.plantuml_text = ""  # markup of the currently rendered diagram
    # Image fetching states and flags
    self.error_msg = ""  # message to display on big screen, when there is an error
    self.fetching_msg = ""  # message to display on big screen, use as flag for when working
    self.fetching_started_time = None  # naive UTC timestamp set by render_in_progress
@property
def working_fetching(self):  # stop multiple plant uml refreshes
    """True while an HTTP fetch is in flight (the message doubles as the busy flag)."""
    return bool(self.fetching_msg)
@property
def time_taken_fetching(self) -> float:
    """Seconds elapsed since the current fetch started, or 0.0 if none has started.

    Robustness fix: previously raised TypeError when `fetching_started_time`
    was still None (no fetch ever begun).  Keeps naive utcnow() arithmetic to
    match the naive timestamp stored by render_in_progress.
    """
    if self.fetching_started_time is None:
        return 0.0
    raw_diff = datetime.datetime.utcnow() - self.fetching_started_time
    return raw_diff.total_seconds()
def clear(self):
    """Reset the viewer to its initial empty state: help text, fresh white bitmap, no doodles."""
    self.error_msg = PLANTUML_VIEW_INITAL_HELP
    self.fetching_msg = ""
    self.bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
    self.bmp_transparent_ori = None
    self.plantuml_text = ""
    self.lines = []
    self.Refresh()
def clear_cos_connection_error(self, msg=""):
    """Clear the view and show an internet-failure message.

    Args:
        msg: extra detail interpolated into the failure text.
    """
    self.clear()
    self.error_msg = PLANTUML_VIEW_INTERNET_FAIL % msg
    # print(plant_uml_create_png_and_return_image_url.cache_info())
    # Drop cached results so a retry actually re-contacts the server.
    plant_uml_create_png_and_return_image_url_async.cache_clear()
    url_to_data.cache_clear()
    self.Refresh()
def user_aborted(self):
    """Show the 'user aborted' message and drop cached fetch results so a retry re-fetches."""
    self.error_msg = PLANTUML_VIEW_USER_ABORT
    for cached_fn in (plant_uml_create_png_and_return_image_url_async, url_to_data):
        cached_fn.cache_clear()
    self.Refresh()
def render_in_progress(self, rendering: bool, frame):
    """Toggle the 'fetching' UI state around a PlantUML render.

    Args:
        rendering: True when a render starts, False when it finishes.
        frame: not used in this body - kept for caller compatibility.
    """
    if rendering:
        self.error_msg = ""
        self.fetching_msg = PLANTUML_VIEW_FETCHING_MSG  # non-empty => working_fetching True
        self.fetching_started_time = datetime.datetime.utcnow()
    else:
        self.fetching_msg = ""
    # Update PlantUML view, guarding against the situation where when shutting down app
    # and killing pending tasks, the window may not exist anymore
    try:
        self.Refresh()
        wx.SafeYield()  # Needed to "breathe" and refresh the UI
        # print("warning use of safe yield in image viewer")
    except RuntimeError:
        pass  # avoid error when shutting down tasks
async def ViewImage(self, thefile="", url=""):
    """Loads url or file and sets .bmp and .bmp_transparent_ori, the img is discarded.

    Args:
        thefile: path of a local image file (takes precedence over `url`).
        url: HTTP URL of an image to fetch asynchronously.
    Side effects: updates maxWidth/maxHeight; on fetch failure shows a
    connection-error message and returns early.
    """
    self.error_msg = None
    if thefile:
        img = wx.Image(thefile, wx.BITMAP_TYPE_ANY)
        bmp = wx.Bitmap(img)  # ANDY added 2019
    elif url:
        # print(url_to_data.cache_info())
        try:
            data, status = await url_to_data(url)
            log.info(f"(2nd, image grabbing) Response from plant_uml_server status_code {status}")
        except asyncio.TimeoutError as e:  # there is no string repr of this exception
            self.clear_cos_connection_error(msg="(timeout)")
            url_to_data.cache_clear()  # so if retry you won't get the same error
            log.error("TimeoutError getting plantuml IMAGE")
            return
        if status != 200:
            self.clear_cos_connection_error(msg=f"(bad response {status})")
            log.error(f"Error getting plantuml IMAGE, (bad response {status})")
            return
        stream = io.BytesIO(data)
        img = wx.Image(stream)
        bmp = wx.Bitmap(img)
        # try:
        #     bmp = img.ConvertToBitmap()
        # except Exception as e:
        #     print(e)
        #     return
    self.maxWidth, self.maxHeight = bmp.GetWidth(), bmp.GetHeight()
    self.maxHeight += BMP_EXTRA_MARGIN  # stop bitmaps getting slightly clipped
    # dbg(bmp)
    # ANDY bmp.HasAlpha() does not work, since wx.Image has this method but wx.Bitmap
    # does not. But Bitmaps have some alpha channel concepts in them too...?
    # Render bmp to a second white bmp to remove transparency effects
    # if False and bmp.HasAlpha():
    if img.HasAlpha():
        # Keep the alpha original for "save preserving transparency", display a flattened copy.
        self.bmp_transparent_ori = bmp
        bmp2 = wx.Bitmap(bmp.GetWidth(), bmp.GetHeight())
        dc = wx.MemoryDC()
        dc.SelectObject(bmp2)
        dc.Clear()
        dc.DrawBitmap(bmp, 0, 0, True)
        dc.SelectObject(wx.NullBitmap)
        self.bmp = bmp2
    else:
        self.bmp_transparent_ori = None
        self.bmp = bmp
# def OnIdle(self, event):
# """Idle Handler."""
# if self.working:
# dbg("re-entrancy avoided")
# return
# self.working = True
# if self.repaint_needed:
# dbg("repaint needed mate")
# self.Refresh()
# self.Update() # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
# wx.SafeYield() # Needed on Mac to see result if in a compute loop.
# self.repaint_needed = False
# self.working = 0
def _CreateNewWhiteBmp(self, width, height, wantdc=False):
    """Create a bitmap cleared to the window background colour.

    Returns (bmp, dc) in all cases, for signature compatibility with
    existing `bmp, dc = ...` callers.  When `wantdc` is True the dc still
    has the bitmap selected into it so the caller can keep drawing;
    otherwise the bitmap has been deselected and the dc is inert.
    """
    bmp = wx.Bitmap(width, height)
    # A fresh wx.Bitmap's contents are undefined (black, or a bit random
    # under linux), so clear it explicitly through a memory DC.
    dc = wx.MemoryDC()
    dc.SelectObject(bmp)
    dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
    dc.Clear()
    if not wantdc:
        dc.SelectObject(wx.NullBitmap)  # release the bitmap from the dc
    return bmp, dc
def OnHandleSaveImage(self, event):
    # Stub: "Save Image as PNG" is a PRO feature; no-op in this edition.
    pass  # pro feature
def OnHandleSaveImagePreserveTransparencies(self, event):
    # Stub: transparent-PNG save is a PRO feature; no-op in this edition.
    pass  # pro feature
def OnHandleSaveImageInclDoodles(self, event):
    # Stub: save-with-doodles is a PRO feature; no-op in this edition.
    pass  # pro feature
def OnHandleQuickLoadImage(self, event):
    # Debug helper reachable from the ALT right-click menu.
    # NOTE(review): `FILE` is not defined anywhere in this module, so invoking
    # this raises NameError - looks like a leftover debug constant; confirm.
    # NOTE(review): ViewImage is an async coroutine here; calling it without
    # awaiting/scheduling means it never executes - verify intended usage.
    self.ViewImage(FILE)
def OnHandleQuickLoadFromYumlUrl(self, event):
    # Debug helper: render a sample yUML class diagram fetched from yuml.me.
    baseUrl = "http://yuml.me/diagram/dir:lr;scruffy/class/"
    yuml_txt = (
        "[Customer]+1->*[Order],[Order]++1-items >*[LineItem],[Order]-0..1>[PaymentMethod]"
    )
    url = baseUrl + urllib.parse.quote(yuml_txt)
    # NOTE(review): ViewImage is an async coroutine; calling it without
    # awaiting/scheduling means it never executes - verify intended usage.
    self.ViewImage(url=url)
def OnRightButtonMenu(self, event):  # Menu
    """Build and show the context menu (ALT adds debug items; SHIFT is pen-erase, skipped)."""
    if event.ShiftDown():
        event.Skip()
        return
    """
    Accelerator tables need unique ids, whereas direct menuitem binding with Bind(...source=menuitem)
    doesn't care about ids and can thus use wx.ID_ANY (which is always -1)
    Use wx.NewIdRef() if you want a real fresh id.
    """
    x, y = event.GetPosition()
    frame = self.GetTopLevelParent()
    # In the free edition, PRO-only items carry a "pro" badge bitmap.
    image = images.pro.GetBitmap() if unregistered else None
    if self.popupmenu:
        self.popupmenu.Destroy()  # wx.Menu objects need to be explicitly destroyed (e.g. menu.Destroy()) in this situation. Otherwise, they will rack up the USER Objects count on Windows; eventually crashing a program when USER Objects is maxed out. -- U. Artie Eoff http://wiki.wxpython.org/index.cgi/PopupMenuOnRightClick
    self.popupmenu = wx.Menu()  # Create a menu
    if event.AltDown():
        # Debug menu items
        item = self.popupmenu.Append(wx.ID_ANY, "Load Image...")
        frame.Bind(wx.EVT_MENU, self.OnHandleFileLoad, item)
        item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Disk")
        frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadImage, item)
        item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Yuml Url")
        frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadFromYumlUrl, item)
        self.popupmenu.AppendSeparator()
    item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG...")
    frame.Bind(wx.EVT_MENU, self.OnHandleSaveImage, item)
    frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)  # greyed out unless PRO
    if image:
        item.SetBitmap(image)
    if self.bmp_transparent_ori:
        # Only offer transparency-preserving save when an alpha original exists.
        item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (preserve transparent areas)")
        frame.Bind(wx.EVT_MENU, self.OnHandleSaveImagePreserveTransparencies, item)
        frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
        if image:
            item.SetBitmap(image)
    item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (incl. pen doodles)")
    frame.Bind(wx.EVT_MENU, self.OnHandleSaveImageInclDoodles, item)
    frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
    if image:
        item.SetBitmap(image)
    self.popupmenu.AppendSeparator()
    item = self.popupmenu.Append(wx.ID_ANY, "Clear pen doodles (SHIFT drag to create)\tE")
    frame.Bind(wx.EVT_MENU, self.OnClearPenLines, item)
    self.popupmenu.AppendSeparator()
    item = self.popupmenu.Append(wx.ID_ANY, "View PlantUML markup...")
    frame.Bind(wx.EVT_MENU, self.OnViewPlantUmlMarkup, item)
    self.popupmenu.AppendSeparator()
    item = self.popupmenu.Append(wx.ID_ANY, "Cancel")
    frame.PopupMenu(self.popupmenu, wx.Point(x, y))
def OnPro_update(self, event):
event.Enable(not unregistered)
def OnViewPlantUmlMarkup(self, event, alt_down=False):
mouse_state: wx.MouseState = wx.GetMouseState()
if mouse_state.AltDown():
print(self.plantuml_text)
def display_dialog(txt_plantuml):
"""
Displays dialog for editing comments
Args:
comment: comment string
Returns: (result, comment)
"""
class EditDialog(DialogPlantUmlText):
# Custom dialog built via wxformbuilder - subclass it first, to hook up event handlers
def OnClassNameEnter(self, event):
self.EndModal(wx.ID_OK)
# change cwd so that dialog can find the 'pro' image jpg which is relative to dialogs/
# when deployed via pyinstaller, this path is a bit tricky to find, so use this func.
dir = dialog_path_pyinstaller_push(frame = self)
try:
dialog = EditDialog(None)
dialog.txt_plantuml.Value = txt_plantuml
dialog.txt_plantuml.SetFocus()
dialog.txt_plantuml.Enable(not unregistered)
dialog.ShowModal()
# dialog.Show()
dialog.Destroy()
finally:
dialog_path_pyinstaller_pop()
display_dialog(self.plantuml_text)
# wx.MessageBox(f"PRO mode lets you copy the PlantUML text to the clipboard\n\n{self.plantuml_text}")
def OnHandleFileLoad(self, event):
frame = self.GetTopLevelParent()
wildcard = (
"Images (*.png; *.jpeg; *.jpg; *.bmp)|*.png;*.jpeg;*.jpg;*.bmp|" "All files (*.*)|*.*"
)
dlg = wx.FileDialog(
parent=frame,
message="choose",
defaultDir=".",
defaultFile="",
wildcard=wildcard,
style=wx.FD_OPEN,
pos=wx.DefaultPosition,
)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.ViewImage(filename)
def SetScrollRateSmart(self, newstep=None, printinfo=False):
"""changing scrollwindow scroll unit without scroll pos changing - utility.
There is a slight jump when going from small scroll step e.g. 1 to large e.g. 20
because the resolution isn't the same (scroll step might be 3 out of 10 instead of the
more precise 30 out of 100). I couldn't get rid of this jump, even by fiddling with
the virtual size - might just have to live with it.
and
"""
oldstep = self.GetScrollPixelsPerUnit()[0]
oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
oldscrolly = self.GetScrollPos(wx.VERTICAL)
oldvirtx = self.GetVirtualSize()[0]
oldvirty = self.GetVirtualSize()[1]
# rot = event.GetWheelRotation()
if printinfo:
print(f"\nIN step {oldstep} newstep {newstep} old scroll {oldscrollx}, {oldscrolly} virt {oldvirtx}, {oldvirty}")
if newstep is not None:
if oldstep == newstep:
if printinfo:
print(f"Nothing to do, step of {newstep} already set.")
else:
q = newstep / oldstep # min(1, newstep)
newscrollx = int(oldscrollx / q)
newscrolly = int(oldscrolly / q)
# newvirtx = oldvirtx / q
# newvirty = oldvirty / q
# Aha - image size * step => virtual bounds
newvirtx = int(self.maxWidth / newstep * self.zoomscale)
newvirty = int(self.maxHeight / newstep * self.zoomscale)
if printinfo:
print(f"OUT step {newstep} new scroll {newscrollx}, {newscrolly} virt {newvirtx}, {newvirty} q {q}")
self.SetScrollbars(
int(newstep), int(newstep),
int(newvirtx), int(newvirty), # new virtual size
int(newscrollx), int(newscrolly), # new scroll positions
noRefresh=True)
# self.Refresh()
if printinfo:
print(self.GetVirtualSize())
def onKeyChar(self, event):
if event.GetKeyCode() >= 256:
event.Skip()
return
if self.working:
event.Skip()
return
self.working = True
keycode = chr(event.GetKeyCode())
# print("imgkeycode", keycode)
if keycode == "a":
self.SetScrollRateSmart(newstep=None, printinfo=True)
# elif keycode in ["1", "2", "3", "4", "5", "6", "7", "8"]:
# todisplay = ord(keycode) - ord("1")
# self.snapshot_mgr.Restore(todisplay) # snapshot 1 becomes 0 as a param
# self.mega_refresh()
elif keycode == "d":
self.SetScrollRateSmart(newstep=20, printinfo=True)
elif keycode == "s":
self.SetScrollRateSmart(newstep=1, printinfo=True)
elif keycode == "e":
self.clear_pen_lines()
self.working = False
def onKeyPress(self, event): # ANDY
keycode = event.GetKeyCode()
# dbg(keycode)
if event.ShiftDown():
self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
if keycode == wx.WXK_ESCAPE:
if self.working_fetching:
frame = self.GetTopLevelParent()
frame.SetStatusText("ESC key detected: PlantUML render Aborted")
wx.PostEvent(frame, CancelRefreshPlantUmlEvent())
self.user_aborted()
else:
if self.plantuml_text:
self.error_msg = "" # clear any annoying error message, so can see bmp
self.Refresh()
else:
self.error_msg = PLANTUML_VIEW_INITAL_HELP
# if self.working:
# event.Skip()
# return
# self.working = True
#
# keycode = event.GetKeyCode() # http://www.wxpython.org/docs/api/wx.KeyEvent-class.html
# self.working = False
event.Skip()
def onKeyUp(self, event): # ANDY
self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
event.Skip()
def getWidth(self):
return self.maxWidth
def getHeight(self):
return self.maxHeight
def OnErase(self, event): # ANDY
pass
def OnWheelScroll(self, event):
## This is an example of what to do for the EVT_MOUSEWHEEL event,
## but since wx.ScrolledWindow does this already it's not
## necessary to do it ourselves.
#
# ANDY
# But since I set the ScrollRate to 1
# in order for panning to work nicely
# scrolling is too slow. So invoke this code!!
#
# dbg(f"OnWheelScroll {self.GetScrollPixelsPerUnit()}")
if event.ControlDown():
event.Skip()
return
# Version 1 - too jumpy
# self.SetScrollRate(20, 20)
# Version 2 - nicer, but need a common routine callable from multiple places
#
# oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
# oldscrolly = self.GetScrollPos(wx.VERTICAL)
# # dbg(oldscrollx)
# # dbg(oldscrolly)
#
# # How to adjust ? take into account the 1 to 20 factor, as well as the zoom level
# delta = event.GetWheelDelta()
# rot = event.GetWheelRotation()
# # dbg(delta)
# # dbg(rot)
# # if rot > 0:
# if self.GetScrollPixelsPerUnit()[0] != 20:
# dbg(self.GetScrollPixelsPerUnit())
#
# # dbg(oldscrollx)
# # dbg(oldscrolly)
# oldscrollx = oldscrollx /20#- (1 * rot) / 20 * self.zoomscale
# oldscrolly = oldscrolly /20#- (1 * rot) / 20 * self.zoomscale
# # oldscrollx = oldscrollx + 20 * rot / self.zoomscale
# # oldscrolly = oldscrolly + 20 * rot / self.zoomscale
# # dbg(oldscrollx)
# # dbg(oldscrolly)
#
# self.SetScrollbars(
# 20, 20, # each scroll unit is 1 pixel, meaning scroll units match client coord units
# self.GetVirtualSize()[0] / 20, self.GetVirtualSize()[1] / 20, # new virtual size
# oldscrollx, oldscrolly, # new scroll positions
# noRefresh=True
# )
# Version 3
self.SetScrollRateSmart(20)
# Old version 0 - complex and buggy and jumpy
#
# delta = event.GetWheelDelta()
# rot = event.GetWheelRotation()
# linesPer = event.GetLinesPerAction()
# # print delta, rot, linesPer
# linesPer *= 20 # ANDY trick to override the small ScrollRate
# ws = self.mywheelscroll
# ws = ws + rot
# lines = ws / delta
# ws = ws - lines * delta
# self.mywheelscroll = ws
# if lines != 0:
# lines = lines * linesPer
# vsx, vsy = self.GetViewStart()
# scrollTo = vsy - lines
# self.Scroll(-1, scrollTo)
event.Skip()
def OnResize(self, event): # ANDY interesting - GetVirtualSize grows when resize frame
self.DebugSizez("resize")
if self.NeedToClear() and self.IsShownOnScreen():
self.clear_whole_window = True
self.Refresh()
def CalcVirtSize(self):
# VirtualSize is essentially the visible picture
return (self.maxWidth * self.zoomscale, self.maxHeight * self.zoomscale)
def NeedToClear(self):
# Since VirtualSize auto grows when resize frame, can't rely on it to know if client area is bigger than visible pic.
# Need to rederive the original VirtualSize set when zoom calculated rather than relying on calls to self.GetVirtualSize()
return (
self.GetClientSize()[0] > self.CalcVirtSize()[0]
or self.GetClientSize()[1] > self.CalcVirtSize()[1]
)
def DebugSizez(self, fromwheremsg):
return
if self.NeedToClear():
msg = "!!!!!!! "
else:
msg = "! "
print(
msg
+ "(%s) visible %d NeedToClear %s GetVirtualSize %d getWidth %d GetClientSize %d self.GetViewStart() %d self.maxWidth %d "
% (
fromwheremsg,
self.IsShownOnScreen(),
self.NeedToClear(),
self.GetVirtualSize()[0],
self.getWidth(),
self.GetClientSize()[0],
self.GetViewStart()[0],
self.maxWidth,
)
)
def OnPaint(self, event): # ANDY
dc = wx.PaintDC(self)
self.PrepareDC(dc)
dc.SetUserScale(self.zoomscale, self.zoomscale)
# since we're not buffering in this case, we have to
# paint the whole window, potentially very time consuming.
self.DoDrawing(dc)
def Redraw(self, dc):
self.DoDrawing(dc)
def OnLeftDown(self, event): # ANDY some PAN ideas from http://code.google.com/p/pyslip/
"""Left mouse button down. Prepare for possible drag."""
if event.ShiftDown():
event.Skip()
return
click_posn = event.GetPosition()
self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
(self.last_drag_x, self.last_drag_y) = click_posn
event.Skip()
def OnLeftUp(self, event): # ANDY PAN
"""Left mouse button up."""
if event.ShiftDown():
event.Skip()
return
self.last_drag_x = self.last_drag_y = None
self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
# turn off drag
self.was_dragging = False
# force PAINT event to remove selection box (if required)
# self.Update()
event.Skip()
def OnMove(self, event): # ANDY PAN
"""Handle a mouse move (map drag).
event the mouse move event
"""
if event.ShiftDown():
event.Skip()
return
# for windows, set focus onto pyslip window
# linux seems to do this automatically
if sys.platform == "win32" and self.FindFocus() != self:
self.SetFocus()
# get current mouse position
(x, y) = event.GetPosition() | # from common.architecture_support import whoscalling2
# dbg(whoscalling2())
# self.RaiseMousePositionEvent((x, y))
if event.Dragging() and event.LeftIsDown():
# are we doing box select?
if not self.last_drag_x is None:
# no, just a map drag
self.was_dragging = True
dx = self.last_drag_x - x
dy = self.last_drag_y - y
# dx /= 20
# dy /= 20
# dbg(dx)
# dbg(dy)
# print "PAN %d %d" % (dx, dy)
# print self.GetViewStart()
currx, curry = self.GetViewStart()
self.Scroll(
currx + dx, curry + dy
) # Note The positions are in scroll units, not pixels, so to convert to pixels you will have to multiply by the number of pixels per scroll increment. If either parameter is -1, that position will be ignored (no change in that direction).
# print "Scroll pan %d %d" % (currx+dx, curry+dy)
# adjust remembered X,Y
self.last_drag_x = x
self.last_drag_y = y
# redraw client area
self.Update()
def DoDrawing(self, dc, printing=False):
# dbg(f"DoDrawing {len(self.curLine)}")
if self.clear_whole_window:
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
self.clear_whole_window = False
if self.bmp:
dc.DrawBitmap(self.bmp, 0, 0, False) # false means don't use mask
# dc.SetTextForeground('BLUE')
# text = "UML via Pynsource and PlantUML"
# dc.DrawText(text, 2, 2)
self.DrawSavedLines(dc)
if self.error_msg:
dc.DrawText(self.error_msg, 2, 2)
if self.fetching_msg and self.time_taken_fetching > 0.5:
"""
Text is never drawn with the current pen. It's drawn with the current
text color. Try
dc.SetTextForeground((255,255,0))
This is a historical implementation detail in Windows GDI. The pen is
used for lines, the brush is used for fills, and text had its own
attributes.
"""
# dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
# dc.SetBrush(wx.Brush("RED"))
# dc.SetTextForeground((204, 0, 0)) # red
# dc.SetTextForeground((204, 102, 0)) # dark orange
dc.SetTextForeground((255, 255, 255)) # white
dc.SetTextBackground((0, 0, 0)) # black
dc.SetBackgroundMode(wx.SOLID)
dc.DrawText(self.fetching_msg, 2, 2)
def DrawSavedLines(self, dc): # PEN DRAWING
dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
for line in self.lines:
for coords in line:
dc.DrawLine(*map(int, coords))
def OnClearPenLines(self, event):
self.clear_pen_lines()
def clear_pen_lines(self):
self.lines = []
self.Refresh()
def SetXY(self, event): # PEN DRAWING
self.x, self.y = self.ConvertEventCoords(event)
def ConvertEventCoords(self, event): # PEN DRAWING
newpos = self.CalcUnscrolledPosition(event.GetX(), event.GetY())
newpos = (
newpos[0] * self.GetScaleX() / self.zoomscale,
newpos[1] * self.GetScaleY() / self.zoomscale,
)
return newpos
def OnRightButtonEvent(self, event): # PEN DRAWING - ANDY
if event.ShiftDown():
self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
self.clear_pen_lines()
event.Skip()
def OnLeftButtonEvent(self, event): # PEN DRAWING
if event.ShiftDown():
self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
if event.LeftDown():
# self.SetScrollRate(1, 1) # works to slow the scrolling, but causes scroll jump to 0,0
self.SetScrollRateSmart(1) # smoother pan when scroll step is 1
# dbg(f"LeftDown {self.GetScrollPixelsPerUnit()}")
self.SetFocus()
self.SetXY(event)
self.curLine = []
self.CaptureMouse()
self.drawing = True
elif event.Dragging() and self.drawing:
# print("dragging.....")
# ANDY UPDATE 2019 - Drawing to wx.ClientDC doesn't work well these days and you only
# see the result when an on paint occurs much later - and often cannot force the paint?
# instead, issue a Refresh() which triggers a paint, and draw there instead.
coords = (self.x, self.y) + self.ConvertEventCoords(event)
self.curLine.append(coords)
self.lines.append(self.curLine)
self.curLine = []
self.SetXY(event) # reset line drawing start point to current mouse pos
self.Refresh()
# Version 0. Old version.draw directly to a wx.ClientDC
# dc = wx.ClientDC(self)
# self.PrepareDC(dc)
# dc.SetUserScale(self.zoomscale, self.zoomscale)
#
# dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
# coords = (self.x, self.y) + self.ConvertEventCoords(event)
# self.curLine.append(coords) # For when we are not double buffering #ANDY
# dc.DrawLine(*coords)
# self.SetXY(event)
#
# Failed Hacks to try and make version 0 work.
#
# self.Refresh() # ANDY added, pheonix
# # frame = self.GetTopLevelParent()
# # frame.Layout() # needed when running phoenix
# self.Update() # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
# wx.SafeYield() # Needed on Mac to see result if in a compute loop.
# self.repaint_needed = True
elif event.LeftUp() and self.drawing:
self.lines.append(self.curLine)
self.curLine = []
self.ReleaseMouse()
self.drawing = False
self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
self.Refresh() # ANDY added, pheonix
class TestFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
ImageViewer(self)
class App(wx.App):
def OnInit(self):
frame = TestFrame(None, title="Andy Image Viewer")
frame.Show(True)
frame.Centre()
return True
if __name__ == "__main__":
app = App(0)
app.MainLoop() | random_line_split | |
gui_imageviewer.py | # Image viewer
import wx
import sys
import io
import urllib.request, urllib.parse, urllib.error
from urllib.request import Request, urlopen
import asyncio
from io import StringIO
# from pydbg import dbg
from gui.coord_utils import ZoomInfo
from typing import List, Set, Dict, Tuple, Optional
from media import images
from gui.settings import PRO_EDITION
from generate_code.gen_plantuml import plant_uml_create_png_and_return_image_url_async
from dialogs.DialogPlantUmlText import DialogPlantUmlText
from common.dialog_dir_path import dialog_path_pyinstaller_push, dialog_path_pyinstaller_pop
from common.messages import *
import datetime
from app.settings import CancelRefreshPlantUmlEvent, EVT_CANCEL_REFRESH_PLANTUML_EVENT
from common.url_to_data import url_to_data
import logging
from common.logger import config_log
log = logging.getLogger(__name__)
config_log(log)
ALLOW_DRAWING = True
DEFAULT_IMAGE_SIZE = (21, 21) # used to be 2000, 2000 for some reason
BMP_EXTRA_MARGIN = 20 # margin for plantuml images to allow scrolling them fully into view
unregistered = not PRO_EDITION
class ImageViewer(wx.ScrolledWindow):
def __init__(self, parent, id=-1, size=wx.DefaultSize):
wx.ScrolledWindow.__init__(self, parent, id, (0, 0), size=size, style=wx.SUNKEN_BORDER)
self.lines = []
self.maxWidth, self.maxHeight = DEFAULT_IMAGE_SIZE
self.x = self.y = 0
self.curLine = []
self.drawing = False
self.SetBackgroundColour("WHITE") # for areas of the frame not covered by the bmp
# TODO these areas don't get refreshed properly when scrolling when pen marks are around, we only refresh bmp area and when bmp area < client window we get artifacts.
bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
self.bmp = bmp
self.bmp_transparent_ori = None
self.SetVirtualSize((self.maxWidth, self.maxHeight))
self.SetScrollRate(1, 1) # set the ScrollRate to 1 in order for panning to work nicely
self.zoomscale = 1.0
self.clear_whole_window = False
if ALLOW_DRAWING:
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftButtonEvent)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftButtonEvent)
self.Bind(wx.EVT_MOTION, self.OnLeftButtonEvent)
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonEvent)
# self.Bind(wx.EVT_IDLE, self.OnIdle) # ANDY HACK
self.Bind(wx.EVT_PAINT, self.OnPaint)
# self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
self.Bind(wx.EVT_MOUSEWHEEL, self.OnWheelScroll)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMove)
self.Bind(wx.EVT_SIZE, self.OnResize)
self.Bind(wx.EVT_KEY_DOWN, self.onKeyPress)
self.Bind(wx.EVT_KEY_UP, self.onKeyUp)
self.Bind(wx.EVT_CHAR, self.onKeyChar) # 2019 added
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightButtonMenu)
self.was_dragging = False # True if dragging map
self.move_dx = 0 # drag delta values
self.move_dy = 0
self.last_drag_x = None # previous drag position
self.last_drag_y = None
self.SetScrollbars(1, 1, int(self.GetVirtualSize()[0]), int(self.GetVirtualSize()[1]))
self.mywheelscroll = 0
self.popupmenu = None
# self.repaint_needed = False
self.working = False # key press re-entrancy protection
self.plantuml_text = ""
# Image fetching states and flags
self.error_msg = "" # message to display on big screen, when there is an error
self.fetching_msg = "" # message to display on big screen, use as flag for when working
self.fetching_started_time = None
@property
def working_fetching(self): # stop multiple plant uml refreshes
return self.fetching_msg != "" # if there is a message, that's a flag that we are http-ing
@property
def time_taken_fetching(self) -> float:
raw_diff = datetime.datetime.utcnow() - self.fetching_started_time
return raw_diff.total_seconds()
def clear(self):
self.error_msg = PLANTUML_VIEW_INITAL_HELP
self.fetching_msg = ""
self.bmp, dc = self._CreateNewWhiteBmp(self.maxWidth, self.maxHeight)
self.bmp_transparent_ori = None
self.plantuml_text = ""
self.lines = []
self.Refresh()
def clear_cos_connection_error(self, msg=""):
self.clear()
self.error_msg = PLANTUML_VIEW_INTERNET_FAIL % msg
# print(plant_uml_create_png_and_return_image_url.cache_info())
plant_uml_create_png_and_return_image_url_async.cache_clear()
url_to_data.cache_clear()
self.Refresh()
def user_aborted(self):
self.error_msg = PLANTUML_VIEW_USER_ABORT
plant_uml_create_png_and_return_image_url_async.cache_clear()
url_to_data.cache_clear()
self.Refresh()
def render_in_progress(self, rendering: bool, frame):
if rendering:
self.error_msg = ""
self.fetching_msg = PLANTUML_VIEW_FETCHING_MSG
self.fetching_started_time = datetime.datetime.utcnow()
else:
self.fetching_msg = ""
# Update PlantUML view, guarding against the situation where when shutting down app
# and killing pending tasks, the window may not exist anymore
try:
self.Refresh()
wx.SafeYield() # Needed to "breathe" and refresh the UI
# print("warning use of safe yield in image viewer")
except RuntimeError:
pass # avoid error when shutting down tasks
async def ViewImage(self, thefile="", url=""):
"""Loads url or file and sets .bmp and .bmp_transparent_ori, the img is discarded"""
self.error_msg = None
if thefile:
img = wx.Image(thefile, wx.BITMAP_TYPE_ANY)
bmp = wx.Bitmap(img) # ANDY added 2019
elif url:
# print(url_to_data.cache_info())
try:
data, status = await url_to_data(url)
log.info(f"(2nd, image grabbing) Response from plant_uml_server status_code {status}")
except asyncio.TimeoutError as e: # there is no string repr of this exception
self.clear_cos_connection_error(msg="(timeout)")
url_to_data.cache_clear() # so if retry you won't get the same error
log.error("TimeoutError getting plantuml IMAGE")
return
if status != 200:
self.clear_cos_connection_error(msg=f"(bad response {status})")
log.error(f"Error getting plantuml IMAGE, (bad response {status})")
return
stream = io.BytesIO(data)
img = wx.Image(stream)
bmp = wx.Bitmap(img)
# try:
# bmp = img.ConvertToBitmap()
# except Exception as e:
# print(e)
# return
self.maxWidth, self.maxHeight = bmp.GetWidth(), bmp.GetHeight()
self.maxHeight += BMP_EXTRA_MARGIN # stop bitmaps getting slightly clipped
# dbg(bmp)
# ANDY bmp.HasAlpha() does not work, since wx.Image has this method but wx.Bitmap
# does not. But Bitmaps have some alpha channel concepts in them too...?
# Render bmp to a second white bmp to remove transparency effects
# if False and bmp.HasAlpha():
if img.HasAlpha():
self.bmp_transparent_ori = bmp
bmp2 = wx.Bitmap(bmp.GetWidth(), bmp.GetHeight())
dc = wx.MemoryDC()
dc.SelectObject(bmp2)
dc.Clear()
dc.DrawBitmap(bmp, 0, 0, True)
dc.SelectObject(wx.NullBitmap)
self.bmp = bmp2
else:
self.bmp_transparent_ori = None
self.bmp = bmp
# def OnIdle(self, event):
# """Idle Handler."""
# if self.working:
# dbg("re-entrancy avoided")
# return
# self.working = True
# if self.repaint_needed:
# dbg("repaint needed mate")
# self.Refresh()
# self.Update() # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
# wx.SafeYield() # Needed on Mac to see result if in a compute loop.
# self.repaint_needed = False
# self.working = 0
def _CreateNewWhiteBmp(self, width, height, wantdc=False):
bmp = wx.Bitmap(width, height)
# Could simply return here, but bitmap would be black (or a bit random, under linux)
dc = wx.MemoryDC()
dc.SelectObject(bmp)
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
# dbg(wantdc)
if wantdc: # just in case want to continue drawing
return bmp, dc
else:
dc.SelectObject(wx.NullBitmap)
return bmp, dc
def OnHandleSaveImage(self, event):
pass # pro feature
def OnHandleSaveImagePreserveTransparencies(self, event):
pass # pro feature
def OnHandleSaveImageInclDoodles(self, event):
pass # pro feature
def OnHandleQuickLoadImage(self, event):
self.ViewImage(FILE)
def OnHandleQuickLoadFromYumlUrl(self, event):
baseUrl = "http://yuml.me/diagram/dir:lr;scruffy/class/"
yuml_txt = (
"[Customer]+1->*[Order],[Order]++1-items >*[LineItem],[Order]-0..1>[PaymentMethod]"
)
url = baseUrl + urllib.parse.quote(yuml_txt)
self.ViewImage(url=url)
def OnRightButtonMenu(self, event): # Menu
if event.ShiftDown():
event.Skip()
return
"""
Accelerator tables need unique ids, whereas direct menuitem binding with Bind(...source=menuitem)
doesn't care about ids and can thus use wx.ID_ANY (which is always -1)
Use wx.NewIdRef() if you want a real fresh id.
"""
x, y = event.GetPosition()
frame = self.GetTopLevelParent()
image = images.pro.GetBitmap() if unregistered else None
if self.popupmenu:
self.popupmenu.Destroy() # wx.Menu objects need to be explicitly destroyed (e.g. menu.Destroy()) in this situation. Otherwise, they will rack up the USER Objects count on Windows; eventually crashing a program when USER Objects is maxed out. -- U. Artie Eoff http://wiki.wxpython.org/index.cgi/PopupMenuOnRightClick
self.popupmenu = wx.Menu() # Create a menu
if event.AltDown():
# Debug menu items
item = self.popupmenu.Append(wx.ID_ANY, "Load Image...")
frame.Bind(wx.EVT_MENU, self.OnHandleFileLoad, item)
item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Disk")
frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadImage, item)
item = self.popupmenu.Append(wx.ID_ANY, "Quick Load Image from Yuml Url")
frame.Bind(wx.EVT_MENU, self.OnHandleQuickLoadFromYumlUrl, item)
self.popupmenu.AppendSeparator()
item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG...")
frame.Bind(wx.EVT_MENU, self.OnHandleSaveImage, item)
frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
if image:
item.SetBitmap(image)
if self.bmp_transparent_ori:
item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (preserve transparent areas)")
frame.Bind(wx.EVT_MENU, self.OnHandleSaveImagePreserveTransparencies, item)
frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
if image:
item.SetBitmap(image)
item = self.popupmenu.Append(wx.ID_ANY, "Save Image as PNG... (incl. pen doodles)")
frame.Bind(wx.EVT_MENU, self.OnHandleSaveImageInclDoodles, item)
frame.Bind(wx.EVT_UPDATE_UI, self.OnPro_update, item)
if image:
item.SetBitmap(image)
self.popupmenu.AppendSeparator()
item = self.popupmenu.Append(wx.ID_ANY, "Clear pen doodles (SHIFT drag to create)\tE")
frame.Bind(wx.EVT_MENU, self.OnClearPenLines, item)
self.popupmenu.AppendSeparator()
item = self.popupmenu.Append(wx.ID_ANY, "View PlantUML markup...")
frame.Bind(wx.EVT_MENU, self.OnViewPlantUmlMarkup, item)
self.popupmenu.AppendSeparator()
item = self.popupmenu.Append(wx.ID_ANY, "Cancel")
frame.PopupMenu(self.popupmenu, wx.Point(x, y))
def OnPro_update(self, event):
event.Enable(not unregistered)
def OnViewPlantUmlMarkup(self, event, alt_down=False):
mouse_state: wx.MouseState = wx.GetMouseState()
if mouse_state.AltDown():
print(self.plantuml_text)
def display_dialog(txt_plantuml):
"""
Displays dialog for editing comments
Args:
comment: comment string
Returns: (result, comment)
"""
class EditDialog(DialogPlantUmlText):
# Custom dialog built via wxformbuilder - subclass it first, to hook up event handlers
def OnClassNameEnter(self, event):
self.EndModal(wx.ID_OK)
# change cwd so that dialog can find the 'pro' image jpg which is relative to dialogs/
# when deployed via pyinstaller, this path is a bit tricky to find, so use this func.
dir = dialog_path_pyinstaller_push(frame = self)
try:
dialog = EditDialog(None)
dialog.txt_plantuml.Value = txt_plantuml
dialog.txt_plantuml.SetFocus()
dialog.txt_plantuml.Enable(not unregistered)
dialog.ShowModal()
# dialog.Show()
dialog.Destroy()
finally:
dialog_path_pyinstaller_pop()
display_dialog(self.plantuml_text)
# wx.MessageBox(f"PRO mode lets you copy the PlantUML text to the clipboard\n\n{self.plantuml_text}")
def OnHandleFileLoad(self, event):
frame = self.GetTopLevelParent()
wildcard = (
"Images (*.png; *.jpeg; *.jpg; *.bmp)|*.png;*.jpeg;*.jpg;*.bmp|" "All files (*.*)|*.*"
)
dlg = wx.FileDialog(
parent=frame,
message="choose",
defaultDir=".",
defaultFile="",
wildcard=wildcard,
style=wx.FD_OPEN,
pos=wx.DefaultPosition,
)
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
self.ViewImage(filename)
def SetScrollRateSmart(self, newstep=None, printinfo=False):
"""changing scrollwindow scroll unit without scroll pos changing - utility.
There is a slight jump when going from small scroll step e.g. 1 to large e.g. 20
because the resolution isn't the same (scroll step might be 3 out of 10 instead of the
more precise 30 out of 100). I couldn't get rid of this jump, even by fiddling with
the virtual size - might just have to live with it.
and
"""
oldstep = self.GetScrollPixelsPerUnit()[0]
oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
oldscrolly = self.GetScrollPos(wx.VERTICAL)
oldvirtx = self.GetVirtualSize()[0]
oldvirty = self.GetVirtualSize()[1]
# rot = event.GetWheelRotation()
if printinfo:
print(f"\nIN step {oldstep} newstep {newstep} old scroll {oldscrollx}, {oldscrolly} virt {oldvirtx}, {oldvirty}")
if newstep is not None:
if oldstep == newstep:
if printinfo:
print(f"Nothing to do, step of {newstep} already set.")
else:
q = newstep / oldstep # min(1, newstep)
newscrollx = int(oldscrollx / q)
newscrolly = int(oldscrolly / q)
# newvirtx = oldvirtx / q
# newvirty = oldvirty / q
# Aha - image size * step => virtual bounds
newvirtx = int(self.maxWidth / newstep * self.zoomscale)
newvirty = int(self.maxHeight / newstep * self.zoomscale)
if printinfo:
print(f"OUT step {newstep} new scroll {newscrollx}, {newscrolly} virt {newvirtx}, {newvirty} q {q}")
self.SetScrollbars(
int(newstep), int(newstep),
int(newvirtx), int(newvirty), # new virtual size
int(newscrollx), int(newscrolly), # new scroll positions
noRefresh=True)
# self.Refresh()
if printinfo:
print(self.GetVirtualSize())
def onKeyChar(self, event):
if event.GetKeyCode() >= 256:
event.Skip()
return
if self.working:
event.Skip()
return
self.working = True
keycode = chr(event.GetKeyCode())
# print("imgkeycode", keycode)
if keycode == "a":
self.SetScrollRateSmart(newstep=None, printinfo=True)
# elif keycode in ["1", "2", "3", "4", "5", "6", "7", "8"]:
# todisplay = ord(keycode) - ord("1")
# self.snapshot_mgr.Restore(todisplay) # snapshot 1 becomes 0 as a param
# self.mega_refresh()
elif keycode == "d":
self.SetScrollRateSmart(newstep=20, printinfo=True)
elif keycode == "s":
self.SetScrollRateSmart(newstep=1, printinfo=True)
elif keycode == "e":
self.clear_pen_lines()
self.working = False
def onKeyPress(self, event): # ANDY
keycode = event.GetKeyCode()
# dbg(keycode)
if event.ShiftDown():
self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
if keycode == wx.WXK_ESCAPE:
if self.working_fetching:
frame = self.GetTopLevelParent()
frame.SetStatusText("ESC key detected: PlantUML render Aborted")
wx.PostEvent(frame, CancelRefreshPlantUmlEvent())
self.user_aborted()
else:
if self.plantuml_text:
self.error_msg = "" # clear any annoying error message, so can see bmp
self.Refresh()
else:
self.error_msg = PLANTUML_VIEW_INITAL_HELP
# if self.working:
# event.Skip()
# return
# self.working = True
#
# keycode = event.GetKeyCode() # http://www.wxpython.org/docs/api/wx.KeyEvent-class.html
# self.working = False
event.Skip()
def onKeyUp(self, event): # ANDY
self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
event.Skip()
def getWidth(self):
return self.maxWidth
def getHeight(self):
return self.maxHeight
def OnErase(self, event): # ANDY
pass
def OnWheelScroll(self, event):
## This is an example of what to do for the EVT_MOUSEWHEEL event,
## but since wx.ScrolledWindow does this already it's not
## necessary to do it ourselves.
#
# ANDY
# But since I set the ScrollRate to 1
# in order for panning to work nicely
# scrolling is too slow. So invoke this code!!
#
# dbg(f"OnWheelScroll {self.GetScrollPixelsPerUnit()}")
if event.ControlDown():
event.Skip()
return
# Version 1 - too jumpy
# self.SetScrollRate(20, 20)
# Version 2 - nicer, but need a common routine callable from multiple places
#
# oldscrollx = self.GetScrollPos(wx.HORIZONTAL)
# oldscrolly = self.GetScrollPos(wx.VERTICAL)
# # dbg(oldscrollx)
# # dbg(oldscrolly)
#
# # How to adjust ? take into account the 1 to 20 factor, as well as the zoom level
# delta = event.GetWheelDelta()
# rot = event.GetWheelRotation()
# # dbg(delta)
# # dbg(rot)
# # if rot > 0:
# if self.GetScrollPixelsPerUnit()[0] != 20:
# dbg(self.GetScrollPixelsPerUnit())
#
# # dbg(oldscrollx)
# # dbg(oldscrolly)
# oldscrollx = oldscrollx /20#- (1 * rot) / 20 * self.zoomscale
# oldscrolly = oldscrolly /20#- (1 * rot) / 20 * self.zoomscale
# # oldscrollx = oldscrollx + 20 * rot / self.zoomscale
# # oldscrolly = oldscrolly + 20 * rot / self.zoomscale
# # dbg(oldscrollx)
# # dbg(oldscrolly)
#
# self.SetScrollbars(
# 20, 20, # each scroll unit is 1 pixel, meaning scroll units match client coord units
# self.GetVirtualSize()[0] / 20, self.GetVirtualSize()[1] / 20, # new virtual size
# oldscrollx, oldscrolly, # new scroll positions
# noRefresh=True
# )
# Version 3
self.SetScrollRateSmart(20)
# Old version 0 - complex and buggy and jumpy
#
# delta = event.GetWheelDelta()
# rot = event.GetWheelRotation()
# linesPer = event.GetLinesPerAction()
# # print delta, rot, linesPer
# linesPer *= 20 # ANDY trick to override the small ScrollRate
# ws = self.mywheelscroll
# ws = ws + rot
# lines = ws / delta
# ws = ws - lines * delta
# self.mywheelscroll = ws
# if lines != 0:
# lines = lines * linesPer
# vsx, vsy = self.GetViewStart()
# scrollTo = vsy - lines
# self.Scroll(-1, scrollTo)
event.Skip()
def OnResize(self, event): # ANDY interesting - GetVirtualSize grows when resize frame
self.DebugSizez("resize")
if self.NeedToClear() and self.IsShownOnScreen():
self.clear_whole_window = True
self.Refresh()
def CalcVirtSize(self):
# VirtualSize is essentially the visible picture
return (self.maxWidth * self.zoomscale, self.maxHeight * self.zoomscale)
def NeedToClear(self):
# Since VirtualSize auto grows when resize frame, can't rely on it to know if client area is bigger than visible pic.
# Need to rederive the original VirtualSize set when zoom calculated rather than relying on calls to self.GetVirtualSize()
return (
self.GetClientSize()[0] > self.CalcVirtSize()[0]
or self.GetClientSize()[1] > self.CalcVirtSize()[1]
)
def DebugSizez(self, fromwheremsg):
return
if self.NeedToClear():
msg = "!!!!!!! "
else:
msg = "! "
print(
msg
+ "(%s) visible %d NeedToClear %s GetVirtualSize %d getWidth %d GetClientSize %d self.GetViewStart() %d self.maxWidth %d "
% (
fromwheremsg,
self.IsShownOnScreen(),
self.NeedToClear(),
self.GetVirtualSize()[0],
self.getWidth(),
self.GetClientSize()[0],
self.GetViewStart()[0],
self.maxWidth,
)
)
def OnPaint(self, event): # ANDY
dc = wx.PaintDC(self)
self.PrepareDC(dc)
dc.SetUserScale(self.zoomscale, self.zoomscale)
# since we're not buffering in this case, we have to
# paint the whole window, potentially very time consuming.
self.DoDrawing(dc)
def Redraw(self, dc):
self.DoDrawing(dc)
def OnLeftDown(self, event): # ANDY some PAN ideas from http://code.google.com/p/pyslip/
"""Left mouse button down. Prepare for possible drag."""
if event.ShiftDown():
|
click_posn = event.GetPosition()
self.SetCursor(wx.Cursor(wx.CURSOR_HAND))
(self.last_drag_x, self.last_drag_y) = click_posn
event.Skip()
def OnLeftUp(self, event): # ANDY PAN
"""Left mouse button up."""
if event.ShiftDown():
event.Skip()
return
self.last_drag_x = self.last_drag_y = None
self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
# turn off drag
self.was_dragging = False
# force PAINT event to remove selection box (if required)
# self.Update()
event.Skip()
def OnMove(self, event): # ANDY PAN
"""Handle a mouse move (map drag).
event the mouse move event
"""
if event.ShiftDown():
event.Skip()
return
# for windows, set focus onto pyslip window
# linux seems to do this automatically
if sys.platform == "win32" and self.FindFocus() != self:
self.SetFocus()
# get current mouse position
(x, y) = event.GetPosition()
# from common.architecture_support import whoscalling2
# dbg(whoscalling2())
# self.RaiseMousePositionEvent((x, y))
if event.Dragging() and event.LeftIsDown():
# are we doing box select?
if not self.last_drag_x is None:
# no, just a map drag
self.was_dragging = True
dx = self.last_drag_x - x
dy = self.last_drag_y - y
# dx /= 20
# dy /= 20
# dbg(dx)
# dbg(dy)
# print "PAN %d %d" % (dx, dy)
# print self.GetViewStart()
currx, curry = self.GetViewStart()
self.Scroll(
currx + dx, curry + dy
) # Note The positions are in scroll units, not pixels, so to convert to pixels you will have to multiply by the number of pixels per scroll increment. If either parameter is -1, that position will be ignored (no change in that direction).
# print "Scroll pan %d %d" % (currx+dx, curry+dy)
# adjust remembered X,Y
self.last_drag_x = x
self.last_drag_y = y
# redraw client area
self.Update()
def DoDrawing(self, dc, printing=False):
# dbg(f"DoDrawing {len(self.curLine)}")
if self.clear_whole_window:
dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
dc.Clear()
self.clear_whole_window = False
if self.bmp:
dc.DrawBitmap(self.bmp, 0, 0, False) # false means don't use mask
# dc.SetTextForeground('BLUE')
# text = "UML via Pynsource and PlantUML"
# dc.DrawText(text, 2, 2)
self.DrawSavedLines(dc)
if self.error_msg:
dc.DrawText(self.error_msg, 2, 2)
if self.fetching_msg and self.time_taken_fetching > 0.5:
"""
Text is never drawn with the current pen. It's drawn with the current
text color. Try
dc.SetTextForeground((255,255,0))
This is a historical implementation detail in Windows GDI. The pen is
used for lines, the brush is used for fills, and text had its own
attributes.
"""
# dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
# dc.SetBrush(wx.Brush("RED"))
# dc.SetTextForeground((204, 0, 0)) # red
# dc.SetTextForeground((204, 102, 0)) # dark orange
dc.SetTextForeground((255, 255, 255)) # white
dc.SetTextBackground((0, 0, 0)) # black
dc.SetBackgroundMode(wx.SOLID)
dc.DrawText(self.fetching_msg, 2, 2)
def DrawSavedLines(self, dc): # PEN DRAWING
dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
for line in self.lines:
for coords in line:
dc.DrawLine(*map(int, coords))
def OnClearPenLines(self, event):
self.clear_pen_lines()
def clear_pen_lines(self):
self.lines = []
self.Refresh()
def SetXY(self, event): # PEN DRAWING
self.x, self.y = self.ConvertEventCoords(event)
def ConvertEventCoords(self, event): # PEN DRAWING
newpos = self.CalcUnscrolledPosition(event.GetX(), event.GetY())
newpos = (
newpos[0] * self.GetScaleX() / self.zoomscale,
newpos[1] * self.GetScaleY() / self.zoomscale,
)
return newpos
def OnRightButtonEvent(self, event): # PEN DRAWING - ANDY
if event.ShiftDown():
self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
self.clear_pen_lines()
event.Skip()
def OnLeftButtonEvent(self, event): # PEN DRAWING
if event.ShiftDown():
self.SetCursor(wx.Cursor(wx.CURSOR_PENCIL))
if event.LeftDown():
# self.SetScrollRate(1, 1) # works to slow the scrolling, but causes scroll jump to 0,0
self.SetScrollRateSmart(1) # smoother pan when scroll step is 1
# dbg(f"LeftDown {self.GetScrollPixelsPerUnit()}")
self.SetFocus()
self.SetXY(event)
self.curLine = []
self.CaptureMouse()
self.drawing = True
elif event.Dragging() and self.drawing:
# print("dragging.....")
# ANDY UPDATE 2019 - Drawing to wx.ClientDC doesn't work well these days and you only
# see the result when an on paint occurs much later - and often cannot force the paint?
# instead, issue a Refresh() which triggers a paint, and draw there instead.
coords = (self.x, self.y) + self.ConvertEventCoords(event)
self.curLine.append(coords)
self.lines.append(self.curLine)
self.curLine = []
self.SetXY(event) # reset line drawing start point to current mouse pos
self.Refresh()
# Version 0. Old version.draw directly to a wx.ClientDC
# dc = wx.ClientDC(self)
# self.PrepareDC(dc)
# dc.SetUserScale(self.zoomscale, self.zoomscale)
#
# dc.SetPen(wx.Pen("MEDIUM FOREST GREEN", 4))
# coords = (self.x, self.y) + self.ConvertEventCoords(event)
# self.curLine.append(coords) # For when we are not double buffering #ANDY
# dc.DrawLine(*coords)
# self.SetXY(event)
#
# Failed Hacks to try and make version 0 work.
#
# self.Refresh() # ANDY added, pheonix
# # frame = self.GetTopLevelParent()
# # frame.Layout() # needed when running phoenix
# self.Update() # or wx.SafeYield() # Without this the nodes don't paint during a "L" layout (edges do!?)
# wx.SafeYield() # Needed on Mac to see result if in a compute loop.
# self.repaint_needed = True
elif event.LeftUp() and self.drawing:
self.lines.append(self.curLine)
self.curLine = []
self.ReleaseMouse()
self.drawing = False
self.SetCursor(wx.Cursor(wx.CURSOR_DEFAULT))
self.Refresh() # ANDY added, pheonix
class TestFrame(wx.Frame):
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
ImageViewer(self)
class App(wx.App):
def OnInit(self):
frame = TestFrame(None, title="Andy Image Viewer")
frame.Show(True)
frame.Centre()
return True
if __name__ == "__main__":
app = App(0)
app.MainLoop()
| event.Skip()
return | conditional_block |
api_op_UpdateAnomalySubscription.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package costexplorer
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates an existing cost anomaly subscription. Specify the fields that you want
// to update. Omitted fields are unchanged. The JSON below describes the generic
// construct for each type. See Request Parameters (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_UpdateAnomalySubscription.html#API_UpdateAnomalySubscription_RequestParameters)
// for possible values as they apply to AnomalySubscription .
func (c *Client) UpdateAnomalySubscription(ctx context.Context, params *UpdateAnomalySubscriptionInput, optFns ...func(*Options)) (*UpdateAnomalySubscriptionOutput, error) {
if params == nil {
params = &UpdateAnomalySubscriptionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateAnomalySubscription", params, optFns, c.addOperationUpdateAnomalySubscriptionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateAnomalySubscriptionOutput)
out.ResultMetadata = metadata
return out, nil
}
type UpdateAnomalySubscriptionInput struct {
// A cost anomaly subscription Amazon Resource Name (ARN).
//
// This member is required.
SubscriptionArn *string
// The update to the frequency value that subscribers receive notifications.
Frequency types.AnomalySubscriptionFrequency
// A list of cost anomaly monitor ARNs.
MonitorArnList []string
// The update to the subscriber list.
Subscribers []types.Subscriber
// The new name of the subscription.
SubscriptionName *string
// (deprecated) The update to the threshold value for receiving notifications.
// This field has been deprecated. To update a threshold, use ThresholdExpression.
// Continued use of Threshold will be treated as shorthand syntax for a
// ThresholdExpression. You can specify either Threshold or ThresholdExpression,
// but not both.
//
// Deprecated: Threshold has been deprecated in favor of ThresholdExpression
Threshold *float64
// The update to the Expression (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html)
// object used to specify the anomalies that you want to generate alerts for. This
// supports dimensions and nested expressions. The supported dimensions are
// ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE ,
// corresponding to an anomaly’s TotalImpact and TotalImpactPercentage,
// respectively (see Impact (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Impact.html)
// for more details). The supported nested expression types are AND and OR . The
// match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between
// 0 and 10,000,000,000 in string format. You can specify either Threshold or
// ThresholdExpression, but not both. The following are examples of valid
// ThresholdExpressions:
// - Absolute threshold: { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }
// - Percentage threshold: { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }
// - AND two thresholds together: { "And": [ { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }, { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } } ] }
// - OR two thresholds together: { "Or": [ { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }, { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } } ] }
ThresholdExpression *types.Expression
noSmithyDocumentSerde
}
type UpdateAnomalySubscriptionOutput struct {
// A cost anomaly subscription ARN.
//
// This member is required.
SubscriptionArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateAnomalySubscriptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAnomalySubscription{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAnomalySubscription{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
| if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateAnomalySubscriptionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAnomalySubscription(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateAnomalySubscription(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ce",
OperationName: "UpdateAnomalySubscription",
}
}
type opUpdateAnomalySubscriptionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateAnomalySubscriptionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateAnomalySubscriptionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ce"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ce"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("ce")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opUpdateAnomalySubscriptionResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| return err
}
| conditional_block |
api_op_UpdateAnomalySubscription.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package costexplorer
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates an existing cost anomaly subscription. Specify the fields that you want
// to update. Omitted fields are unchanged. The JSON below describes the generic
// construct for each type. See Request Parameters (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_UpdateAnomalySubscription.html#API_UpdateAnomalySubscription_RequestParameters)
// for possible values as they apply to AnomalySubscription .
func (c *Client) UpdateAnomalySubscription(ctx context.Context, params *UpdateAnomalySubscriptionInput, optFns ...func(*Options)) (*UpdateAnomalySubscriptionOutput, error) {
if params == nil {
params = &UpdateAnomalySubscriptionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateAnomalySubscription", params, optFns, c.addOperationUpdateAnomalySubscriptionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateAnomalySubscriptionOutput)
out.ResultMetadata = metadata
return out, nil
}
type UpdateAnomalySubscriptionInput struct {
// A cost anomaly subscription Amazon Resource Name (ARN).
//
// This member is required.
SubscriptionArn *string
// The update to the frequency value that subscribers receive notifications.
Frequency types.AnomalySubscriptionFrequency
// A list of cost anomaly monitor ARNs.
MonitorArnList []string
// The update to the subscriber list.
Subscribers []types.Subscriber
// The new name of the subscription.
SubscriptionName *string
// (deprecated) The update to the threshold value for receiving notifications.
// This field has been deprecated. To update a threshold, use ThresholdExpression.
// Continued use of Threshold will be treated as shorthand syntax for a
// ThresholdExpression. You can specify either Threshold or ThresholdExpression,
// but not both.
//
// Deprecated: Threshold has been deprecated in favor of ThresholdExpression
Threshold *float64
// The update to the Expression (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html)
// object used to specify the anomalies that you want to generate alerts for. This
// supports dimensions and nested expressions. The supported dimensions are
// ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE ,
// corresponding to an anomaly’s TotalImpact and TotalImpactPercentage,
// respectively (see Impact (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Impact.html)
// for more details). The supported nested expression types are AND and OR . The
// match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between
// 0 and 10,000,000,000 in string format. You can specify either Threshold or
// ThresholdExpression, but not both. The following are examples of valid
// ThresholdExpressions:
// - Absolute threshold: { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }
// - Percentage threshold: { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }
// - AND two thresholds together: { "And": [ { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }, { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } } ] }
// - OR two thresholds together: { "Or": [ { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }, { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } } ] }
ThresholdExpression *types.Expression
noSmithyDocumentSerde
}
type UpdateAnomalySubscriptionOutput struct {
// A cost anomaly subscription ARN.
//
// This member is required.
SubscriptionArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateAnomalySubscriptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAnomalySubscription{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAnomalySubscription{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateAnomalySubscriptionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAnomalySubscription(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateAnomalySubscription(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ce",
OperationName: "UpdateAnomalySubscription",
}
}
type opUpdateAnomalySubscriptionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateAnomalySubscriptionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateAnomalySubscriptionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ce"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ce"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("ce")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
func ad | tack *middleware.Stack, options Options) error {
return stack.Serialize.Insert(&opUpdateAnomalySubscriptionResolveEndpointMiddleware{
EndpointResolver: options.EndpointResolverV2,
BuiltInResolver: &builtInResolver{
Region: options.Region,
UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint,
Endpoint: options.BaseEndpoint,
},
}, "ResolveEndpoint", middleware.After)
}
| dUpdateAnomalySubscriptionResolveEndpointMiddleware(s | identifier_name |
api_op_UpdateAnomalySubscription.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package costexplorer
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates an existing cost anomaly subscription. Specify the fields that you want
// to update. Omitted fields are unchanged. The JSON below describes the generic
// construct for each type. See Request Parameters (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_UpdateAnomalySubscription.html#API_UpdateAnomalySubscription_RequestParameters)
// for possible values as they apply to AnomalySubscription .
func (c *Client) UpdateAnomalySubscription(ctx context.Context, params *UpdateAnomalySubscriptionInput, optFns ...func(*Options)) (*UpdateAnomalySubscriptionOutput, error) {
if params == nil {
params = &UpdateAnomalySubscriptionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "UpdateAnomalySubscription", params, optFns, c.addOperationUpdateAnomalySubscriptionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*UpdateAnomalySubscriptionOutput)
out.ResultMetadata = metadata
return out, nil
}
type UpdateAnomalySubscriptionInput struct {
// A cost anomaly subscription Amazon Resource Name (ARN).
//
// This member is required.
SubscriptionArn *string
// The update to the frequency value that subscribers receive notifications.
Frequency types.AnomalySubscriptionFrequency
// A list of cost anomaly monitor ARNs.
MonitorArnList []string
// The update to the subscriber list.
Subscribers []types.Subscriber
// The new name of the subscription.
SubscriptionName *string
// (deprecated) The update to the threshold value for receiving notifications.
// This field has been deprecated. To update a threshold, use ThresholdExpression.
// Continued use of Threshold will be treated as shorthand syntax for a
// ThresholdExpression. You can specify either Threshold or ThresholdExpression,
// but not both.
//
// Deprecated: Threshold has been deprecated in favor of ThresholdExpression
Threshold *float64
// The update to the Expression (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html)
// object used to specify the anomalies that you want to generate alerts for. This
// supports dimensions and nested expressions. The supported dimensions are
// ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE ,
// corresponding to an anomaly’s TotalImpact and TotalImpactPercentage,
// respectively (see Impact (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Impact.html)
// for more details). The supported nested expression types are AND and OR . The
// match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between
// 0 and 10,000,000,000 in string format. You can specify either Threshold or
// ThresholdExpression, but not both. The following are examples of valid
// ThresholdExpressions:
// - Absolute threshold: { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }
// - Percentage threshold: { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }
// - AND two thresholds together: { "And": [ { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }, { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } } ] }
// - OR two thresholds together: { "Or": [ { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } }, { "Dimensions": { "Key":
// "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
// "Values": [ "100" ] } } ] }
ThresholdExpression *types.Expression
noSmithyDocumentSerde
}
type UpdateAnomalySubscriptionOutput struct {
// A cost anomaly subscription ARN.
//
// This member is required.
SubscriptionArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationUpdateAnomalySubscriptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAnomalySubscription{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAnomalySubscription{}, middleware.After)
if err != nil {
return err
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err | }
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addOpUpdateAnomalySubscriptionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAnomalySubscription(options.Region), middleware.Before); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opUpdateAnomalySubscription(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ce",
OperationName: "UpdateAnomalySubscription",
}
}
type opUpdateAnomalySubscriptionResolveEndpointMiddleware struct {
EndpointResolver EndpointResolverV2
BuiltInResolver builtInParameterResolver
}
func (*opUpdateAnomalySubscriptionResolveEndpointMiddleware) ID() string {
return "ResolveEndpointV2"
}
func (m *opUpdateAnomalySubscriptionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ce"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ce"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("ce")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
// addUpdateAnomalySubscriptionResolveEndpointMiddleware inserts the
// operation-specific endpoint resolution middleware into the Serialize step.
// Built-in endpoint parameters (region, dual-stack, FIPS, custom endpoint)
// are captured from the client Options at registration time.
func addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
	return stack.Serialize.Insert(&opUpdateAnomalySubscriptionResolveEndpointMiddleware{
		EndpointResolver: options.EndpointResolverV2,
		BuiltInResolver: &builtInResolver{
			Region:       options.Region,
			UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
			UseFIPS:      options.EndpointOptions.UseFIPSEndpoint,
			Endpoint:     options.BaseEndpoint,
		},
	}, "ResolveEndpoint", middleware.After)
} | random_line_split | |
api_op_UpdateAnomalySubscription.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package costexplorer
import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/service/costexplorer/types"
smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Updates an existing cost anomaly subscription. Specify the fields that you want
// to update. Omitted fields are unchanged. The JSON below describes the generic
// construct for each type. See Request Parameters (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_UpdateAnomalySubscription.html#API_UpdateAnomalySubscription_RequestParameters)
// for possible values as they apply to AnomalySubscription .
func (c *Client) UpdateAnomalySubscription(ctx context.Context, params *UpdateAnomalySubscriptionInput, optFns ...func(*Options)) (*UpdateAnomalySubscriptionOutput, error) {
	// Never push a nil input down the middleware stack.
	if params == nil {
		params = &UpdateAnomalySubscriptionInput{}
	}

	raw, md, err := c.invokeOperation(ctx, "UpdateAnomalySubscription", params, optFns, c.addOperationUpdateAnomalySubscriptionMiddlewares)
	if err != nil {
		return nil, err
	}

	// The operation always deserializes into the typed output; attach the
	// per-call metadata before returning it to the caller.
	resp := raw.(*UpdateAnomalySubscriptionOutput)
	resp.ResultMetadata = md
	return resp, nil
}
// UpdateAnomalySubscriptionInput is the request payload for
// UpdateAnomalySubscription. Omitted fields leave the corresponding
// subscription attributes unchanged.
type UpdateAnomalySubscriptionInput struct {

	// A cost anomaly subscription Amazon Resource Name (ARN).
	//
	// This member is required.
	SubscriptionArn *string

	// The update to the frequency value that subscribers receive notifications.
	Frequency types.AnomalySubscriptionFrequency

	// A list of cost anomaly monitor ARNs.
	MonitorArnList []string

	// The update to the subscriber list.
	Subscribers []types.Subscriber

	// The new name of the subscription.
	SubscriptionName *string

	// (deprecated) The update to the threshold value for receiving notifications.
	// This field has been deprecated. To update a threshold, use ThresholdExpression.
	// Continued use of Threshold will be treated as shorthand syntax for a
	// ThresholdExpression. You can specify either Threshold or ThresholdExpression,
	// but not both.
	//
	// Deprecated: Threshold has been deprecated in favor of ThresholdExpression
	Threshold *float64

	// The update to the Expression (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Expression.html)
	// object used to specify the anomalies that you want to generate alerts for. This
	// supports dimensions and nested expressions. The supported dimensions are
	// ANOMALY_TOTAL_IMPACT_ABSOLUTE and ANOMALY_TOTAL_IMPACT_PERCENTAGE ,
	// corresponding to an anomaly’s TotalImpact and TotalImpactPercentage,
	// respectively (see Impact (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_Impact.html)
	// for more details). The supported nested expression types are AND and OR . The
	// match option GREATER_THAN_OR_EQUAL is required. Values must be numbers between
	// 0 and 10,000,000,000 in string format. You can specify either Threshold or
	// ThresholdExpression, but not both. The following are examples of valid
	// ThresholdExpressions:
	//   - Absolute threshold: { "Dimensions": { "Key":
	//     "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
	//     "Values": [ "100" ] } }
	//   - Percentage threshold: { "Dimensions": { "Key":
	//     "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
	//     "Values": [ "100" ] } }
	//   - AND two thresholds together: { "And": [ { "Dimensions": { "Key":
	//     "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
	//     "Values": [ "100" ] } }, { "Dimensions": { "Key":
	//     "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
	//     "Values": [ "100" ] } } ] }
	//   - OR two thresholds together: { "Or": [ { "Dimensions": { "Key":
	//     "ANOMALY_TOTAL_IMPACT_ABSOLUTE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
	//     "Values": [ "100" ] } }, { "Dimensions": { "Key":
	//     "ANOMALY_TOTAL_IMPACT_PERCENTAGE", "MatchOptions": [ "GREATER_THAN_OR_EQUAL" ],
	//     "Values": [ "100" ] } } ] }
	ThresholdExpression *types.Expression

	noSmithyDocumentSerde
}
// UpdateAnomalySubscriptionOutput is the response payload for
// UpdateAnomalySubscription.
type UpdateAnomalySubscriptionOutput struct {

	// A cost anomaly subscription ARN.
	//
	// This member is required.
	SubscriptionArn *string

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}
// addOperationUpdateAnomalySubscriptionMiddlewares registers the complete
// middleware stack (serialization, endpoint resolution, retries, SigV4
// signing, input validation, logging, ...) for this operation.
//
// NOTE(review): this is generated code and the registration order is
// significant for smithy-go step semantics; do not reorder without checking.
func (c *Client) addOperationUpdateAnomalySubscriptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
	// awsjson1.1 request serializer / response deserializer.
	err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAnomalySubscription{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAnomalySubscription{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	// Operation-specific endpoint resolution (EndpointResolverV2) plus input
	// validation run just before the serializer-registered steps above.
	if err = addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = addOpUpdateAnomalySubscriptionValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAnomalySubscription(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	return nil
}
func newServiceMetadataMiddleware_opUpdateAnomalySubscription(region string) *awsmiddleware.RegisterServiceMetadata {
	// Build the metadata record first, then return its address; these values
	// identify the operation (service, signing name, region) on the stack.
	md := awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "ce",
		OperationName: "UpdateAnomalySubscription",
	}
	return &md
}
// opUpdateAnomalySubscriptionResolveEndpointMiddleware resolves the request
// endpoint and configures the auth/signing context for this operation during
// the Serialize phase.
type opUpdateAnomalySubscriptionResolveEndpointMiddleware struct {
	// EndpointResolver performs the actual endpoint resolution.
	EndpointResolver EndpointResolverV2
	// BuiltInResolver supplies built-in endpoint parameter values
	// (region, dual-stack, FIPS, custom endpoint).
	BuiltInResolver builtInParameterResolver
}
func (*opUpdateAnomalySubscriptionResolveEndpointMiddleware) ID() string {
	// Stable identifier used when inserting/looking up this step on the stack.
	const middlewareID = "ResolveEndpointV2"
	return middlewareID
}
func (m *opUpdateAnomalySubscriptionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
return next.HandleSerialize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if m.EndpointResolver == nil {
return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
}
params := EndpointParameters{}
m.BuiltInResolver.ResolveBuiltIns(¶ms)
var resolvedEndpoint smithyendpoints.Endpoint
resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params)
if err != nil {
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
req.URL = &resolvedEndpoint.URI
for k := range resolvedEndpoint.Headers {
req.Header.Set(
k,
resolvedEndpoint.Headers.Get(k),
)
}
authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties)
if err != nil {
var nfe *internalauth.NoAuthenticationSchemesFoundError
if errors.As(err, &nfe) {
// if no auth scheme is found, default to sigv4
signingName := "ce"
signingRegion := m.BuiltInResolver.(*builtInResolver).Region
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
}
var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError
if errors.As(err, &ue) {
return out, metadata, fmt.Errorf(
"This operation requests signer version(s) %v but the client only supports %v",
ue.UnsupportedSchemes,
internalauth.SupportedSchemes,
)
}
}
for _, authScheme := range authSchemes {
switch authScheme.(type) {
case *internalauth.AuthenticationSchemeV4:
v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4)
var signingName, signingRegion string
if v4Scheme.SigningName == nil {
signingName = "ce"
} else {
signingName = *v4Scheme.SigningName
}
if v4Scheme.SigningRegion == nil {
signingRegion = m.BuiltInResolver.(*builtInResolver).Region
} else {
signingRegion = *v4Scheme.SigningRegion
}
if v4Scheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion)
break
case *internalauth.AuthenticationSchemeV4A:
v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A)
if v4aScheme.SigningName == nil {
v4aScheme.SigningName = aws.String("ce")
}
if v4aScheme.DisableDoubleEncoding != nil {
// The signer sets an equivalent value at client initialization time.
// Setting this context value will cause the signer to extract it
// and override the value set at client initialization time.
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding)
}
ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName)
ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0])
break
case *internalauth.AuthenticationSchemeNone:
break
}
}
return next.HandleSerialize(ctx, in)
}
// addUpdateAnomalySubscriptionResolveEndpointMiddleware inserts the endpoint
// resolution middleware at the "ResolveEndpoint" step, wiring it to the
// client's EndpointResolverV2 and snapshotting the built-in parameters
// (region, dual-stack, FIPS, custom endpoint) from Options.
func addUpdateAnomalySubscriptionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error {
| return stack.Serialize.Insert(&opUpdateAnomalySubscriptionResolveEndpointMiddleware{
	EndpointResolver: options.EndpointResolverV2,
	BuiltInResolver: &builtInResolver{
		Region:       options.Region,
		UseDualStack: options.EndpointOptions.UseDualStackEndpoint,
		UseFIPS:      options.EndpointOptions.UseFIPSEndpoint,
		Endpoint:     options.BaseEndpoint,
	},
}, "ResolveEndpoint", middleware.After)
}
| identifier_body | |
post_processing_gw.py | #------------------------------------------------------------
# Copyright 2016 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
# IMPORTANT
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
import sys
import select
import threading
from threading import Timer
import time
import datetime
import getopt
import os
import json
import re
#////////////////////////////////////////////////////////////
# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS
# OR VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////

#------------------------------------------------------------
#with firebase support?
#------------------------------------------------------------
_firebase=False
#------------------------------------------------------------
#with thingspeak support?
#------------------------------------------------------------
_thingspeak=False
#plot snr instead of seq
_thingspeaksnr=False
#------------------------------------------------------------
#with sensorcloud support?
#------------------------------------------------------------
_sensorcloud=False
#------------------------------------------------------------
#with grovestreams support?
#------------------------------------------------------------
_grovestreams=False
#------------------------------------------------------------
#with fiware support?
#------------------------------------------------------------
_fiware=False

#////////////////////////////////////////////////////////////
# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY
#////////////////////////////////////////////////////////////
# NOTE: the format of the application key list has changed from
# a list of list, to a list of string that will be processed as
# a byte array. Doing so will allow for dictionary construction
# using the appkey to retrieve information such as encryption key,...
app_key_list = [
	#for testing
	'****',
	#change here your application key
	'\x01\x02\x03\x04',
	'\x05\x06\x07\x08'
]

#////////////////////////////////////////////////////////////
#FOR AES DECRYPTION
#////////////////////////////////////////////////////////////
#put your key here, should match the end-device's key
aes_key="0123456789010123"
#put your initialisation vector here, should match the end-device's initialisation vector
aes_iv="\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
#aes_iv="\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
#association between appkey and aes_key
appkey_aeskey = {
	'\x01\x02\x03\x04':"0123456789010123",
	'\x05\x06\x07\x08':"0123456789010123"
}
#association between appkey and aes_iv
appkey_aesiv = {
	'\x01\x02\x03\x04':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00",
	'\x05\x06\x07\x08':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
}
#------------------------------------------------------------
#header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_ISBINARY=0x01
#------------------------------------------------------------
#last pkt information
#------------------------------------------------------------
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
bw=0
cr=0
sf=0
#------------------------------------------------------------
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#with mongoDB support?
#------------------------------------------------------------
_mongodb = False
#------------------------------------------------------------
#log gateway message?
#------------------------------------------------------------
_logGateway=0
#------------------------------------------------------------
#raw output from gateway?
#------------------------------------------------------------
_rawFormat=0
#------------------------------------------------------------
_ourcustomFormat=0;
_lorawanFormat=0
#------------------------------------------------------------
#------------------------------------------------------------
#check for app key?
#------------------------------------------------------------
_wappkey=0
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_aes=0
_hasClearData=0
#------------------------------------------------------------
#open json file to recover gateway_address
#------------------------------------------------------------
# Read the whole gateway configuration. A context manager guarantees the
# file handle is closed even if reading fails (the original open/close
# pattern leaked the handle on error). "lines" and "array" keep their
# original meaning; "".join replaces the quadratic "+=" loop.
with open(os.path.expanduser("local_conf.json"), "r") as f:
	lines = f.readlines()
#get all the lines in a string
array = "".join(lines)
#change it into a python array
json_array = json.loads(array)
#set the gateway_address for having different log filenames
_gwaddr = json_array["gateway_conf"]["gateway_ID"]
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
# base folder (a Dropbox-synced directory) that receives all log files
_folder_path = "/home/pi/Dropbox/LoRa-test/"
# per-gateway log of gateway control/debug messages (lines prefixed ^$)
_gwlog_filename = _folder_path+"gateway_"+str(_gwaddr)+".log"
# per-gateway log of received telemetry messages (lines prefixed \$)
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwaddr)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
# gateway_conf["dht22"] is falsy when disabled; otherwise it is used below
# as the sampling period (in seconds) of the DHT22 polling thread
_gw_dht22 = json_array["gateway_conf"]["dht22"]
_date_save_dht22 = None

if(_gw_dht22):
	print "Use DHT22 to get gateway temperature and humidity level"
	#read values from dht22 in the gateway box
	sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
	from read_dht22 import get_dht22_values

	_temperature = 0
	_humidity = 0

# retrieve dht22 values
def save_dht22_values():
	# Sample the DHT22 once, publish the reading into module globals and,
	# when MongoDB support is enabled, persist the measurement.
	global _temperature, _humidity, _date_save_dht22
	_humidity, _temperature = get_dht22_values()
	_date_save_dht22 = datetime.datetime.utcnow()
	# NOTE(review): the concatenations below assume get_dht22_values()
	# returns strings -- confirm; numeric values would raise TypeError
	print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
	#save values from the gateway box's DHT22 sensor, if _mongodb is true
	if(_mongodb):
		#saving data in a JSON var
		str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
		#creating document to add
		doc = {
			"type" : "DATA_GW_DHT22",
			"gateway_eui" : _gwaddr,
			"node_eui" : "gw",
			"snr" : "",
			"rssi" : "",
			"cr" : "",
			"datarate" : "",
			"time" : _date_save_dht22,
			"data" : json.dumps(json.loads(str_json_data))
		}
		#adding the document
		add_document(doc)

def dht22_target():
	# Daemon-thread loop: sample the gateway's DHT22 every _gw_dht22 seconds.
	while True:
		print "Getting gateway temperature"
		save_dht22_values()
		sys.stdout.flush()
		global _gw_dht22
		time.sleep(_gw_dht22)
#------------------------------------------------------------
#for managing the input data when we can have aes encryption
#------------------------------------------------------------
# Shared one-line read buffer: the main loop can pre-read a known number of
# bytes into _linebuf; the helpers below then serve characters from it before
# falling back to reading stdin directly.
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0

def getSingleChar():
	"""Return the next input character, draining the line buffer first.

	While _has_linebuf is set, characters come from _linebuf; once it is
	exhausted the buffer is invalidated and reading falls back to stdin.
	"""
	global _has_linebuf
	if _has_linebuf==1:
		global _linebuf_idx
		global _linebuf
		if _linebuf_idx < len(_linebuf):
			c = _linebuf[_linebuf_idx]
			_linebuf_idx = _linebuf_idx + 1
			return c
		# buffer exhausted: invalidate it and fall through to stdin
		_has_linebuf = 0
	return sys.stdin.read(1)

def getAllLine():
	"""Return the unread remainder of the line buffer and invalidate it."""
	global _linebuf_idx, _has_linebuf
	start = _linebuf_idx
	# reset the buffer state so subsequent reads go back to stdin
	_linebuf_idx = 0
	_has_linebuf = 0
	global _linebuf
	return _linebuf[start:]

def fillLinebuf(n):
	"""Read exactly n characters from stdin into the buffer and mark it valid."""
	global _linebuf, _linebuf_idx, _has_linebuf
	_linebuf = sys.stdin.read(n)
	_linebuf_idx = 0
	_has_linebuf = 1
#////////////////////////////////////////////////////////////
# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD
# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU
# REALLY KNOW WHAT YOU ARE DOING
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#for parsing the options
#------------------------------------------------------------
def main(argv):
try:
opts, args = getopt.getopt(argv,'iftLam:',[\
'ignorecomment',\
'firebase',\
'thingspeak',\
'retrythsk',\
'thingspeaksnr',\
'fiware',\
'sensorcloud',\
'grovestreams',\
'loggw',\
'addr',\
'wappkey',\
'raw',\
'aes',\
'mongodb'])
except getopt.GetoptError:
print 'post_processing_gw '+\
'-i/--ignorecomment '+\
'-f/--firebase '+\
'-t/--thingspeak '+\
'--retrythsk '+\
'--thingspeaksnr '+\
'--fiware '+\
'--sensorcloud '+\
'--grovestreams '+\
'-L/--loggw '+\
'-a/--addr '+\
'--wappkey '+\
'--raw '+\
'--aes '+\
'-m/--mongodb'
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--ignorecomment"):
print("will ignore commented lines")
global _ignoreComment
_ignoreComment = 1
elif opt in ("-f", "--firebase"):
print("will enable firebase support")
global _firebase
_firebase = True
global firebase_uploadSingleData
from FireBase import firebase_uploadSingleData
elif opt in ("-t", "--thingspeak"):
print("will enable thingspeak support")
global _thingspeak
_thingspeak = True
global thingspeak_uploadSingleData, thingspeak_uploadMultipleData
from ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData
elif opt in ("--retrythsk"):
print("will enable thingspeak retry")
global thingspeak_setRetry
from ThingSpeak import thingspeak_setRetry
#set retry to True
thingspeak_setRetry(True)
elif opt in ("--thingspeaksnr"):
print("will plot snr instead of seq")
global _thingspeaksnr
_thingspeaksnr = True
elif opt in ("--fiware"):
print("will enable fiware support")
global _fiware
_fiware = True
elif opt in ("--sensorcloud"):
print("will enable sensorcloud support")
global _sensorcloud
_sensorcloud = True
global sensorcloud_uploadSingleData
from SensorCloud import sensorcloud_uploadSingleData
elif opt in ("--grovestreams"):
print("will enable grovestreams support")
global _grovestreams
_grovestreams = True
global grovestreams_uploadSingleData
from GroveStreams import grovestreams_uploadSingleData
elif opt in ("-L", "--loggw"):
print("will log gateway message prefixed by ^$")
global _logGateway
_logGateway = 1
elif opt in ("-a", "--addr"):
global _gwaddr
_gwaddr = arg
print("overwrite: will use _"+str(_gwaddr)+" for gateway and telemetry log files")
elif opt in ("--wappkey"):
global _wappkey
_wappkey = 1
global _validappkey
_validappkey=0
print("will check for correct app key")
elif opt in ("--raw"):
global _rawFormat
_rawFormat = 1
print("raw output from gateway. post_processing_gw will handle packet format")
elif opt in ("--aes"):
global _aes
_aes = 1
global AES
from Crypto.Cipher import AES
print("enable AES encrypted data")
elif opt in ("-m", "--mongodb"):
print("will enable local MongoDB support, max months to store is "+arg)
global _mongodb
_mongodb = True
global add_document, remove_if_new_month, mongodb_set_max_months
from MongoDB import add_document, remove_if_new_month, mongodb_set_max_months
#setting max months
mongodb_set_max_months(int(arg))
# END
#////////////////////////////////////////////////////////////
if __name__ == "__main__":
	main(sys.argv[1:])

#gateway dht22
if (_gw_dht22):
	print "Starting thread to measure gateway temperature"
	# daemon thread: it dies automatically with the main process, no join
	t = threading.Thread(target=dht22_target)
	t.daemon = True
	t.start()

print "Current working directory: "+os.getcwd()
while True:
sys.stdout.flush()
ch = getSingleChar()
#expected prefixes
# ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
# example: ^p1,16,3,0,234,8,-45
#
# ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
# example: ^r500,5,12
#
# ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
# example: ^$Set LoRa mode 4
#
# ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
# type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
# example: ^l3,4
#
# \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
# example: \$hello -> hello will be logged in the following format
# (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
#
# \& indicates a message that should be logged in the firebase cloud database
# example: \&hello -> hello will be logged in json format
#
# \! indicates a message that should be logged on a thingspeak channel
# example: \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
# \!2#9.4 -> 9.4 will be logged in the default channel at field 2
# \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
#
# you can log other information such as src, seq, len, SNR and RSSI on specific fields
#
# \xFF\xFE indicates radio data prefix
#
#
#------------------------------------------------------------
# '^' is reserved for control information from the gateway
#------------------------------------------------------------
if (ch=='^'):
now = datetime.datetime.utcnow()
ch=sys.stdin.read(1)
if (ch=='p'):
data = sys.stdin.readline()
print now.isoformat()
print "rcv ctrl pkt info (^p): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
dst=arr[0]
ptype=arr[1]
ptypestr="N/A"
if ((ptype & 0xF0)==PKT_TYPE_DATA):
ptypestr="DATA"
if (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:
ptypestr = ptypestr + " IS_BINARY"
if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
ptypestr = ptypestr + " WAPPKEY"
if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
ptypestr = ptypestr + " ENCRYPTED"
if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
ptypestr = ptypestr + " ACK_REQ"
if ((ptype & 0xF0)==PKT_TYPE_ACK):
ptypestr="ACK"
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if (_rawFormat==0):
info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
else:
info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
print info_str
# TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file
if (ch=='r'):
data = sys.stdin.readline()
print "rcv ctrl radio info (^r): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
bw=arr[0]
cr=arr[1]
sf=arr[2]
info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
print info_str
if (ch=='t'):
rcv_timestamp = sys.stdin.readline()
print "rcv timestamp (^t): "+rcv_timestamp
if (ch=='l'):
# TODO: LAS service
print 'not implemented yet'
if (ch=='$' and _logGateway==1):
data = sys.stdin.readline()
print "rcv gw output to log (^$): "+data,
f=open(os.path.expanduser(_gwlog_filename),"a")
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
continue
#------------------------------------------------------------
# '\' is reserved for message logging service
#------------------------------------------------------------
if (ch=='\\'):
now = datetime.datetime.utcnow()
if _validappkey==1:
print 'valid app key: accept data'
ch=getSingleChar()
if (ch=='$'): #log on Dropbox
data = getAllLine()
print "rcv msg to log (\$) on dropbox: "+data,
f=open(os.path.expanduser(_telemetrylog_filename),"a")
f.write(info_str+' ')
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
#/////////////////////////////////////////////////////////////
# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
# WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAM
# IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD
#////////////////////////////////////////////////////////////
elif (ch=='&' and _firebase): #log on Firebase
ldata = getAllLine()
print 'rcv msg to log (\&) on firebase: '+data
firebase_msg = {
'dst':dst,
'type':ptypestr,
'gateway_eui' : _gwaddr,
'node_eui':src,
'seq':seq,
'len':datalen,
'snr':SNR,
'rssi':RSSI,
'cr' : cr,
'datarate' : "SF"+str(sf)+"BW"+str(bw),
'time':now.isoformat(),
'info_str':info_str+' '+now.isoformat()+'> '+ldata,
'data':ldata
}
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#get the data
data = ldata.split('/')
#change data in two arrays : nomenclature_array and value_array
iteration = 0
nomenclature_array = []
value_array = []
while iteration<len(data) :
if (iteration == 0 or iteration%2 == 0) :
nomenclature_array.append(data[iteration])
else :
value_array.append(data[iteration])
iteration += 1
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
iteration = 0
while iteration < len(nomenclature_array) :
#last iteration, do not add "," at the end
if iteration == len(nomenclature_array)-1 :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]
else :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]+", "
iteration += 1
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
sensor_entry='sensor%d'% (src)
msg_entry='msg%d' % (seq)
#upload data to firebase
firebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)
elif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature
ldata = getAllLine()
# get number of '#' separator
nsharp = ldata.count('#')
#no separator
if nsharp==0:
#will use default channel and field
data=['','']
#contains ['', '', "s1", s1value, "s2", s2value, ...]
data_array = data + re.split("/", ldata)
elif nsharp==1:
#only 1 separator
data_array = re.split("#|/", ldata)
#if the first item has length > 1 then we assume that it is a channel write key
if len(data_array[0])>1:
#insert '' to indicate default field
data_array.insert(1,'');
else:
#insert '' to indicate default channel
data_array.insert(0,'');
else:
#contains [channel, field, "s1", s1value, "s2", s2value, ...]
data_array = re.split("#|/", ldata)
#just in case we have an ending CR or 0
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')
#test if there are characters at the end of each value, then delete these characters
i = 3
while i < len(data_array) :
while not data_array[i][len(data_array[i])-1].isdigit() :
data_array[i] = data_array[i][:-1]
i += 2
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
#start from the first nomenclature
iteration = 2
while iteration < len(data_array)-1 :
#last iteration, do not add "," at the end
if iteration == len(data_array)-2 :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]
else :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]+", "
iteration += 2
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
# get number of '/' separator
nslash = ldata.count('/')
index_first_data = 2
if nslash==0:
# old syntax without nomenclature key
index_first_data=2
else:
# new syntax with nomenclature key
index_first_data=3
#------------------
#test for thingspeak
#------------------
if (_thingspeak):
second_data=str(seq)
if (_thingspeaksnr):
second_data=str(SNR)
#data to send to thingspeak
data = []
data.append(data_array[0]) #channel (if '' default)
data.append(data_array[1]) #field (if '' default)
data.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)
#upload data to thingspeak
#JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA
thingspeak_uploadSingleData(data, second_data)
# if you want to upload all data starting at field 1, uncomment next line, and comment previous line
#thingspeak_uploadMultipleData(data_array) # upload all data in the fields
#------------------
#test for FIWARE
#need FIWARE access
#------------------
if (_fiware):
print("FIWARE: upload")
#entity_id = 'test_item_'+now.isoformat()
entity_id = 'sensor%d'% (src)
#send the first sensor value in data_array
cmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]
print("FiWare: will issue python script")
print(cmd)
args = cmd.split()
try:
out = subprocess.check_output(args, shell=False)
except subprocess.CalledProcessError:
print("FiWare: python script failed")
if out.find('"reasonPhrase" : "OK"') > 0:
print("FiWare: Entity updated with ENTITY_ID "+entity_id)
else:
print("FiWare: Entity update failed")
#------------------
#test for sensorcloud
#------------------
if (_sensorcloud) :
#send the first sensor value in data_array
sensorcloud_uploadSingleData(data_array[index_first_data])
#------------------
#test for grovestreams
#------------------
if (_grovestreams):
nomenclatures = []
data = []
if nslash==0:
# old syntax without nomemclature key, so insert only one key
nomenclatures.append("temp")
data.append(data_array[index_first_data])
else:
#completing nomenclatures and data
i=2
while i < len(data_array)-1 :
nomenclatures.append(data_array[i])
data.append(data_array[i+1])
i += 2
#upload data to grovestreams
grovestreams_uploadSingleData(nomenclatures, data, str(src))
# END
#////////////////////////////////////////////////////////////
else: # not a known data logging prefix
#you may want to upload to a default service
#so just implement it here
print('unrecognized data logging prefix: discard data')
getAllLine()
else:
print('invalid app key: discard data')
getAllLine()
continue
# handle binary prefixes
if (ch == '\xFF' or ch == '+'):
#if (ch == '\xFF'):
print("got first framing byte")
ch=getSingleChar()
# data prefix for non-encrypted data
if (ch == '\xFE' or ch == '+'):
#if (ch == '\xFE'):
#the data prefix is inserted by the gateway
#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
print("--> got data prefix")
#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
#if _wappkey is set then we have to first indicate that _validappkey=0
if (_wappkey==1):
_validappkey=0
else:
_validappkey=1
# if we have raw output from gw, then try to determine which kind of packet it is
if (_rawFormat==1):
ch=getSingleChar()
# probably our modified Libelium header where the destination is the gateway
# dissect our modified Libelium format
if ch==1:
dst=ord(ch)
ptype=ord(getSingleChar())
src=ord(getSingleChar())
seq=ord(getSingleChar())
print("Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq))
# now we read datalen-4 (the header length) bytes in our line buffer
fillLinebuf(datalen-HEADER_SIZE)
# TODO: dissect LoRaWAN
# you can implement LoRaWAN decoding if this is necessary for your system
# look at the LoRaWAN packet format specification to dissect the packet in detail
#
# LoRaWAN uses the MHDR(1B)
# ----------------------------
# | 7 6 5 | 4 3 2 | 1 0 |
# ----------------------------
# MType RFU major
#
# the main MType is unconfirmed data up which value is 010
if (ch & 0x40)==0x40:
# Do the LoRaWAN decoding
print("LoRaWAN?")
# for the moment just discard the data
fillLinebuf(datalen-1)
getAllLine()
else:
# now we read datalen bytes in our line buffer
fillLinebuf(datalen)
# encrypted data payload?
if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
print("--> DATA encrypted: encrypted payload size is %d" % datalen)
_hasClearData=0 |
if _aes==1:
print("--> decrypting")
decrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)
# decrypt
s = decrypt_handler.decrypt(_linebuf)
for i in range(0, len(s)):
print "%.2X " % ord(s[i]),
print "\nEnd"
# get the real (decrypted) payload size
rsize = ord(s[APPKEY_SIZE])
print("--> real payload size is %d" % rsize)
# then add the appkey + the appkey framing bytes
rsize = rsize+APPKEY_SIZE+1
_linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]
for i in range(0, len(_linebuf)):
print "%.2X " % ord(_linebuf[i]),
print "\nEnd"
# normally next read from input will get data from the decrypted _linebuf
print "--> decrypted payload is: ",
print _linebuf[APPKEY_SIZE:]
_hasClearData=1
else:
print("--> DATA encrypted: aes not activated")
# drain stdin of all the encrypted data
enc_data=getAllLine()
print("--> discard encrypted data")
else:
_hasClearData=1
# with_appkey?
if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
print("--> DATA with_appkey: read app key sequence")
the_app_key = getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
print "app key is ",
print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)
if the_app_key in app_key_list:
print("in app key list")
if _wappkey==1:
_validappkey=1
else:
print("not in app key list")
if _wappkey==1:
_validappkey=0
else:
#we do not check for app key
_validappkey=1
print("but app key disabled")
continue
if (ch == '?' and _ignoreComment==1):
sys.stdin.readline()
continue
sys.stdout.write(ch) | random_line_split | |
post_processing_gw.py | #------------------------------------------------------------
# Copyright 2016 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
# IMPORTANT
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
import sys
import select
import threading
from threading import Timer
import time
import datetime
import getopt
import os
import json
import re
#////////////////////////////////////////////////////////////
# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS
# OR VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#with firebase support?
#------------------------------------------------------------
_firebase=False
#------------------------------------------------------------
#with thingspeak support?
#------------------------------------------------------------
_thingspeak=False
#plot snr instead of seq
_thingspeaksnr=False
#------------------------------------------------------------
#with sensorcloud support?
#------------------------------------------------------------
_sensorcloud=False
#------------------------------------------------------------
#with grovestreams support?
#------------------------------------------------------------
_grovestreams=False
#------------------------------------------------------------
#with fiware support?
#------------------------------------------------------------
_fiware=False
#////////////////////////////////////////////////////////////
# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY
#////////////////////////////////////////////////////////////
# NOTE: the format of the application key list has changed from
# a list of list, to a list of string that will be process as
# a byte array. Doing so wilL allow for dictionary construction
# using the appkey to retrieve information such as encryption key,...
app_key_list = [
#for testing
'****',
#change here your application key
'\x01\x02\x03\x04',
'\x05\x06\x07\x08'
]
#////////////////////////////////////////////////////////////
#FOR AES DECRYPTION
#////////////////////////////////////////////////////////////
#put your key here, should match the end-device's key
aes_key="0123456789010123"
#put your initialisation vector here, should match the end-device's initialisation vector
aes_iv="\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
#aes_iv="\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
#association between appkey and aes_key
appkey_aeskey = {
'\x01\x02\x03\x04':"0123456789010123",
'\x05\x06\x07\x08':"0123456789010123"
}
#association between appkey and aes_iv
appkey_aesiv = {
'\x01\x02\x03\x04':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00",
'\x05\x06\x07\x08':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
}
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_ISBINARY=0x01
#------------------------------------------------------------
#last pkt information
#------------------------------------------------------------
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
bw=0
cr=0
sf=0
#------------------------------------------------------------
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#with mongoDB support?
#------------------------------------------------------------
_mongodb = False
#------------------------------------------------------------
#log gateway message?
#------------------------------------------------------------
_logGateway=0
#------------------------------------------------------------
#raw output from gateway?
#------------------------------------------------------------
_rawFormat=0
#------------------------------------------------------------
_ourcustomFormat=0;
_lorawanFormat=0
#------------------------------------------------------------
#------------------------------------------------------------
#check for app key?
#------------------------------------------------------------
_wappkey=0
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_aes=0
_hasClearData=0
#------------------------------------------------------------
#open json file to recover gateway_address
#------------------------------------------------------------
f = open(os.path.expanduser("local_conf.json"),"r")
lines = f.readlines()
f.close()
array = ""
#get all the lines in a string
for line in lines :
array += line
#change it into a python array
json_array = json.loads(array)
#set the gateway_address for having different log filenames
_gwaddr = json_array["gateway_conf"]["gateway_ID"]
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
_folder_path = "/home/pi/Dropbox/LoRa-test/"
_gwlog_filename = _folder_path+"gateway_"+str(_gwaddr)+".log"
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwaddr)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
_gw_dht22 = json_array["gateway_conf"]["dht22"]
_date_save_dht22 = None
if(_gw_dht22):
print "Use DHT22 to get gateway temperature and humidity level"
#read values from dht22 in the gateway box
sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
from read_dht22 import get_dht22_values
_temperature = 0
_humidity = 0
# retrieve dht22 values
def save_dht22_values():
global _temperature, _humidity, _date_save_dht22
_humidity, _temperature = get_dht22_values()
_date_save_dht22 = datetime.datetime.utcnow()
print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
#save values from the gateway box's DHT22 sensor, if _mongodb is true
if(_mongodb):
#saving data in a JSON var
str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
#creating document to add
doc = {
"type" : "DATA_GW_DHT22",
"gateway_eui" : _gwaddr,
"node_eui" : "gw",
"snr" : "",
"rssi" : "",
"cr" : "",
"datarate" : "",
"time" : _date_save_dht22,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
def dht22_target():
while True:
print "Getting gateway temperature"
save_dht22_values()
sys.stdout.flush()
global _gw_dht22
time.sleep(_gw_dht22)
#------------------------------------------------------------
#for managing the input data when we can have aes encryption
#------------------------------------------------------------
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0
def getSingleChar():
    """Return the next single input character.

    While a stored line buffer is active (_has_linebuf==1) characters are
    consumed from _linebuf (e.g. a decrypted payload); once it is
    exhausted, or when no buffer is active, a character is read from stdin.
    """
    global _has_linebuf
    # if we have a valid _linebuf then read from _linebuf
    if _has_linebuf==1:
        global _linebuf_idx
        global _linebuf
        if _linebuf_idx < len(_linebuf):
            _linebuf_idx = _linebuf_idx + 1
            return _linebuf[_linebuf_idx-1]
        else:
            # no more character from _linebuf, so read from stdin
            _has_linebuf = 0
            return sys.stdin.read(1)
    else:
        return sys.stdin.read(1)
def getAllLine():
    """Drain and return the unread remainder of the line buffer.

    Also resets the buffer state so that subsequent reads fall back
    to stdin.
    """
    global _linebuf, _linebuf_idx, _has_linebuf
    start = _linebuf_idx
    # invalidate the buffer before handing back what is left of it
    _linebuf_idx = 0
    _has_linebuf = 0
    return _linebuf[start:]
def fillLinebuf(n):
|
#////////////////////////////////////////////////////////////
# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD
# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU
# REALLY KNOW WHAT YOU ARE DOING
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#for parsing the options
#------------------------------------------------------------
def main(argv):
try:
opts, args = getopt.getopt(argv,'iftLam:',[\
'ignorecomment',\
'firebase',\
'thingspeak',\
'retrythsk',\
'thingspeaksnr',\
'fiware',\
'sensorcloud',\
'grovestreams',\
'loggw',\
'addr',\
'wappkey',\
'raw',\
'aes',\
'mongodb'])
except getopt.GetoptError:
print 'post_processing_gw '+\
'-i/--ignorecomment '+\
'-f/--firebase '+\
'-t/--thingspeak '+\
'--retrythsk '+\
'--thingspeaksnr '+\
'--fiware '+\
'--sensorcloud '+\
'--grovestreams '+\
'-L/--loggw '+\
'-a/--addr '+\
'--wappkey '+\
'--raw '+\
'--aes '+\
'-m/--mongodb'
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--ignorecomment"):
print("will ignore commented lines")
global _ignoreComment
_ignoreComment = 1
elif opt in ("-f", "--firebase"):
print("will enable firebase support")
global _firebase
_firebase = True
global firebase_uploadSingleData
from FireBase import firebase_uploadSingleData
elif opt in ("-t", "--thingspeak"):
print("will enable thingspeak support")
global _thingspeak
_thingspeak = True
global thingspeak_uploadSingleData, thingspeak_uploadMultipleData
from ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData
elif opt in ("--retrythsk"):
print("will enable thingspeak retry")
global thingspeak_setRetry
from ThingSpeak import thingspeak_setRetry
#set retry to True
thingspeak_setRetry(True)
elif opt in ("--thingspeaksnr"):
print("will plot snr instead of seq")
global _thingspeaksnr
_thingspeaksnr = True
elif opt in ("--fiware"):
print("will enable fiware support")
global _fiware
_fiware = True
elif opt in ("--sensorcloud"):
print("will enable sensorcloud support")
global _sensorcloud
_sensorcloud = True
global sensorcloud_uploadSingleData
from SensorCloud import sensorcloud_uploadSingleData
elif opt in ("--grovestreams"):
print("will enable grovestreams support")
global _grovestreams
_grovestreams = True
global grovestreams_uploadSingleData
from GroveStreams import grovestreams_uploadSingleData
elif opt in ("-L", "--loggw"):
print("will log gateway message prefixed by ^$")
global _logGateway
_logGateway = 1
elif opt in ("-a", "--addr"):
global _gwaddr
_gwaddr = arg
print("overwrite: will use _"+str(_gwaddr)+" for gateway and telemetry log files")
elif opt in ("--wappkey"):
global _wappkey
_wappkey = 1
global _validappkey
_validappkey=0
print("will check for correct app key")
elif opt in ("--raw"):
global _rawFormat
_rawFormat = 1
print("raw output from gateway. post_processing_gw will handle packet format")
elif opt in ("--aes"):
global _aes
_aes = 1
global AES
from Crypto.Cipher import AES
print("enable AES encrypted data")
elif opt in ("-m", "--mongodb"):
print("will enable local MongoDB support, max months to store is "+arg)
global _mongodb
_mongodb = True
global add_document, remove_if_new_month, mongodb_set_max_months
from MongoDB import add_document, remove_if_new_month, mongodb_set_max_months
#setting max months
mongodb_set_max_months(int(arg))
# END
#////////////////////////////////////////////////////////////
if __name__ == "__main__":
    main(sys.argv[1:])

#gateway dht22: start the background sampling thread if a period was configured
if (_gw_dht22):
    print "Starting thread to measure gateway temperature"
    t = threading.Thread(target=dht22_target)
    # daemon thread so it is torn down together with the main process
    t.daemon = True
    t.start()
print "Current working directory: "+os.getcwd()
while True:
sys.stdout.flush()
ch = getSingleChar()
#expected prefixes
# ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
# example: ^p1,16,3,0,234,8,-45
#
# ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
# example: ^r500,5,12
#
# ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
# example: ^$Set LoRa mode 4
#
# ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
# type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
# example: ^l3,4
#
# \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
# example: \$hello -> hello will be logged in the following format
# (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
#
# \& indicates a message that should be logged in the firebase cloud database
# example: \&hello -> hello will be logged in json format
#
# \! indicates a message that should be logged on a thingspeak channel
# example: \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
# \!2#9.4 -> 9.4 will be logged in the default channel at field 2
# \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
#
# you can log other information such as src, seq, len, SNR and RSSI on specific fields
#
# \xFF\xFE indicates radio data prefix
#
#
#------------------------------------------------------------
# '^' is reserved for control information from the gateway
#------------------------------------------------------------
if (ch=='^'):
now = datetime.datetime.utcnow()
ch=sys.stdin.read(1)
if (ch=='p'):
data = sys.stdin.readline()
print now.isoformat()
print "rcv ctrl pkt info (^p): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
dst=arr[0]
ptype=arr[1]
ptypestr="N/A"
if ((ptype & 0xF0)==PKT_TYPE_DATA):
ptypestr="DATA"
if (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:
ptypestr = ptypestr + " IS_BINARY"
if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
ptypestr = ptypestr + " WAPPKEY"
if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
ptypestr = ptypestr + " ENCRYPTED"
if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
ptypestr = ptypestr + " ACK_REQ"
if ((ptype & 0xF0)==PKT_TYPE_ACK):
ptypestr="ACK"
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if (_rawFormat==0):
info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
else:
info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
print info_str
# TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file
if (ch=='r'):
data = sys.stdin.readline()
print "rcv ctrl radio info (^r): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
bw=arr[0]
cr=arr[1]
sf=arr[2]
info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
print info_str
if (ch=='t'):
rcv_timestamp = sys.stdin.readline()
print "rcv timestamp (^t): "+rcv_timestamp
if (ch=='l'):
# TODO: LAS service
print 'not implemented yet'
if (ch=='$' and _logGateway==1):
data = sys.stdin.readline()
print "rcv gw output to log (^$): "+data,
f=open(os.path.expanduser(_gwlog_filename),"a")
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
continue
#------------------------------------------------------------
# '\' is reserved for message logging service
#------------------------------------------------------------
if (ch=='\\'):
now = datetime.datetime.utcnow()
if _validappkey==1:
print 'valid app key: accept data'
ch=getSingleChar()
if (ch=='$'): #log on Dropbox
data = getAllLine()
print "rcv msg to log (\$) on dropbox: "+data,
f=open(os.path.expanduser(_telemetrylog_filename),"a")
f.write(info_str+' ')
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
#/////////////////////////////////////////////////////////////
# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
# WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAM
# IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD
#////////////////////////////////////////////////////////////
elif (ch=='&' and _firebase): #log on Firebase
ldata = getAllLine()
print 'rcv msg to log (\&) on firebase: '+data
firebase_msg = {
'dst':dst,
'type':ptypestr,
'gateway_eui' : _gwaddr,
'node_eui':src,
'seq':seq,
'len':datalen,
'snr':SNR,
'rssi':RSSI,
'cr' : cr,
'datarate' : "SF"+str(sf)+"BW"+str(bw),
'time':now.isoformat(),
'info_str':info_str+' '+now.isoformat()+'> '+ldata,
'data':ldata
}
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#get the data
data = ldata.split('/')
#change data in two arrays : nomenclature_array and value_array
iteration = 0
nomenclature_array = []
value_array = []
while iteration<len(data) :
if (iteration == 0 or iteration%2 == 0) :
nomenclature_array.append(data[iteration])
else :
value_array.append(data[iteration])
iteration += 1
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
iteration = 0
while iteration < len(nomenclature_array) :
#last iteration, do not add "," at the end
if iteration == len(nomenclature_array)-1 :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]
else :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]+", "
iteration += 1
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
sensor_entry='sensor%d'% (src)
msg_entry='msg%d' % (seq)
#upload data to firebase
firebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)
elif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature
ldata = getAllLine()
# get number of '#' separator
nsharp = ldata.count('#')
#no separator
if nsharp==0:
#will use default channel and field
data=['','']
#contains ['', '', "s1", s1value, "s2", s2value, ...]
data_array = data + re.split("/", ldata)
elif nsharp==1:
#only 1 separator
data_array = re.split("#|/", ldata)
#if the first item has length > 1 then we assume that it is a channel write key
if len(data_array[0])>1:
#insert '' to indicate default field
data_array.insert(1,'');
else:
#insert '' to indicate default channel
data_array.insert(0,'');
else:
#contains [channel, field, "s1", s1value, "s2", s2value, ...]
data_array = re.split("#|/", ldata)
#just in case we have an ending CR or 0
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')
#test if there are characters at the end of each value, then delete these characters
i = 3
while i < len(data_array) :
while not data_array[i][len(data_array[i])-1].isdigit() :
data_array[i] = data_array[i][:-1]
i += 2
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
#start from the first nomenclature
iteration = 2
while iteration < len(data_array)-1 :
#last iteration, do not add "," at the end
if iteration == len(data_array)-2 :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]
else :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]+", "
iteration += 2
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
# get number of '/' separator
nslash = ldata.count('/')
index_first_data = 2
if nslash==0:
# old syntax without nomenclature key
index_first_data=2
else:
# new syntax with nomenclature key
index_first_data=3
#------------------
#test for thingspeak
#------------------
if (_thingspeak):
second_data=str(seq)
if (_thingspeaksnr):
second_data=str(SNR)
#data to send to thingspeak
data = []
data.append(data_array[0]) #channel (if '' default)
data.append(data_array[1]) #field (if '' default)
data.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)
#upload data to thingspeak
#JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA
thingspeak_uploadSingleData(data, second_data)
# if you want to upload all data starting at field 1, uncomment next line, and comment previous line
#thingspeak_uploadMultipleData(data_array) # upload all data in the fields
#------------------
#test for FIWARE
#need FIWARE access
#------------------
if (_fiware):
print("FIWARE: upload")
#entity_id = 'test_item_'+now.isoformat()
entity_id = 'sensor%d'% (src)
#send the first sensor value in data_array
cmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]
print("FiWare: will issue python script")
print(cmd)
args = cmd.split()
try:
out = subprocess.check_output(args, shell=False)
except subprocess.CalledProcessError:
print("FiWare: python script failed")
if out.find('"reasonPhrase" : "OK"') > 0:
print("FiWare: Entity updated with ENTITY_ID "+entity_id)
else:
print("FiWare: Entity update failed")
#------------------
#test for sensorcloud
#------------------
if (_sensorcloud) :
#send the first sensor value in data_array
sensorcloud_uploadSingleData(data_array[index_first_data])
#------------------
#test for grovestreams
#------------------
if (_grovestreams):
nomenclatures = []
data = []
if nslash==0:
# old syntax without nomemclature key, so insert only one key
nomenclatures.append("temp")
data.append(data_array[index_first_data])
else:
#completing nomenclatures and data
i=2
while i < len(data_array)-1 :
nomenclatures.append(data_array[i])
data.append(data_array[i+1])
i += 2
#upload data to grovestreams
grovestreams_uploadSingleData(nomenclatures, data, str(src))
# END
#////////////////////////////////////////////////////////////
else: # not a known data logging prefix
#you may want to upload to a default service
#so just implement it here
print('unrecognized data logging prefix: discard data')
getAllLine()
else:
print('invalid app key: discard data')
getAllLine()
continue
# handle binary prefixes
if (ch == '\xFF' or ch == '+'):
#if (ch == '\xFF'):
print("got first framing byte")
ch=getSingleChar()
# data prefix for non-encrypted data
if (ch == '\xFE' or ch == '+'):
#if (ch == '\xFE'):
#the data prefix is inserted by the gateway
#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
print("--> got data prefix")
#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
#if _wappkey is set then we have to first indicate that _validappkey=0
if (_wappkey==1):
_validappkey=0
else:
_validappkey=1
# if we have raw output from gw, then try to determine which kind of packet it is
if (_rawFormat==1):
ch=getSingleChar()
# probably our modified Libelium header where the destination is the gateway
# dissect our modified Libelium format
if ch==1:
dst=ord(ch)
ptype=ord(getSingleChar())
src=ord(getSingleChar())
seq=ord(getSingleChar())
print("Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq))
# now we read datalen-4 (the header length) bytes in our line buffer
fillLinebuf(datalen-HEADER_SIZE)
# TODO: dissect LoRaWAN
# you can implement LoRaWAN decoding if this is necessary for your system
# look at the LoRaWAN packet format specification to dissect the packet in detail
#
# LoRaWAN uses the MHDR(1B)
# ----------------------------
# | 7 6 5 | 4 3 2 | 1 0 |
# ----------------------------
# MType RFU major
#
# the main MType is unconfirmed data up which value is 010
if (ch & 0x40)==0x40:
# Do the LoRaWAN decoding
print("LoRaWAN?")
# for the moment just discard the data
fillLinebuf(datalen-1)
getAllLine()
else:
# now we read datalen bytes in our line buffer
fillLinebuf(datalen)
# encrypted data payload?
if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
print("--> DATA encrypted: encrypted payload size is %d" % datalen)
_hasClearData=0
if _aes==1:
print("--> decrypting")
decrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)
# decrypt
s = decrypt_handler.decrypt(_linebuf)
for i in range(0, len(s)):
print "%.2X " % ord(s[i]),
print "\nEnd"
# get the real (decrypted) payload size
rsize = ord(s[APPKEY_SIZE])
print("--> real payload size is %d" % rsize)
# then add the appkey + the appkey framing bytes
rsize = rsize+APPKEY_SIZE+1
_linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]
for i in range(0, len(_linebuf)):
print "%.2X " % ord(_linebuf[i]),
print "\nEnd"
# normally next read from input will get data from the decrypted _linebuf
print "--> decrypted payload is: ",
print _linebuf[APPKEY_SIZE:]
_hasClearData=1
else:
print("--> DATA encrypted: aes not activated")
# drain stdin of all the encrypted data
enc_data=getAllLine()
print("--> discard encrypted data")
else:
_hasClearData=1
# with_appkey?
if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
print("--> DATA with_appkey: read app key sequence")
the_app_key = getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
print "app key is ",
print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)
if the_app_key in app_key_list:
print("in app key list")
if _wappkey==1:
_validappkey=1
else:
print("not in app key list")
if _wappkey==1:
_validappkey=0
else:
#we do not check for app key
_validappkey=1
print("but app key disabled")
continue
if (ch == '?' and _ignoreComment==1):
sys.stdin.readline()
continue
sys.stdout.write(ch)
| global _linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 1
global _linebuf
# fill in our _linebuf from stdin
_linebuf=sys.stdin.read(n) | identifier_body |
post_processing_gw.py | #------------------------------------------------------------
# Copyright 2016 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
# IMPORTANT
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
import sys
import select
import threading
from threading import Timer
import time
import datetime
import getopt
import os
import json
import re
#////////////////////////////////////////////////////////////
# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS
# OR VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#with firebase support?
#------------------------------------------------------------
_firebase=False
#------------------------------------------------------------
#with thingspeak support?
#------------------------------------------------------------
_thingspeak=False
#plot snr instead of seq
_thingspeaksnr=False
#------------------------------------------------------------
#with sensorcloud support?
#------------------------------------------------------------
_sensorcloud=False
#------------------------------------------------------------
#with grovestreams support?
#------------------------------------------------------------
_grovestreams=False
#------------------------------------------------------------
#with fiware support?
#------------------------------------------------------------
_fiware=False
#////////////////////////////////////////////////////////////
# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY
#////////////////////////////////////////////////////////////
# NOTE: the format of the application key list has changed from
# a list of list, to a list of string that will be process as
# a byte array. Doing so wilL allow for dictionary construction
# using the appkey to retrieve information such as encryption key,...
app_key_list = [
#for testing
'****',
#change here your application key
'\x01\x02\x03\x04',
'\x05\x06\x07\x08'
]
#////////////////////////////////////////////////////////////
#FOR AES DECRYPTION
#////////////////////////////////////////////////////////////
#put your key here, should match the end-device's key
aes_key="0123456789010123"
#put your initialisation vector here, should match the end-device's initialisation vector
aes_iv="\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
#aes_iv="\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
#association between appkey and aes_key
appkey_aeskey = {
'\x01\x02\x03\x04':"0123456789010123",
'\x05\x06\x07\x08':"0123456789010123"
}
#association between appkey and aes_iv
appkey_aesiv = {
'\x01\x02\x03\x04':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00",
'\x05\x06\x07\x08':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
}
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_ISBINARY=0x01
#------------------------------------------------------------
#last pkt information
#------------------------------------------------------------
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
bw=0
cr=0
sf=0
#------------------------------------------------------------
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#with mongoDB support?
#------------------------------------------------------------
_mongodb = False
#------------------------------------------------------------
#log gateway message?
#------------------------------------------------------------
_logGateway=0
#------------------------------------------------------------
#raw output from gateway?
#------------------------------------------------------------
_rawFormat=0
#------------------------------------------------------------
_ourcustomFormat=0;
_lorawanFormat=0
#------------------------------------------------------------
#------------------------------------------------------------
#check for app key?
#------------------------------------------------------------
_wappkey=0
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_aes=0
_hasClearData=0
#------------------------------------------------------------
#open json file to recover gateway_address
#------------------------------------------------------------
f = open(os.path.expanduser("local_conf.json"),"r")
lines = f.readlines()
f.close()
array = ""
#get all the lines in a string
for line in lines :
array += line
#change it into a python array
json_array = json.loads(array)
#set the gateway_address for having different log filenames
_gwaddr = json_array["gateway_conf"]["gateway_ID"]
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
_folder_path = "/home/pi/Dropbox/LoRa-test/"
_gwlog_filename = _folder_path+"gateway_"+str(_gwaddr)+".log"
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwaddr)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
_gw_dht22 = json_array["gateway_conf"]["dht22"]
_date_save_dht22 = None
if(_gw_dht22):
print "Use DHT22 to get gateway temperature and humidity level"
#read values from dht22 in the gateway box
sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
from read_dht22 import get_dht22_values
_temperature = 0
_humidity = 0
# retrieve dht22 values
def save_dht22_values():
global _temperature, _humidity, _date_save_dht22
_humidity, _temperature = get_dht22_values()
_date_save_dht22 = datetime.datetime.utcnow()
print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
#save values from the gateway box's DHT22 sensor, if _mongodb is true
if(_mongodb):
#saving data in a JSON var
str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
#creating document to add
doc = {
"type" : "DATA_GW_DHT22",
"gateway_eui" : _gwaddr,
"node_eui" : "gw",
"snr" : "",
"rssi" : "",
"cr" : "",
"datarate" : "",
"time" : _date_save_dht22,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
def dht22_target():
while True:
print "Getting gateway temperature"
save_dht22_values()
sys.stdout.flush()
global _gw_dht22
time.sleep(_gw_dht22)
#------------------------------------------------------------
#for managing the input data when we can have aes encryption
#------------------------------------------------------------
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0
def getSingleChar():
global _has_linebuf
# if we have a valid _linebuf then read from _linebuf
if _has_linebuf==1:
global _linebuf_idx
global _linebuf
if _linebuf_idx < len(_linebuf):
_linebuf_idx = _linebuf_idx + 1
return _linebuf[_linebuf_idx-1]
else:
# no more character from _linebuf, so read from stdin
_has_linebuf = 0
return sys.stdin.read(1)
else:
return sys.stdin.read(1)
def getAllLine():
global _linebuf_idx
p=_linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 0
global _linebuf
# return the remaining of the string and clear the _linebuf
return _linebuf[p:]
def fillLinebuf(n):
global _linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 1
global _linebuf
# fill in our _linebuf from stdin
_linebuf=sys.stdin.read(n)
#////////////////////////////////////////////////////////////
# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD
# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU
# REALLY KNOW WHAT YOU ARE DOING
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#for parsing the options
#------------------------------------------------------------
def main(argv):
try:
opts, args = getopt.getopt(argv,'iftLam:',[\
'ignorecomment',\
'firebase',\
'thingspeak',\
'retrythsk',\
'thingspeaksnr',\
'fiware',\
'sensorcloud',\
'grovestreams',\
'loggw',\
'addr',\
'wappkey',\
'raw',\
'aes',\
'mongodb'])
except getopt.GetoptError:
print 'post_processing_gw '+\
'-i/--ignorecomment '+\
'-f/--firebase '+\
'-t/--thingspeak '+\
'--retrythsk '+\
'--thingspeaksnr '+\
'--fiware '+\
'--sensorcloud '+\
'--grovestreams '+\
'-L/--loggw '+\
'-a/--addr '+\
'--wappkey '+\
'--raw '+\
'--aes '+\
'-m/--mongodb'
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--ignorecomment"):
print("will ignore commented lines")
global _ignoreComment
_ignoreComment = 1
elif opt in ("-f", "--firebase"):
print("will enable firebase support")
global _firebase
_firebase = True
global firebase_uploadSingleData
from FireBase import firebase_uploadSingleData
elif opt in ("-t", "--thingspeak"):
print("will enable thingspeak support")
global _thingspeak
_thingspeak = True
global thingspeak_uploadSingleData, thingspeak_uploadMultipleData
from ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData
elif opt in ("--retrythsk"):
print("will enable thingspeak retry")
global thingspeak_setRetry
from ThingSpeak import thingspeak_setRetry
#set retry to True
thingspeak_setRetry(True)
elif opt in ("--thingspeaksnr"):
print("will plot snr instead of seq")
global _thingspeaksnr
_thingspeaksnr = True
elif opt in ("--fiware"):
print("will enable fiware support")
global _fiware
_fiware = True
elif opt in ("--sensorcloud"):
print("will enable sensorcloud support")
global _sensorcloud
_sensorcloud = True
global sensorcloud_uploadSingleData
from SensorCloud import sensorcloud_uploadSingleData
elif opt in ("--grovestreams"):
print("will enable grovestreams support")
global _grovestreams
_grovestreams = True
global grovestreams_uploadSingleData
from GroveStreams import grovestreams_uploadSingleData
elif opt in ("-L", "--loggw"):
print("will log gateway message prefixed by ^$")
global _logGateway
_logGateway = 1
elif opt in ("-a", "--addr"):
global _gwaddr
_gwaddr = arg
print("overwrite: will use _"+str(_gwaddr)+" for gateway and telemetry log files")
elif opt in ("--wappkey"):
global _wappkey
_wappkey = 1
global _validappkey
_validappkey=0
print("will check for correct app key")
elif opt in ("--raw"):
global _rawFormat
_rawFormat = 1
print("raw output from gateway. post_processing_gw will handle packet format")
elif opt in ("--aes"):
global _aes
_aes = 1
global AES
from Crypto.Cipher import AES
print("enable AES encrypted data")
elif opt in ("-m", "--mongodb"):
print("will enable local MongoDB support, max months to store is "+arg)
global _mongodb
_mongodb = True
global add_document, remove_if_new_month, mongodb_set_max_months
from MongoDB import add_document, remove_if_new_month, mongodb_set_max_months
#setting max months
mongodb_set_max_months(int(arg))
# END
#////////////////////////////////////////////////////////////
if __name__ == "__main__":
main(sys.argv[1:])
#gateway dht22
if (_gw_dht22):
print "Starting thread to measure gateway temperature"
t = threading.Thread(target=dht22_target)
t.daemon = True
t.start()
print "Current working directory: "+os.getcwd()
while True:
sys.stdout.flush()
ch = getSingleChar()
#expected prefixes
# ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
# example: ^p1,16,3,0,234,8,-45
#
# ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
# example: ^r500,5,12
#
# ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
# example: ^$Set LoRa mode 4
#
# ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
# type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
# example: ^l3,4
#
# \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
# example: \$hello -> hello will be logged in the following format
# (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
#
# \& indicates a message that should be logged in the firebase cloud database
# example: \&hello -> hello will be logged in json format
#
# \! indicates a message that should be logged on a thingspeak channel
# example: \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
# \!2#9.4 -> 9.4 will be logged in the default channel at field 2
# \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
#
# you can log other information such as src, seq, len, SNR and RSSI on specific fields
#
# \xFF\xFE indicates radio data prefix
#
#
#------------------------------------------------------------
# '^' is reserved for control information from the gateway
#------------------------------------------------------------
if (ch=='^'):
now = datetime.datetime.utcnow()
ch=sys.stdin.read(1)
if (ch=='p'):
data = sys.stdin.readline()
print now.isoformat()
print "rcv ctrl pkt info (^p): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
dst=arr[0]
ptype=arr[1]
ptypestr="N/A"
if ((ptype & 0xF0)==PKT_TYPE_DATA):
ptypestr="DATA"
if (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:
ptypestr = ptypestr + " IS_BINARY"
if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
ptypestr = ptypestr + " WAPPKEY"
if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
ptypestr = ptypestr + " ENCRYPTED"
if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
ptypestr = ptypestr + " ACK_REQ"
if ((ptype & 0xF0)==PKT_TYPE_ACK):
ptypestr="ACK"
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if (_rawFormat==0):
info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
else:
info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
print info_str
# TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file
if (ch=='r'):
data = sys.stdin.readline()
print "rcv ctrl radio info (^r): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
bw=arr[0]
cr=arr[1]
sf=arr[2]
info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
print info_str
if (ch=='t'):
rcv_timestamp = sys.stdin.readline()
print "rcv timestamp (^t): "+rcv_timestamp
if (ch=='l'):
# TODO: LAS service
print 'not implemented yet'
if (ch=='$' and _logGateway==1):
data = sys.stdin.readline()
print "rcv gw output to log (^$): "+data,
f=open(os.path.expanduser(_gwlog_filename),"a")
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
continue
#------------------------------------------------------------
# '\' is reserved for message logging service
#------------------------------------------------------------
if (ch=='\\'):
now = datetime.datetime.utcnow()
if _validappkey==1:
print 'valid app key: accept data'
ch=getSingleChar()
if (ch=='$'): #log on Dropbox
data = getAllLine()
print "rcv msg to log (\$) on dropbox: "+data,
f=open(os.path.expanduser(_telemetrylog_filename),"a")
f.write(info_str+' ')
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
#/////////////////////////////////////////////////////////////
# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
# WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAM
# IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD
#////////////////////////////////////////////////////////////
elif (ch=='&' and _firebase): #log on Firebase
ldata = getAllLine()
print 'rcv msg to log (\&) on firebase: '+data
firebase_msg = {
'dst':dst,
'type':ptypestr,
'gateway_eui' : _gwaddr,
'node_eui':src,
'seq':seq,
'len':datalen,
'snr':SNR,
'rssi':RSSI,
'cr' : cr,
'datarate' : "SF"+str(sf)+"BW"+str(bw),
'time':now.isoformat(),
'info_str':info_str+' '+now.isoformat()+'> '+ldata,
'data':ldata
}
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#get the data
data = ldata.split('/')
#change data in two arrays : nomenclature_array and value_array
iteration = 0
nomenclature_array = []
value_array = []
while iteration<len(data) :
if (iteration == 0 or iteration%2 == 0) :
nomenclature_array.append(data[iteration])
else :
value_array.append(data[iteration])
iteration += 1
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
iteration = 0
while iteration < len(nomenclature_array) :
#last iteration, do not add "," at the end
if iteration == len(nomenclature_array)-1 :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]
else :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]+", "
iteration += 1
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
sensor_entry='sensor%d'% (src)
msg_entry='msg%d' % (seq)
#upload data to firebase
firebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)
elif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature
ldata = getAllLine()
# get number of '#' separator
nsharp = ldata.count('#')
#no separator
if nsharp==0:
#will use default channel and field
data=['','']
#contains ['', '', "s1", s1value, "s2", s2value, ...]
data_array = data + re.split("/", ldata)
elif nsharp==1:
#only 1 separator
data_array = re.split("#|/", ldata)
#if the first item has length > 1 then we assume that it is a channel write key
if len(data_array[0])>1:
#insert '' to indicate default field
data_array.insert(1,'');
else:
#insert '' to indicate default channel
data_array.insert(0,'');
else:
#contains [channel, field, "s1", s1value, "s2", s2value, ...]
data_array = re.split("#|/", ldata)
#just in case we have an ending CR or 0
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')
#test if there are characters at the end of each value, then delete these characters
i = 3
while i < len(data_array) :
while not data_array[i][len(data_array[i])-1].isdigit() :
data_array[i] = data_array[i][:-1]
i += 2
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
#start from the first nomenclature
iteration = 2
while iteration < len(data_array)-1 :
#last iteration, do not add "," at the end
if iteration == len(data_array)-2 :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]
else :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]+", "
iteration += 2
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
# get number of '/' separator
nslash = ldata.count('/')
index_first_data = 2
if nslash==0:
# old syntax without nomenclature key
index_first_data=2
else:
# new syntax with nomenclature key
index_first_data=3
#------------------
#test for thingspeak
#------------------
if (_thingspeak):
second_data=str(seq)
if (_thingspeaksnr):
second_data=str(SNR)
#data to send to thingspeak
data = []
data.append(data_array[0]) #channel (if '' default)
data.append(data_array[1]) #field (if '' default)
data.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)
#upload data to thingspeak
#JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA
thingspeak_uploadSingleData(data, second_data)
# if you want to upload all data starting at field 1, uncomment next line, and comment previous line
#thingspeak_uploadMultipleData(data_array) # upload all data in the fields
#------------------
#test for FIWARE
#need FIWARE access
#------------------
if (_fiware):
print("FIWARE: upload")
#entity_id = 'test_item_'+now.isoformat()
entity_id = 'sensor%d'% (src)
#send the first sensor value in data_array
cmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]
print("FiWare: will issue python script")
print(cmd)
args = cmd.split()
try:
out = subprocess.check_output(args, shell=False)
except subprocess.CalledProcessError:
print("FiWare: python script failed")
if out.find('"reasonPhrase" : "OK"') > 0:
print("FiWare: Entity updated with ENTITY_ID "+entity_id)
else:
print("FiWare: Entity update failed")
#------------------
#test for sensorcloud
#------------------
if (_sensorcloud) :
#send the first sensor value in data_array
sensorcloud_uploadSingleData(data_array[index_first_data])
#------------------
#test for grovestreams
#------------------
if (_grovestreams):
nomenclatures = []
data = []
if nslash==0:
# old syntax without nomemclature key, so insert only one key
nomenclatures.append("temp")
data.append(data_array[index_first_data])
else:
#completing nomenclatures and data
i=2
while i < len(data_array)-1 :
nomenclatures.append(data_array[i])
data.append(data_array[i+1])
i += 2
#upload data to grovestreams
grovestreams_uploadSingleData(nomenclatures, data, str(src))
# END
#////////////////////////////////////////////////////////////
else: # not a known data logging prefix
#you may want to upload to a default service
#so just implement it here
print('unrecognized data logging prefix: discard data')
getAllLine()
else:
print('invalid app key: discard data')
getAllLine()
continue
# handle binary prefixes
if (ch == '\xFF' or ch == '+'):
#if (ch == '\xFF'):
print("got first framing byte")
ch=getSingleChar()
# data prefix for non-encrypted data
if (ch == '\xFE' or ch == '+'):
#if (ch == '\xFE'):
#the data prefix is inserted by the gateway
#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
print("--> got data prefix")
#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
#if _wappkey is set then we have to first indicate that _validappkey=0
if (_wappkey==1):
_validappkey=0
else:
_validappkey=1
# if we have raw output from gw, then try to determine which kind of packet it is
if (_rawFormat==1):
ch=getSingleChar()
# probably our modified Libelium header where the destination is the gateway
# dissect our modified Libelium format
if ch==1:
dst=ord(ch)
ptype=ord(getSingleChar())
src=ord(getSingleChar())
seq=ord(getSingleChar())
print("Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq))
# now we read datalen-4 (the header length) bytes in our line buffer
fillLinebuf(datalen-HEADER_SIZE)
# TODO: dissect LoRaWAN
# you can implement LoRaWAN decoding if this is necessary for your system
# look at the LoRaWAN packet format specification to dissect the packet in detail
#
# LoRaWAN uses the MHDR(1B)
# ----------------------------
# | 7 6 5 | 4 3 2 | 1 0 |
# ----------------------------
# MType RFU major
#
# the main MType is unconfirmed data up which value is 010
if (ch & 0x40)==0x40:
# Do the LoRaWAN decoding
print("LoRaWAN?")
# for the moment just discard the data
fillLinebuf(datalen-1)
getAllLine()
else:
# now we read datalen bytes in our line buffer
fillLinebuf(datalen)
# encrypted data payload?
if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
|
else:
_hasClearData=1
# with_appkey?
if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
print("--> DATA with_appkey: read app key sequence")
the_app_key = getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
print "app key is ",
print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)
if the_app_key in app_key_list:
print("in app key list")
if _wappkey==1:
_validappkey=1
else:
print("not in app key list")
if _wappkey==1:
_validappkey=0
else:
#we do not check for app key
_validappkey=1
print("but app key disabled")
continue
if (ch == '?' and _ignoreComment==1):
sys.stdin.readline()
continue
sys.stdout.write(ch)
| print("--> DATA encrypted: encrypted payload size is %d" % datalen)
_hasClearData=0
if _aes==1:
print("--> decrypting")
decrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)
# decrypt
s = decrypt_handler.decrypt(_linebuf)
for i in range(0, len(s)):
print "%.2X " % ord(s[i]),
print "\nEnd"
# get the real (decrypted) payload size
rsize = ord(s[APPKEY_SIZE])
print("--> real payload size is %d" % rsize)
# then add the appkey + the appkey framing bytes
rsize = rsize+APPKEY_SIZE+1
_linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]
for i in range(0, len(_linebuf)):
print "%.2X " % ord(_linebuf[i]),
print "\nEnd"
# normally next read from input will get data from the decrypted _linebuf
print "--> decrypted payload is: ",
print _linebuf[APPKEY_SIZE:]
_hasClearData=1
else:
print("--> DATA encrypted: aes not activated")
# drain stdin of all the encrypted data
enc_data=getAllLine()
print("--> discard encrypted data") | conditional_block |
post_processing_gw.py | #------------------------------------------------------------
# Copyright 2016 Congduc Pham, University of Pau, France.
#
# Congduc.Pham@univ-pau.fr
#
# This file is part of the low-cost LoRa gateway developped at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------
# IMPORTANT
# Parts that can be modified are identified with
#////////////////////////////////////////////////////////////
# TEXT
# END
#////////////////////////////////////////////////////////////
import sys
import select
import threading
from threading import Timer
import time
import datetime
import getopt
import os
import json
import re
#////////////////////////////////////////////////////////////
# ADD HERE BOOLEAN VARIABLES TO SUPPORT OTHER CLOUDS
# OR VARIABLES FOR YOUR OWN NEEDS
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#with firebase support?
#------------------------------------------------------------
_firebase=False
#------------------------------------------------------------
#with thingspeak support?
#------------------------------------------------------------
_thingspeak=False
#plot snr instead of seq
_thingspeaksnr=False
#------------------------------------------------------------
#with sensorcloud support?
#------------------------------------------------------------
_sensorcloud=False
#------------------------------------------------------------
#with grovestreams support?
#------------------------------------------------------------
_grovestreams=False
#------------------------------------------------------------
#with fiware support?
#------------------------------------------------------------
_fiware=False
#////////////////////////////////////////////////////////////
# ADD HERE APP KEYS THAT YOU WANT TO ALLOW FOR YOUR GATEWAY
#////////////////////////////////////////////////////////////
# NOTE: the format of the application key list has changed from
# a list of list, to a list of string that will be process as
# a byte array. Doing so wilL allow for dictionary construction
# using the appkey to retrieve information such as encryption key,...
app_key_list = [
#for testing
'****',
#change here your application key
'\x01\x02\x03\x04',
'\x05\x06\x07\x08'
]
#////////////////////////////////////////////////////////////
#FOR AES DECRYPTION
#////////////////////////////////////////////////////////////
#put your key here, should match the end-device's key
aes_key="0123456789010123"
#put your initialisation vector here, should match the end-device's initialisation vector
aes_iv="\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
#aes_iv="\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
#association between appkey and aes_key
appkey_aeskey = {
'\x01\x02\x03\x04':"0123456789010123",
'\x05\x06\x07\x08':"0123456789010123"
}
#association between appkey and aes_iv
appkey_aesiv = {
'\x01\x02\x03\x04':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00",
'\x05\x06\x07\x08':"\x9a\xd0\x30\x02\x00\x00\x00\x00\x9a\xd0\x30\x02\x00\x00\x00\x00"
}
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#header packet information
#------------------------------------------------------------
HEADER_SIZE=4
APPKEY_SIZE=4
PKT_TYPE_DATA=0x10
PKT_TYPE_ACK=0x20
PKT_FLAG_ACK_REQ=0x08
PKT_FLAG_DATA_ENCRYPTED=0x04
PKT_FLAG_DATA_WAPPKEY=0x02
PKT_FLAG_DATA_ISBINARY=0x01
#------------------------------------------------------------
#last pkt information
#------------------------------------------------------------
dst=0
ptype=0
ptypestr="N/A"
src=0
seq=0
datalen=0
SNR=0
RSSI=0
bw=0
cr=0
sf=0
#------------------------------------------------------------
#------------------------------------------------------------
#will ignore lines beginning with '?'
#------------------------------------------------------------
_ignoreComment=1
#------------------------------------------------------------
#with mongoDB support?
#------------------------------------------------------------
_mongodb = False
#------------------------------------------------------------
#log gateway message?
#------------------------------------------------------------
_logGateway=0
#------------------------------------------------------------
#raw output from gateway?
#------------------------------------------------------------
_rawFormat=0
#------------------------------------------------------------
_ourcustomFormat=0;
_lorawanFormat=0
#------------------------------------------------------------
#------------------------------------------------------------
#check for app key?
#------------------------------------------------------------
_wappkey=0
#------------------------------------------------------------
the_app_key = '\x00\x00\x00\x00'
#valid app key? by default we do not check for the app key
_validappkey=1
#------------------------------------------------------------
#for local AES decrypting
#------------------------------------------------------------
_aes=0
_hasClearData=0
#------------------------------------------------------------
#open json file to recover gateway_address
#------------------------------------------------------------
f = open(os.path.expanduser("local_conf.json"),"r")
lines = f.readlines()
f.close()
array = ""
#get all the lines in a string
for line in lines :
array += line
#change it into a python array
json_array = json.loads(array)
#set the gateway_address for having different log filenames
_gwaddr = json_array["gateway_conf"]["gateway_ID"]
#////////////////////////////////////////////////////////////
# CHANGE HERE THE VARIOUS PATHS FOR YOUR LOG FILES
#////////////////////////////////////////////////////////////
_folder_path = "/home/pi/Dropbox/LoRa-test/"
_gwlog_filename = _folder_path+"gateway_"+str(_gwaddr)+".log"
_telemetrylog_filename = _folder_path+"telemetry_"+str(_gwaddr)+".log"
# END
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#initialize gateway DHT22 sensor
#------------------------------------------------------------
_gw_dht22 = json_array["gateway_conf"]["dht22"]
_date_save_dht22 = None
if(_gw_dht22):
print "Use DHT22 to get gateway temperature and humidity level"
#read values from dht22 in the gateway box
sys.path.insert(0, os.path.expanduser('./sensors_in_raspi/dht22'))
from read_dht22 import get_dht22_values
_temperature = 0
_humidity = 0
# retrieve dht22 values
def save_dht22_values():
global _temperature, _humidity, _date_save_dht22
_humidity, _temperature = get_dht22_values()
_date_save_dht22 = datetime.datetime.utcnow()
print "Gateway TC : "+_temperature+" C | HU : "+_humidity+" % at "+str(_date_save_dht22)
#save values from the gateway box's DHT22 sensor, if _mongodb is true
if(_mongodb):
#saving data in a JSON var
str_json_data = "{\"th\":"+_temperature+", \"hu\":"+_humidity+"}"
#creating document to add
doc = {
"type" : "DATA_GW_DHT22",
"gateway_eui" : _gwaddr,
"node_eui" : "gw",
"snr" : "",
"rssi" : "",
"cr" : "",
"datarate" : "",
"time" : _date_save_dht22,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
def dht22_target():
while True:
print "Getting gateway temperature"
save_dht22_values()
sys.stdout.flush()
global _gw_dht22
time.sleep(_gw_dht22)
#------------------------------------------------------------
#for managing the input data when we can have aes encryption
#------------------------------------------------------------
_linebuf="the line buffer"
_linebuf_idx=0
_has_linebuf=0
def | ():
global _has_linebuf
# if we have a valid _linebuf then read from _linebuf
if _has_linebuf==1:
global _linebuf_idx
global _linebuf
if _linebuf_idx < len(_linebuf):
_linebuf_idx = _linebuf_idx + 1
return _linebuf[_linebuf_idx-1]
else:
# no more character from _linebuf, so read from stdin
_has_linebuf = 0
return sys.stdin.read(1)
else:
return sys.stdin.read(1)
def getAllLine():
global _linebuf_idx
p=_linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 0
global _linebuf
# return the remaining of the string and clear the _linebuf
return _linebuf[p:]
def fillLinebuf(n):
global _linebuf_idx
_linebuf_idx = 0
global _has_linebuf
_has_linebuf = 1
global _linebuf
# fill in our _linebuf from stdin
_linebuf=sys.stdin.read(n)
#////////////////////////////////////////////////////////////
# ADD HERE OPTIONS THAT YOU MAY WANT TO ADD
# BE CAREFUL, IT IS NOT ADVISED TO REMOVE OPTIONS UNLESS YOU
# REALLY KNOW WHAT YOU ARE DOING
#////////////////////////////////////////////////////////////
#------------------------------------------------------------
#for parsing the options
#------------------------------------------------------------
def main(argv):
try:
opts, args = getopt.getopt(argv,'iftLam:',[\
'ignorecomment',\
'firebase',\
'thingspeak',\
'retrythsk',\
'thingspeaksnr',\
'fiware',\
'sensorcloud',\
'grovestreams',\
'loggw',\
'addr',\
'wappkey',\
'raw',\
'aes',\
'mongodb'])
except getopt.GetoptError:
print 'post_processing_gw '+\
'-i/--ignorecomment '+\
'-f/--firebase '+\
'-t/--thingspeak '+\
'--retrythsk '+\
'--thingspeaksnr '+\
'--fiware '+\
'--sensorcloud '+\
'--grovestreams '+\
'-L/--loggw '+\
'-a/--addr '+\
'--wappkey '+\
'--raw '+\
'--aes '+\
'-m/--mongodb'
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--ignorecomment"):
print("will ignore commented lines")
global _ignoreComment
_ignoreComment = 1
elif opt in ("-f", "--firebase"):
print("will enable firebase support")
global _firebase
_firebase = True
global firebase_uploadSingleData
from FireBase import firebase_uploadSingleData
elif opt in ("-t", "--thingspeak"):
print("will enable thingspeak support")
global _thingspeak
_thingspeak = True
global thingspeak_uploadSingleData, thingspeak_uploadMultipleData
from ThingSpeak import thingspeak_uploadSingleData, thingspeak_uploadMultipleData
elif opt in ("--retrythsk"):
print("will enable thingspeak retry")
global thingspeak_setRetry
from ThingSpeak import thingspeak_setRetry
#set retry to True
thingspeak_setRetry(True)
elif opt in ("--thingspeaksnr"):
print("will plot snr instead of seq")
global _thingspeaksnr
_thingspeaksnr = True
elif opt in ("--fiware"):
print("will enable fiware support")
global _fiware
_fiware = True
elif opt in ("--sensorcloud"):
print("will enable sensorcloud support")
global _sensorcloud
_sensorcloud = True
global sensorcloud_uploadSingleData
from SensorCloud import sensorcloud_uploadSingleData
elif opt in ("--grovestreams"):
print("will enable grovestreams support")
global _grovestreams
_grovestreams = True
global grovestreams_uploadSingleData
from GroveStreams import grovestreams_uploadSingleData
elif opt in ("-L", "--loggw"):
print("will log gateway message prefixed by ^$")
global _logGateway
_logGateway = 1
elif opt in ("-a", "--addr"):
global _gwaddr
_gwaddr = arg
print("overwrite: will use _"+str(_gwaddr)+" for gateway and telemetry log files")
elif opt in ("--wappkey"):
global _wappkey
_wappkey = 1
global _validappkey
_validappkey=0
print("will check for correct app key")
elif opt in ("--raw"):
global _rawFormat
_rawFormat = 1
print("raw output from gateway. post_processing_gw will handle packet format")
elif opt in ("--aes"):
global _aes
_aes = 1
global AES
from Crypto.Cipher import AES
print("enable AES encrypted data")
elif opt in ("-m", "--mongodb"):
print("will enable local MongoDB support, max months to store is "+arg)
global _mongodb
_mongodb = True
global add_document, remove_if_new_month, mongodb_set_max_months
from MongoDB import add_document, remove_if_new_month, mongodb_set_max_months
#setting max months
mongodb_set_max_months(int(arg))
# END
#////////////////////////////////////////////////////////////
if __name__ == "__main__":
main(sys.argv[1:])
#gateway dht22
if (_gw_dht22):
print "Starting thread to measure gateway temperature"
t = threading.Thread(target=dht22_target)
t.daemon = True
t.start()
print "Current working directory: "+os.getcwd()
while True:
sys.stdout.flush()
ch = getSingleChar()
#expected prefixes
# ^p indicates a ctrl pkt info ^pdst(%d),ptype(%d),src(%d),seq(%d),len(%d),SNR(%d),RSSI=(%d) for the last received packet
# example: ^p1,16,3,0,234,8,-45
#
# ^r indicate a ctrl radio info ^rbw,cr,sf for the last received packet
# example: ^r500,5,12
#
# ^$ indicates an output (debug or log purposes) from the gateway that should be logged in the (Dropbox) gateway.log file
# example: ^$Set LoRa mode 4
#
# ^l indicates a ctrl LAS info ^lsrc(%d),type(%d)
# type is 1 for DSP_REG, 2 for DSP_INIT, 3 for DSP_UPDT, 4 for DSP_DATA
# example: ^l3,4
#
# \$ indicates a message that should be logged in the (Dropbox) telemetry.log file
# example: \$hello -> hello will be logged in the following format
# (src=3 seq=0 len=6 SNR=8 RSSI=-54) 2015-10-16T14:47:44.072230> hello
#
# \& indicates a message that should be logged in the firebase cloud database
# example: \&hello -> hello will be logged in json format
#
# \! indicates a message that should be logged on a thingspeak channel
# example: \!SGSH52UGPVAUYG3S#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at default field, i.e. field 1
# \!2#9.4 -> 9.4 will be logged in the default channel at field 2
# \!SGSH52UGPVAUYG3S#2#9.4 -> 9.4 will be logged in the SGSH52UGPVAUYG3S ThingSpeak channel at field 2
#
# you can log other information such as src, seq, len, SNR and RSSI on specific fields
#
# \xFF\xFE indicates radio data prefix
#
#
#------------------------------------------------------------
# '^' is reserved for control information from the gateway
#------------------------------------------------------------
if (ch=='^'):
now = datetime.datetime.utcnow()
ch=sys.stdin.read(1)
if (ch=='p'):
data = sys.stdin.readline()
print now.isoformat()
print "rcv ctrl pkt info (^p): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
dst=arr[0]
ptype=arr[1]
ptypestr="N/A"
if ((ptype & 0xF0)==PKT_TYPE_DATA):
ptypestr="DATA"
if (ptype & PKT_FLAG_DATA_ISBINARY)==PKT_FLAG_DATA_ISBINARY:
ptypestr = ptypestr + " IS_BINARY"
if (ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY:
ptypestr = ptypestr + " WAPPKEY"
if (ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED:
ptypestr = ptypestr + " ENCRYPTED"
if (ptype & PKT_FLAG_ACK_REQ)==PKT_FLAG_ACK_REQ:
ptypestr = ptypestr + " ACK_REQ"
if ((ptype & 0xF0)==PKT_TYPE_ACK):
ptypestr="ACK"
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
if (_rawFormat==0):
info_str="(dst=%d type=0x%.2X(%s) src=%d seq=%d len=%d SNR=%d RSSI=%d)" % (dst,ptype,ptypestr,src,seq,datalen,SNR,RSSI)
else:
info_str="rawFormat(len=%d SNR=%d RSSI=%d)" % (datalen,SNR,RSSI)
print info_str
# TODO: maintain statistics from received messages and periodically add these informations in the gateway.log file
if (ch=='r'):
data = sys.stdin.readline()
print "rcv ctrl radio info (^r): "+data,
arr = map(int,data.split(','))
print "splitted in: ",
print arr
bw=arr[0]
cr=arr[1]
sf=arr[2]
info_str="(BW=%d CR=%d SF=%d)" % (bw,cr,sf)
print info_str
if (ch=='t'):
rcv_timestamp = sys.stdin.readline()
print "rcv timestamp (^t): "+rcv_timestamp
if (ch=='l'):
# TODO: LAS service
print 'not implemented yet'
if (ch=='$' and _logGateway==1):
data = sys.stdin.readline()
print "rcv gw output to log (^$): "+data,
f=open(os.path.expanduser(_gwlog_filename),"a")
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
continue
#------------------------------------------------------------
# '\' is reserved for message logging service
#------------------------------------------------------------
if (ch=='\\'):
now = datetime.datetime.utcnow()
if _validappkey==1:
print 'valid app key: accept data'
ch=getSingleChar()
if (ch=='$'): #log on Dropbox
data = getAllLine()
print "rcv msg to log (\$) on dropbox: "+data,
f=open(os.path.expanduser(_telemetrylog_filename),"a")
f.write(info_str+' ')
f.write(now.isoformat()+'> ')
f.write(data)
f.close()
#/////////////////////////////////////////////////////////////
# YOU CAN MODIFY HERE HOW YOU WANT DATA TO BE PUSHED TO CLOUDS
# WE PROVIDE EXAMPLES FOR THINGSPEAK, GROVESTREAM
# IT IS ADVISED TO USE A SEPERATE PYTHON SCRIPT PER CLOUD
#////////////////////////////////////////////////////////////
elif (ch=='&' and _firebase): #log on Firebase
ldata = getAllLine()
print 'rcv msg to log (\&) on firebase: '+data
firebase_msg = {
'dst':dst,
'type':ptypestr,
'gateway_eui' : _gwaddr,
'node_eui':src,
'seq':seq,
'len':datalen,
'snr':SNR,
'rssi':RSSI,
'cr' : cr,
'datarate' : "SF"+str(sf)+"BW"+str(bw),
'time':now.isoformat(),
'info_str':info_str+' '+now.isoformat()+'> '+ldata,
'data':ldata
}
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#get the data
data = ldata.split('/')
#change data in two arrays : nomenclature_array and value_array
iteration = 0
nomenclature_array = []
value_array = []
while iteration<len(data) :
if (iteration == 0 or iteration%2 == 0) :
nomenclature_array.append(data[iteration])
else :
value_array.append(data[iteration])
iteration += 1
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
iteration = 0
while iteration < len(nomenclature_array) :
#last iteration, do not add "," at the end
if iteration == len(nomenclature_array)-1 :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]
else :
str_json_data += "\""+nomenclature_array[iteration]+"\" : "+value_array[iteration]+", "
iteration += 1
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
sensor_entry='sensor%d'% (src)
msg_entry='msg%d' % (seq)
#upload data to firebase
firebase_uploadSingleData(firebase_msg, sensor_entry, msg_entry, now)
elif (ch=='!'): #log on thingspeak, grovestreams, sensorcloud and connectingnature
ldata = getAllLine()
# get number of '#' separator
nsharp = ldata.count('#')
#no separator
if nsharp==0:
#will use default channel and field
data=['','']
#contains ['', '', "s1", s1value, "s2", s2value, ...]
data_array = data + re.split("/", ldata)
elif nsharp==1:
#only 1 separator
data_array = re.split("#|/", ldata)
#if the first item has length > 1 then we assume that it is a channel write key
if len(data_array[0])>1:
#insert '' to indicate default field
data_array.insert(1,'');
else:
#insert '' to indicate default channel
data_array.insert(0,'');
else:
#contains [channel, field, "s1", s1value, "s2", s2value, ...]
data_array = re.split("#|/", ldata)
#just in case we have an ending CR or 0
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')
#test if there are characters at the end of each value, then delete these characters
i = 3
while i < len(data_array) :
while not data_array[i][len(data_array[i])-1].isdigit() :
data_array[i] = data_array[i][:-1]
i += 2
if _mongodb :
#------------------
#saving in MongoDB
#------------------
#check if new month
remove_if_new_month(now)
print("MongoDB: saving the document in the collection...")
#saving data in a JSON var
str_json_data = "{"
#start from the first nomenclature
iteration = 2
while iteration < len(data_array)-1 :
#last iteration, do not add "," at the end
if iteration == len(data_array)-2 :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]
else :
str_json_data += "\""+data_array[iteration]+"\" : "+data_array[iteration+1]+", "
iteration += 2
str_json_data += "}"
#creating document to add
doc = {
"type" : ptypestr,
"gateway_eui" : _gwaddr,
"node_eui" : src,
"snr" : SNR,
"rssi" : RSSI,
"cr" : cr,
"datarate" : "SF"+str(sf)+"BW"+str(bw),
"time" : now,
"data" : json.dumps(json.loads(str_json_data))
}
#adding the document
add_document(doc)
print("MongoDB: saving done")
# get number of '/' separator
nslash = ldata.count('/')
index_first_data = 2
if nslash==0:
# old syntax without nomenclature key
index_first_data=2
else:
# new syntax with nomenclature key
index_first_data=3
#------------------
#test for thingspeak
#------------------
if (_thingspeak):
second_data=str(seq)
if (_thingspeaksnr):
second_data=str(SNR)
#data to send to thingspeak
data = []
data.append(data_array[0]) #channel (if '' default)
data.append(data_array[1]) #field (if '' default)
data.append(data_array[index_first_data]) #value to add (the first sensor value in data_array)
#upload data to thingspeak
#JUST FOR UPLOAD A SINGLE DATA IN A SPECIFIC FIELD AND SECOND DATA
thingspeak_uploadSingleData(data, second_data)
# if you want to upload all data starting at field 1, uncomment next line, and comment previous line
#thingspeak_uploadMultipleData(data_array) # upload all data in the fields
#------------------
#test for FIWARE
#need FIWARE access
#------------------
if (_fiware):
print("FIWARE: upload")
#entity_id = 'test_item_'+now.isoformat()
entity_id = 'sensor%d'% (src)
#send the first sensor value in data_array
cmd = 'python ./fiware_UpdateEntityAttribute.py '+entity_id+' test temperature float '+data_array[index_first_data]
print("FiWare: will issue python script")
print(cmd)
args = cmd.split()
try:
out = subprocess.check_output(args, shell=False)
except subprocess.CalledProcessError:
print("FiWare: python script failed")
if out.find('"reasonPhrase" : "OK"') > 0:
print("FiWare: Entity updated with ENTITY_ID "+entity_id)
else:
print("FiWare: Entity update failed")
#------------------
#test for sensorcloud
#------------------
if (_sensorcloud) :
#send the first sensor value in data_array
sensorcloud_uploadSingleData(data_array[index_first_data])
#------------------
#test for grovestreams
#------------------
if (_grovestreams):
nomenclatures = []
data = []
if nslash==0:
# old syntax without nomemclature key, so insert only one key
nomenclatures.append("temp")
data.append(data_array[index_first_data])
else:
#completing nomenclatures and data
i=2
while i < len(data_array)-1 :
nomenclatures.append(data_array[i])
data.append(data_array[i+1])
i += 2
#upload data to grovestreams
grovestreams_uploadSingleData(nomenclatures, data, str(src))
# END
#////////////////////////////////////////////////////////////
else: # not a known data logging prefix
#you may want to upload to a default service
#so just implement it here
print('unrecognized data logging prefix: discard data')
getAllLine()
else:
print('invalid app key: discard data')
getAllLine()
continue
# handle binary prefixes
if (ch == '\xFF' or ch == '+'):
#if (ch == '\xFF'):
print("got first framing byte")
ch=getSingleChar()
# data prefix for non-encrypted data
if (ch == '\xFE' or ch == '+'):
#if (ch == '\xFE'):
#the data prefix is inserted by the gateway
#do not modify, unless you know what you are doing and that you modify lora_gateway (comment WITH_DATA_PREFIX)
print("--> got data prefix")
#we actually need to use DATA_PREFIX in order to differentiate data from radio coming to the post-processing stage
#if _wappkey is set then we have to first indicate that _validappkey=0
if (_wappkey==1):
_validappkey=0
else:
_validappkey=1
# if we have raw output from gw, then try to determine which kind of packet it is
if (_rawFormat==1):
ch=getSingleChar()
# probably our modified Libelium header where the destination is the gateway
# dissect our modified Libelium format
if ch==1:
dst=ord(ch)
ptype=ord(getSingleChar())
src=ord(getSingleChar())
seq=ord(getSingleChar())
print("Libelium[dst=%d ptype=0x%.2X src=%d seq=%d]" % (dst,ptype,src,seq))
# now we read datalen-4 (the header length) bytes in our line buffer
fillLinebuf(datalen-HEADER_SIZE)
# TODO: dissect LoRaWAN
# you can implement LoRaWAN decoding if this is necessary for your system
# look at the LoRaWAN packet format specification to dissect the packet in detail
#
# LoRaWAN uses the MHDR(1B)
# ----------------------------
# | 7 6 5 | 4 3 2 | 1 0 |
# ----------------------------
# MType RFU major
#
# the main MType is unconfirmed data up which value is 010
if (ch & 0x40)==0x40:
# Do the LoRaWAN decoding
print("LoRaWAN?")
# for the moment just discard the data
fillLinebuf(datalen-1)
getAllLine()
else:
# now we read datalen bytes in our line buffer
fillLinebuf(datalen)
# encrypted data payload?
if ((ptype & PKT_FLAG_DATA_ENCRYPTED)==PKT_FLAG_DATA_ENCRYPTED):
print("--> DATA encrypted: encrypted payload size is %d" % datalen)
_hasClearData=0
if _aes==1:
print("--> decrypting")
decrypt_handler = AES.new(aes_key, AES.MODE_CBC, aes_iv)
# decrypt
s = decrypt_handler.decrypt(_linebuf)
for i in range(0, len(s)):
print "%.2X " % ord(s[i]),
print "\nEnd"
# get the real (decrypted) payload size
rsize = ord(s[APPKEY_SIZE])
print("--> real payload size is %d" % rsize)
# then add the appkey + the appkey framing bytes
rsize = rsize+APPKEY_SIZE+1
_linebuf = s[:APPKEY_SIZE] + s[APPKEY_SIZE+1:rsize]
for i in range(0, len(_linebuf)):
print "%.2X " % ord(_linebuf[i]),
print "\nEnd"
# normally next read from input will get data from the decrypted _linebuf
print "--> decrypted payload is: ",
print _linebuf[APPKEY_SIZE:]
_hasClearData=1
else:
print("--> DATA encrypted: aes not activated")
# drain stdin of all the encrypted data
enc_data=getAllLine()
print("--> discard encrypted data")
else:
_hasClearData=1
# with_appkey?
if ((ptype & PKT_FLAG_DATA_WAPPKEY)==PKT_FLAG_DATA_WAPPKEY and _hasClearData==1):
print("--> DATA with_appkey: read app key sequence")
the_app_key = getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
the_app_key = the_app_key + getSingleChar()
print "app key is ",
print " ".join("0x{:02x}".format(ord(c)) for c in the_app_key)
if the_app_key in app_key_list:
print("in app key list")
if _wappkey==1:
_validappkey=1
else:
print("not in app key list")
if _wappkey==1:
_validappkey=0
else:
#we do not check for app key
_validappkey=1
print("but app key disabled")
continue
if (ch == '?' and _ignoreComment==1):
sys.stdin.readline()
continue
sys.stdout.write(ch)
| getSingleChar | identifier_name |
edid.rs | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of the EDID specification provided by software.
//! EDID spec: <https://glenwing.github.io/docs/VESA-EEDID-A2.pdf>
use std::fmt;
use std::fmt::Debug;
use super::protocol::GpuResponse::*;
use super::protocol::VirtioGpuResult;
const EDID_DATA_LENGTH: usize = 128;
const DEFAULT_HORIZONTAL_BLANKING: u16 = 560;
const DEFAULT_VERTICAL_BLANKING: u16 = 50;
const DEFAULT_HORIZONTAL_FRONT_PORCH: u16 = 64;
const DEFAULT_VERTICAL_FRONT_PORCH: u16 = 1;
const DEFAULT_HORIZONTAL_SYNC_PULSE: u16 = 192;
const DEFAULT_VERTICAL_SYNC_PULSE: u16 = 3;
/// This class is used to create the Extended Display Identification Data (EDID), which will be
/// exposed to the guest system.
///
/// We ignore most of the spec, the point here being for us to provide enough for graphics to work
/// and to allow us to configure the resolution and refresh rate (via the preferred timing mode
/// pixel clock).
///
/// The EDID spec defines a number of methods to provide mode information, but in priority order the
/// "detailed" timing information is first, so we provide a single block of detailed timing
/// information and no other form of timing information.
#[repr(C)]
pub struct EdidBytes {
bytes: [u8; EDID_DATA_LENGTH],
}
impl EdidBytes {
pub fn len(&self) -> usize {
self.bytes.len()
}
pub fn as_bytes(&self) -> &[u8] {
&self.bytes
}
}
impl Debug for EdidBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.bytes[..].fmt(f)
}
}
impl PartialEq for EdidBytes {
fn eq(&self, other: &EdidBytes) -> bool {
self.bytes[..] == other.bytes[..]
}
}
#[derive(Copy, Clone)]
pub struct Resolution {
width: u32,
height: u32,
}
impl Resolution {
fn new(width: u32, height: u32) -> Resolution {
Resolution { width, height }
}
fn get_aspect_ratio(&self) -> (u32, u32) {
let divisor = gcd(self.width, self.height);
(self.width / divisor, self.height / divisor)
}
}
fn gcd(x: u32, y: u32) -> u32 {
match y {
0 => x,
_ => gcd(y, x % y),
}
}
#[derive(Copy, Clone)]
pub struct DisplayInfo {
resolution: Resolution,
refresh_rate: u32,
horizontal_blanking: u16,
vertical_blanking: u16,
horizontal_front: u16,
vertical_front: u16,
horizontal_sync: u16,
vertical_sync: u16,
}
impl DisplayInfo {
/// Only width, height and refresh rate are required for the graphics stack to work, so instead
/// of pulling actual numbers from the system, we just use some typical values to populate other
/// fields for now.
pub fn new(width: u32, height: u32, refresh_rate: u32) -> Self {
Self {
resolution: Resolution::new(width, height),
refresh_rate,
horizontal_blanking: DEFAULT_HORIZONTAL_BLANKING,
vertical_blanking: DEFAULT_VERTICAL_BLANKING,
horizontal_front: DEFAULT_HORIZONTAL_FRONT_PORCH,
vertical_front: DEFAULT_VERTICAL_FRONT_PORCH,
horizontal_sync: DEFAULT_HORIZONTAL_SYNC_PULSE,
vertical_sync: DEFAULT_VERTICAL_SYNC_PULSE,
}
}
pub fn width(&self) -> u32 {
self.resolution.width
}
pub fn height(&self) -> u32 {
self.resolution.height
}
}
impl EdidBytes {
/// Creates a virtual EDID block.
pub fn new(info: &DisplayInfo) -> VirtioGpuResult {
let mut edid: [u8; EDID_DATA_LENGTH] = [0; EDID_DATA_LENGTH];
populate_header(&mut edid);
populate_edid_version(&mut edid);
populate_standard_timings(&mut edid)?;
// 4 available descriptor blocks
let block0 = &mut edid[54..72];
populate_detailed_timing(block0, info);
let block1 = &mut edid[72..90];
populate_display_name(block1);
calculate_checksum(&mut edid);
Ok(OkEdid(Self { bytes: edid }))
}
}
fn populate_display_name(edid_block: &mut [u8]) {
// Display Product Name String Descriptor Tag
edid_block[0..5].clone_from_slice(&[0x00, 0x00, 0x00, 0xFC, 0x00]);
edid_block[5..].clone_from_slice("CrosvmDisplay".as_bytes());
}
fn populate_detailed_timing(edid_block: &mut [u8], info: &DisplayInfo) {
assert_eq!(edid_block.len(), 18);
// Detailed timings
//
// 18 Byte Descriptors - 72 Bytes
// The 72 bytes in this section are divided into four data fields. Each of the four data fields
// are 18 bytes in length. These 18 byte data fields shall contain either detailed timing data
// as described in Section 3.10.2 or other types of data as described in Section 3.10.3. The
// addresses and the contents of the four 18 byte descriptors are shown in Table 3.20.
//
// We leave the bottom 6 bytes of this block purposefully empty.
let horizontal_blanking_lsb: u8 = (info.horizontal_blanking & 0xFF) as u8;
let horizontal_blanking_msb: u8 = ((info.horizontal_blanking >> 8) & 0x0F) as u8;
let vertical_blanking_lsb: u8 = (info.vertical_blanking & 0xFF) as u8;
let vertical_blanking_msb: u8 = ((info.vertical_blanking >> 8) & 0x0F) as u8;
// The pixel clock is what controls the refresh timing information.
//
// The formula for getting refresh rate out of this value is:
// refresh_rate = clk * 10000 / (htotal * vtotal)
// Solving for clk:
// clk = (refresh_rate * htotal * votal) / 10000
//
// where:
// clk - The setting here
// vtotal - Total lines
// htotal - Total pixels per line
//
// Value here is pixel clock + 10,000, in 10khz steps.
//
// Pseudocode of kernel logic for vrefresh:
// vtotal := mode->vtotal;
// calc_val := (clock * 1000) / htotal
// refresh := (calc_val + vtotal / 2) / vtotal
// if flags & INTERLACE: refresh *= 2
// if flags & DBLSCAN: refresh /= 2
// if vscan > 1: refresh /= vscan
//
let htotal = info.width() + (info.horizontal_blanking as u32);
let vtotal = info.height() + (info.vertical_blanking as u32);
let mut clock: u16 = ((info.refresh_rate * htotal * vtotal) / 10000) as u16;
// Round to nearest 10khz.
clock = ((clock + 5) / 10) * 10;
edid_block[0..2].copy_from_slice(&clock.to_le_bytes());
let width_lsb: u8 = (info.width() & 0xFF) as u8;
let width_msb: u8 = ((info.width() >> 8) & 0x0F) as u8;
// Horizointal Addressable Video in pixels.
edid_block[2] = width_lsb;
// Horizontal blanking in pixels.
edid_block[3] = horizontal_blanking_lsb;
// Upper bits of the two above vals.
edid_block[4] = horizontal_blanking_msb | (width_msb << 4) as u8;
let vertical_active: u32 = info.height();
let vertical_active_lsb: u8 = (vertical_active & 0xFF) as u8;
let vertical_active_msb: u8 = ((vertical_active >> 8) & 0x0F) as u8;
// Vertical addressable video in *lines*
edid_block[5] = vertical_active_lsb;
// Vertical blanking in lines
edid_block[6] = vertical_blanking_lsb;
// Sigbits of the above.
edid_block[7] = vertical_blanking_msb | (vertical_active_msb << 4);
let horizontal_front_lsb: u8 = (info.horizontal_front & 0xFF) as u8; // least sig 8 bits
let horizontal_front_msb: u8 = ((info.horizontal_front >> 8) & 0x03) as u8; // most sig 2 bits
let horizontal_sync_lsb: u8 = (info.horizontal_sync & 0xFF) as u8; // least sig 8 bits
let horizontal_sync_msb: u8 = ((info.horizontal_sync >> 8) & 0x03) as u8; // most sig 2 bits
let vertical_front_lsb: u8 = (info.vertical_front & 0x0F) as u8; // least sig 4 bits
let vertical_front_msb: u8 = ((info.vertical_front >> 8) & 0x0F) as u8; // most sig 2 bits
let vertical_sync_lsb: u8 = (info.vertical_sync & 0xFF) as u8; // least sig 4 bits
let vertical_sync_msb: u8 = ((info.vertical_sync >> 8) & 0x0F) as u8; // most sig 2 bits
// Horizontal front porch in pixels.
edid_block[8] = horizontal_front_lsb;
// Horizontal sync pulse width in pixels.
edid_block[9] = horizontal_sync_lsb;
// LSB of vertical front porch and sync pulse
edid_block[10] = vertical_sync_lsb | (vertical_front_lsb << 4);
// Upper 2 bits of these values.
edid_block[11] = vertical_sync_msb
| (vertical_front_msb << 2)
| (horizontal_sync_msb << 4)
| (horizontal_front_msb << 6);
}
// The EDID header. This is defined by the EDID spec.
fn populate_header(edid: &mut [u8]) {
edid[0] = 0x00;
edid[1] = 0xFF;
edid[2] = 0xFF;
edid[3] = 0xFF;
edid[4] = 0xFF;
edid[5] = 0xFF;
edid[6] = 0xFF;
edid[7] = 0x00;
let manufacturer_name: [char; 3] = ['G', 'G', 'L'];
// 00001 -> A, 00010 -> B, etc
let manufacturer_id: u16 = manufacturer_name
.iter()
.map(|c| (*c as u8 - b'A' + 1) & 0x1F)
.fold(0u16, |res, lsb| (res << 5) | (lsb as u16));
edid[8..10].copy_from_slice(&manufacturer_id.to_be_bytes());
let manufacture_product_id: u16 = 1;
edid[10..12].copy_from_slice(&manufacture_product_id.to_le_bytes());
let serial_id: u32 = 1;
edid[12..16].copy_from_slice(&serial_id.to_le_bytes());
let manufacture_week: u8 = 8;
edid[16] = manufacture_week;
let manufacture_year: u32 = 2022;
edid[17] = (manufacture_year - 1990u32) as u8;
}
// The standard timings are 8 timing modes with a lower priority (and different data format)
// than the 4 detailed timing modes.
fn populate_standard_timings(edid: &mut [u8]) -> VirtioGpuResult {
let resolutions = [
Resolution::new(1440, 900),
Resolution::new(1600, 900),
Resolution::new(800, 600),
Resolution::new(1680, 1050),
Resolution::new(1856, 1392),
Resolution::new(1280, 1024),
Resolution::new(1400, 1050),
Resolution::new(1920, 1200),
];
// Index 0 is horizontal pixels / 8 - 31
// Index 1 is a combination of the refresh_rate - 60 (so we are setting to 0, for now) and two
// bits for the aspect ratio.
for (index, r) in resolutions.iter().enumerate() {
edid[0x26 + (index * 2)] = (r.width / 8 - 31) as u8;
let ar_bits = match r.get_aspect_ratio() {
(8, 5) => 0x0,
(4, 3) => 0x1,
(5, 4) => 0x2,
(16, 9) => 0x3,
(x, y) => return Err(ErrEdid(format!("Unsupported aspect ratio: {} {}", x, y))),
};
edid[0x27 + (index * 2)] = ar_bits;
}
Ok(OkNoData)
}
// Per the EDID spec, needs to be 1 and 4.
fn populate_edid_version(edid: &mut [u8]) |
fn calculate_checksum(edid: &mut [u8]) {
let mut checksum: u8 = 0;
for byte in edid.iter().take(EDID_DATA_LENGTH - 1) {
checksum = checksum.wrapping_add(*byte);
}
if checksum != 0 {
checksum = 255 - checksum + 1;
}
edid[127] = checksum;
}
| {
edid[18] = 1;
edid[19] = 4;
} | identifier_body |
edid.rs | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of the EDID specification provided by software.
//! EDID spec: <https://glenwing.github.io/docs/VESA-EEDID-A2.pdf>
use std::fmt;
use std::fmt::Debug;
use super::protocol::GpuResponse::*;
use super::protocol::VirtioGpuResult;
const EDID_DATA_LENGTH: usize = 128;
const DEFAULT_HORIZONTAL_BLANKING: u16 = 560;
const DEFAULT_VERTICAL_BLANKING: u16 = 50;
const DEFAULT_HORIZONTAL_FRONT_PORCH: u16 = 64;
const DEFAULT_VERTICAL_FRONT_PORCH: u16 = 1;
const DEFAULT_HORIZONTAL_SYNC_PULSE: u16 = 192;
const DEFAULT_VERTICAL_SYNC_PULSE: u16 = 3;
/// This class is used to create the Extended Display Identification Data (EDID), which will be
/// exposed to the guest system.
///
/// We ignore most of the spec, the point here being for us to provide enough for graphics to work
/// and to allow us to configure the resolution and refresh rate (via the preferred timing mode
/// pixel clock).
///
/// The EDID spec defines a number of methods to provide mode information, but in priority order the
/// "detailed" timing information is first, so we provide a single block of detailed timing
/// information and no other form of timing information.
#[repr(C)]
pub struct EdidBytes {
bytes: [u8; EDID_DATA_LENGTH],
}
impl EdidBytes {
pub fn len(&self) -> usize {
self.bytes.len()
}
pub fn as_bytes(&self) -> &[u8] {
&self.bytes
}
}
impl Debug for EdidBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.bytes[..].fmt(f)
}
}
impl PartialEq for EdidBytes {
fn eq(&self, other: &EdidBytes) -> bool {
self.bytes[..] == other.bytes[..]
}
}
#[derive(Copy, Clone)]
pub struct Resolution {
width: u32,
height: u32,
}
impl Resolution {
fn | (width: u32, height: u32) -> Resolution {
Resolution { width, height }
}
fn get_aspect_ratio(&self) -> (u32, u32) {
let divisor = gcd(self.width, self.height);
(self.width / divisor, self.height / divisor)
}
}
fn gcd(x: u32, y: u32) -> u32 {
match y {
0 => x,
_ => gcd(y, x % y),
}
}
#[derive(Copy, Clone)]
pub struct DisplayInfo {
resolution: Resolution,
refresh_rate: u32,
horizontal_blanking: u16,
vertical_blanking: u16,
horizontal_front: u16,
vertical_front: u16,
horizontal_sync: u16,
vertical_sync: u16,
}
impl DisplayInfo {
/// Only width, height and refresh rate are required for the graphics stack to work, so instead
/// of pulling actual numbers from the system, we just use some typical values to populate other
/// fields for now.
pub fn new(width: u32, height: u32, refresh_rate: u32) -> Self {
Self {
resolution: Resolution::new(width, height),
refresh_rate,
horizontal_blanking: DEFAULT_HORIZONTAL_BLANKING,
vertical_blanking: DEFAULT_VERTICAL_BLANKING,
horizontal_front: DEFAULT_HORIZONTAL_FRONT_PORCH,
vertical_front: DEFAULT_VERTICAL_FRONT_PORCH,
horizontal_sync: DEFAULT_HORIZONTAL_SYNC_PULSE,
vertical_sync: DEFAULT_VERTICAL_SYNC_PULSE,
}
}
pub fn width(&self) -> u32 {
self.resolution.width
}
pub fn height(&self) -> u32 {
self.resolution.height
}
}
impl EdidBytes {
/// Creates a virtual EDID block.
pub fn new(info: &DisplayInfo) -> VirtioGpuResult {
let mut edid: [u8; EDID_DATA_LENGTH] = [0; EDID_DATA_LENGTH];
populate_header(&mut edid);
populate_edid_version(&mut edid);
populate_standard_timings(&mut edid)?;
// 4 available descriptor blocks
let block0 = &mut edid[54..72];
populate_detailed_timing(block0, info);
let block1 = &mut edid[72..90];
populate_display_name(block1);
calculate_checksum(&mut edid);
Ok(OkEdid(Self { bytes: edid }))
}
}
fn populate_display_name(edid_block: &mut [u8]) {
// Display Product Name String Descriptor Tag
edid_block[0..5].clone_from_slice(&[0x00, 0x00, 0x00, 0xFC, 0x00]);
edid_block[5..].clone_from_slice("CrosvmDisplay".as_bytes());
}
fn populate_detailed_timing(edid_block: &mut [u8], info: &DisplayInfo) {
assert_eq!(edid_block.len(), 18);
// Detailed timings
//
// 18 Byte Descriptors - 72 Bytes
// The 72 bytes in this section are divided into four data fields. Each of the four data fields
// are 18 bytes in length. These 18 byte data fields shall contain either detailed timing data
// as described in Section 3.10.2 or other types of data as described in Section 3.10.3. The
// addresses and the contents of the four 18 byte descriptors are shown in Table 3.20.
//
// We leave the bottom 6 bytes of this block purposefully empty.
let horizontal_blanking_lsb: u8 = (info.horizontal_blanking & 0xFF) as u8;
let horizontal_blanking_msb: u8 = ((info.horizontal_blanking >> 8) & 0x0F) as u8;
let vertical_blanking_lsb: u8 = (info.vertical_blanking & 0xFF) as u8;
let vertical_blanking_msb: u8 = ((info.vertical_blanking >> 8) & 0x0F) as u8;
// The pixel clock is what controls the refresh timing information.
//
// The formula for getting refresh rate out of this value is:
// refresh_rate = clk * 10000 / (htotal * vtotal)
// Solving for clk:
// clk = (refresh_rate * htotal * votal) / 10000
//
// where:
// clk - The setting here
// vtotal - Total lines
// htotal - Total pixels per line
//
// Value here is pixel clock + 10,000, in 10khz steps.
//
// Pseudocode of kernel logic for vrefresh:
// vtotal := mode->vtotal;
// calc_val := (clock * 1000) / htotal
// refresh := (calc_val + vtotal / 2) / vtotal
// if flags & INTERLACE: refresh *= 2
// if flags & DBLSCAN: refresh /= 2
// if vscan > 1: refresh /= vscan
//
let htotal = info.width() + (info.horizontal_blanking as u32);
let vtotal = info.height() + (info.vertical_blanking as u32);
let mut clock: u16 = ((info.refresh_rate * htotal * vtotal) / 10000) as u16;
// Round to nearest 10khz.
clock = ((clock + 5) / 10) * 10;
edid_block[0..2].copy_from_slice(&clock.to_le_bytes());
let width_lsb: u8 = (info.width() & 0xFF) as u8;
let width_msb: u8 = ((info.width() >> 8) & 0x0F) as u8;
// Horizointal Addressable Video in pixels.
edid_block[2] = width_lsb;
// Horizontal blanking in pixels.
edid_block[3] = horizontal_blanking_lsb;
// Upper bits of the two above vals.
edid_block[4] = horizontal_blanking_msb | (width_msb << 4) as u8;
let vertical_active: u32 = info.height();
let vertical_active_lsb: u8 = (vertical_active & 0xFF) as u8;
let vertical_active_msb: u8 = ((vertical_active >> 8) & 0x0F) as u8;
// Vertical addressable video in *lines*
edid_block[5] = vertical_active_lsb;
// Vertical blanking in lines
edid_block[6] = vertical_blanking_lsb;
// Sigbits of the above.
edid_block[7] = vertical_blanking_msb | (vertical_active_msb << 4);
let horizontal_front_lsb: u8 = (info.horizontal_front & 0xFF) as u8; // least sig 8 bits
let horizontal_front_msb: u8 = ((info.horizontal_front >> 8) & 0x03) as u8; // most sig 2 bits
let horizontal_sync_lsb: u8 = (info.horizontal_sync & 0xFF) as u8; // least sig 8 bits
let horizontal_sync_msb: u8 = ((info.horizontal_sync >> 8) & 0x03) as u8; // most sig 2 bits
let vertical_front_lsb: u8 = (info.vertical_front & 0x0F) as u8; // least sig 4 bits
let vertical_front_msb: u8 = ((info.vertical_front >> 8) & 0x0F) as u8; // most sig 2 bits
let vertical_sync_lsb: u8 = (info.vertical_sync & 0xFF) as u8; // least sig 4 bits
let vertical_sync_msb: u8 = ((info.vertical_sync >> 8) & 0x0F) as u8; // most sig 2 bits
// Horizontal front porch in pixels.
edid_block[8] = horizontal_front_lsb;
// Horizontal sync pulse width in pixels.
edid_block[9] = horizontal_sync_lsb;
// LSB of vertical front porch and sync pulse
edid_block[10] = vertical_sync_lsb | (vertical_front_lsb << 4);
// Upper 2 bits of these values.
edid_block[11] = vertical_sync_msb
| (vertical_front_msb << 2)
| (horizontal_sync_msb << 4)
| (horizontal_front_msb << 6);
}
// The EDID header. This is defined by the EDID spec.
fn populate_header(edid: &mut [u8]) {
edid[0] = 0x00;
edid[1] = 0xFF;
edid[2] = 0xFF;
edid[3] = 0xFF;
edid[4] = 0xFF;
edid[5] = 0xFF;
edid[6] = 0xFF;
edid[7] = 0x00;
let manufacturer_name: [char; 3] = ['G', 'G', 'L'];
// 00001 -> A, 00010 -> B, etc
let manufacturer_id: u16 = manufacturer_name
.iter()
.map(|c| (*c as u8 - b'A' + 1) & 0x1F)
.fold(0u16, |res, lsb| (res << 5) | (lsb as u16));
edid[8..10].copy_from_slice(&manufacturer_id.to_be_bytes());
let manufacture_product_id: u16 = 1;
edid[10..12].copy_from_slice(&manufacture_product_id.to_le_bytes());
let serial_id: u32 = 1;
edid[12..16].copy_from_slice(&serial_id.to_le_bytes());
let manufacture_week: u8 = 8;
edid[16] = manufacture_week;
let manufacture_year: u32 = 2022;
edid[17] = (manufacture_year - 1990u32) as u8;
}
// The standard timings are 8 timing modes with a lower priority (and different data format)
// than the 4 detailed timing modes.
fn populate_standard_timings(edid: &mut [u8]) -> VirtioGpuResult {
let resolutions = [
Resolution::new(1440, 900),
Resolution::new(1600, 900),
Resolution::new(800, 600),
Resolution::new(1680, 1050),
Resolution::new(1856, 1392),
Resolution::new(1280, 1024),
Resolution::new(1400, 1050),
Resolution::new(1920, 1200),
];
// Index 0 is horizontal pixels / 8 - 31
// Index 1 is a combination of the refresh_rate - 60 (so we are setting to 0, for now) and two
// bits for the aspect ratio.
for (index, r) in resolutions.iter().enumerate() {
edid[0x26 + (index * 2)] = (r.width / 8 - 31) as u8;
let ar_bits = match r.get_aspect_ratio() {
(8, 5) => 0x0,
(4, 3) => 0x1,
(5, 4) => 0x2,
(16, 9) => 0x3,
(x, y) => return Err(ErrEdid(format!("Unsupported aspect ratio: {} {}", x, y))),
};
edid[0x27 + (index * 2)] = ar_bits;
}
Ok(OkNoData)
}
// Per the EDID spec, needs to be 1 and 4.
fn populate_edid_version(edid: &mut [u8]) {
edid[18] = 1;
edid[19] = 4;
}
fn calculate_checksum(edid: &mut [u8]) {
let mut checksum: u8 = 0;
for byte in edid.iter().take(EDID_DATA_LENGTH - 1) {
checksum = checksum.wrapping_add(*byte);
}
if checksum != 0 {
checksum = 255 - checksum + 1;
}
edid[127] = checksum;
}
| new | identifier_name |
edid.rs | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of the EDID specification provided by software.
//! EDID spec: <https://glenwing.github.io/docs/VESA-EEDID-A2.pdf>
use std::fmt;
use std::fmt::Debug;
use super::protocol::GpuResponse::*;
use super::protocol::VirtioGpuResult;
const EDID_DATA_LENGTH: usize = 128;
const DEFAULT_HORIZONTAL_BLANKING: u16 = 560;
const DEFAULT_VERTICAL_BLANKING: u16 = 50;
const DEFAULT_HORIZONTAL_FRONT_PORCH: u16 = 64;
const DEFAULT_VERTICAL_FRONT_PORCH: u16 = 1;
const DEFAULT_HORIZONTAL_SYNC_PULSE: u16 = 192;
const DEFAULT_VERTICAL_SYNC_PULSE: u16 = 3;
/// This class is used to create the Extended Display Identification Data (EDID), which will be
/// exposed to the guest system.
///
/// We ignore most of the spec, the point here being for us to provide enough for graphics to work
/// and to allow us to configure the resolution and refresh rate (via the preferred timing mode
/// pixel clock).
///
/// The EDID spec defines a number of methods to provide mode information, but in priority order the
/// "detailed" timing information is first, so we provide a single block of detailed timing
/// information and no other form of timing information.
#[repr(C)]
pub struct EdidBytes {
bytes: [u8; EDID_DATA_LENGTH],
}
impl EdidBytes {
pub fn len(&self) -> usize {
self.bytes.len()
}
pub fn as_bytes(&self) -> &[u8] {
&self.bytes
}
}
impl Debug for EdidBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.bytes[..].fmt(f)
}
}
impl PartialEq for EdidBytes {
fn eq(&self, other: &EdidBytes) -> bool {
self.bytes[..] == other.bytes[..]
}
}
#[derive(Copy, Clone)]
pub struct Resolution {
width: u32,
height: u32,
}
impl Resolution {
fn new(width: u32, height: u32) -> Resolution {
Resolution { width, height }
}
fn get_aspect_ratio(&self) -> (u32, u32) {
let divisor = gcd(self.width, self.height);
(self.width / divisor, self.height / divisor)
}
}
fn gcd(x: u32, y: u32) -> u32 {
match y {
0 => x,
_ => gcd(y, x % y),
}
}
#[derive(Copy, Clone)]
pub struct DisplayInfo {
resolution: Resolution,
refresh_rate: u32,
horizontal_blanking: u16,
vertical_blanking: u16,
horizontal_front: u16,
vertical_front: u16,
horizontal_sync: u16,
vertical_sync: u16,
}
impl DisplayInfo {
/// Only width, height and refresh rate are required for the graphics stack to work, so instead
/// of pulling actual numbers from the system, we just use some typical values to populate other
/// fields for now.
pub fn new(width: u32, height: u32, refresh_rate: u32) -> Self {
Self {
resolution: Resolution::new(width, height),
refresh_rate,
horizontal_blanking: DEFAULT_HORIZONTAL_BLANKING,
vertical_blanking: DEFAULT_VERTICAL_BLANKING,
horizontal_front: DEFAULT_HORIZONTAL_FRONT_PORCH,
vertical_front: DEFAULT_VERTICAL_FRONT_PORCH,
horizontal_sync: DEFAULT_HORIZONTAL_SYNC_PULSE,
vertical_sync: DEFAULT_VERTICAL_SYNC_PULSE,
}
}
pub fn width(&self) -> u32 {
self.resolution.width
}
pub fn height(&self) -> u32 {
self.resolution.height
}
}
impl EdidBytes {
/// Creates a virtual EDID block.
pub fn new(info: &DisplayInfo) -> VirtioGpuResult {
let mut edid: [u8; EDID_DATA_LENGTH] = [0; EDID_DATA_LENGTH];
populate_header(&mut edid);
populate_edid_version(&mut edid);
populate_standard_timings(&mut edid)?;
// 4 available descriptor blocks
let block0 = &mut edid[54..72];
populate_detailed_timing(block0, info);
let block1 = &mut edid[72..90];
populate_display_name(block1);
| }
}
fn populate_display_name(edid_block: &mut [u8]) {
// Display Product Name String Descriptor Tag
edid_block[0..5].clone_from_slice(&[0x00, 0x00, 0x00, 0xFC, 0x00]);
edid_block[5..].clone_from_slice("CrosvmDisplay".as_bytes());
}
fn populate_detailed_timing(edid_block: &mut [u8], info: &DisplayInfo) {
assert_eq!(edid_block.len(), 18);
// Detailed timings
//
// 18 Byte Descriptors - 72 Bytes
// The 72 bytes in this section are divided into four data fields. Each of the four data fields
// are 18 bytes in length. These 18 byte data fields shall contain either detailed timing data
// as described in Section 3.10.2 or other types of data as described in Section 3.10.3. The
// addresses and the contents of the four 18 byte descriptors are shown in Table 3.20.
//
// We leave the bottom 6 bytes of this block purposefully empty.
let horizontal_blanking_lsb: u8 = (info.horizontal_blanking & 0xFF) as u8;
let horizontal_blanking_msb: u8 = ((info.horizontal_blanking >> 8) & 0x0F) as u8;
let vertical_blanking_lsb: u8 = (info.vertical_blanking & 0xFF) as u8;
let vertical_blanking_msb: u8 = ((info.vertical_blanking >> 8) & 0x0F) as u8;
// The pixel clock is what controls the refresh timing information.
//
// The formula for getting refresh rate out of this value is:
// refresh_rate = clk * 10000 / (htotal * vtotal)
// Solving for clk:
// clk = (refresh_rate * htotal * votal) / 10000
//
// where:
// clk - The setting here
// vtotal - Total lines
// htotal - Total pixels per line
//
// Value here is pixel clock + 10,000, in 10khz steps.
//
// Pseudocode of kernel logic for vrefresh:
// vtotal := mode->vtotal;
// calc_val := (clock * 1000) / htotal
// refresh := (calc_val + vtotal / 2) / vtotal
// if flags & INTERLACE: refresh *= 2
// if flags & DBLSCAN: refresh /= 2
// if vscan > 1: refresh /= vscan
//
let htotal = info.width() + (info.horizontal_blanking as u32);
let vtotal = info.height() + (info.vertical_blanking as u32);
let mut clock: u16 = ((info.refresh_rate * htotal * vtotal) / 10000) as u16;
// Round to nearest 10khz.
clock = ((clock + 5) / 10) * 10;
edid_block[0..2].copy_from_slice(&clock.to_le_bytes());
let width_lsb: u8 = (info.width() & 0xFF) as u8;
let width_msb: u8 = ((info.width() >> 8) & 0x0F) as u8;
// Horizointal Addressable Video in pixels.
edid_block[2] = width_lsb;
// Horizontal blanking in pixels.
edid_block[3] = horizontal_blanking_lsb;
// Upper bits of the two above vals.
edid_block[4] = horizontal_blanking_msb | (width_msb << 4) as u8;
let vertical_active: u32 = info.height();
let vertical_active_lsb: u8 = (vertical_active & 0xFF) as u8;
let vertical_active_msb: u8 = ((vertical_active >> 8) & 0x0F) as u8;
// Vertical addressable video in *lines*
edid_block[5] = vertical_active_lsb;
// Vertical blanking in lines
edid_block[6] = vertical_blanking_lsb;
// Sigbits of the above.
edid_block[7] = vertical_blanking_msb | (vertical_active_msb << 4);
let horizontal_front_lsb: u8 = (info.horizontal_front & 0xFF) as u8; // least sig 8 bits
let horizontal_front_msb: u8 = ((info.horizontal_front >> 8) & 0x03) as u8; // most sig 2 bits
let horizontal_sync_lsb: u8 = (info.horizontal_sync & 0xFF) as u8; // least sig 8 bits
let horizontal_sync_msb: u8 = ((info.horizontal_sync >> 8) & 0x03) as u8; // most sig 2 bits
let vertical_front_lsb: u8 = (info.vertical_front & 0x0F) as u8; // least sig 4 bits
let vertical_front_msb: u8 = ((info.vertical_front >> 8) & 0x0F) as u8; // most sig 2 bits
let vertical_sync_lsb: u8 = (info.vertical_sync & 0xFF) as u8; // least sig 4 bits
let vertical_sync_msb: u8 = ((info.vertical_sync >> 8) & 0x0F) as u8; // most sig 2 bits
// Horizontal front porch in pixels.
edid_block[8] = horizontal_front_lsb;
// Horizontal sync pulse width in pixels.
edid_block[9] = horizontal_sync_lsb;
// LSB of vertical front porch and sync pulse
edid_block[10] = vertical_sync_lsb | (vertical_front_lsb << 4);
// Upper 2 bits of these values.
edid_block[11] = vertical_sync_msb
| (vertical_front_msb << 2)
| (horizontal_sync_msb << 4)
| (horizontal_front_msb << 6);
}
// The EDID header. This is defined by the EDID spec.
fn populate_header(edid: &mut [u8]) {
edid[0] = 0x00;
edid[1] = 0xFF;
edid[2] = 0xFF;
edid[3] = 0xFF;
edid[4] = 0xFF;
edid[5] = 0xFF;
edid[6] = 0xFF;
edid[7] = 0x00;
let manufacturer_name: [char; 3] = ['G', 'G', 'L'];
// 00001 -> A, 00010 -> B, etc
let manufacturer_id: u16 = manufacturer_name
.iter()
.map(|c| (*c as u8 - b'A' + 1) & 0x1F)
.fold(0u16, |res, lsb| (res << 5) | (lsb as u16));
edid[8..10].copy_from_slice(&manufacturer_id.to_be_bytes());
let manufacture_product_id: u16 = 1;
edid[10..12].copy_from_slice(&manufacture_product_id.to_le_bytes());
let serial_id: u32 = 1;
edid[12..16].copy_from_slice(&serial_id.to_le_bytes());
let manufacture_week: u8 = 8;
edid[16] = manufacture_week;
let manufacture_year: u32 = 2022;
edid[17] = (manufacture_year - 1990u32) as u8;
}
// The standard timings are 8 timing modes with a lower priority (and different data format)
// than the 4 detailed timing modes.
fn populate_standard_timings(edid: &mut [u8]) -> VirtioGpuResult {
let resolutions = [
Resolution::new(1440, 900),
Resolution::new(1600, 900),
Resolution::new(800, 600),
Resolution::new(1680, 1050),
Resolution::new(1856, 1392),
Resolution::new(1280, 1024),
Resolution::new(1400, 1050),
Resolution::new(1920, 1200),
];
// Index 0 is horizontal pixels / 8 - 31
// Index 1 is a combination of the refresh_rate - 60 (so we are setting to 0, for now) and two
// bits for the aspect ratio.
for (index, r) in resolutions.iter().enumerate() {
edid[0x26 + (index * 2)] = (r.width / 8 - 31) as u8;
let ar_bits = match r.get_aspect_ratio() {
(8, 5) => 0x0,
(4, 3) => 0x1,
(5, 4) => 0x2,
(16, 9) => 0x3,
(x, y) => return Err(ErrEdid(format!("Unsupported aspect ratio: {} {}", x, y))),
};
edid[0x27 + (index * 2)] = ar_bits;
}
Ok(OkNoData)
}
// Per the EDID spec, needs to be 1 and 4.
fn populate_edid_version(edid: &mut [u8]) {
edid[18] = 1;
edid[19] = 4;
}
fn calculate_checksum(edid: &mut [u8]) {
let mut checksum: u8 = 0;
for byte in edid.iter().take(EDID_DATA_LENGTH - 1) {
checksum = checksum.wrapping_add(*byte);
}
if checksum != 0 {
checksum = 255 - checksum + 1;
}
edid[127] = checksum;
} | calculate_checksum(&mut edid);
Ok(OkEdid(Self { bytes: edid })) | random_line_split |
edid.rs | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Implementation of the EDID specification provided by software.
//! EDID spec: <https://glenwing.github.io/docs/VESA-EEDID-A2.pdf>
use std::fmt;
use std::fmt::Debug;
use super::protocol::GpuResponse::*;
use super::protocol::VirtioGpuResult;
const EDID_DATA_LENGTH: usize = 128;
const DEFAULT_HORIZONTAL_BLANKING: u16 = 560;
const DEFAULT_VERTICAL_BLANKING: u16 = 50;
const DEFAULT_HORIZONTAL_FRONT_PORCH: u16 = 64;
const DEFAULT_VERTICAL_FRONT_PORCH: u16 = 1;
const DEFAULT_HORIZONTAL_SYNC_PULSE: u16 = 192;
const DEFAULT_VERTICAL_SYNC_PULSE: u16 = 3;
/// This class is used to create the Extended Display Identification Data (EDID), which will be
/// exposed to the guest system.
///
/// We ignore most of the spec, the point here being for us to provide enough for graphics to work
/// and to allow us to configure the resolution and refresh rate (via the preferred timing mode
/// pixel clock).
///
/// The EDID spec defines a number of methods to provide mode information, but in priority order the
/// "detailed" timing information is first, so we provide a single block of detailed timing
/// information and no other form of timing information.
#[repr(C)]
pub struct EdidBytes {
bytes: [u8; EDID_DATA_LENGTH],
}
impl EdidBytes {
pub fn len(&self) -> usize {
self.bytes.len()
}
pub fn as_bytes(&self) -> &[u8] {
&self.bytes
}
}
impl Debug for EdidBytes {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.bytes[..].fmt(f)
}
}
impl PartialEq for EdidBytes {
fn eq(&self, other: &EdidBytes) -> bool {
self.bytes[..] == other.bytes[..]
}
}
#[derive(Copy, Clone)]
pub struct Resolution {
width: u32,
height: u32,
}
impl Resolution {
fn new(width: u32, height: u32) -> Resolution {
Resolution { width, height }
}
fn get_aspect_ratio(&self) -> (u32, u32) {
let divisor = gcd(self.width, self.height);
(self.width / divisor, self.height / divisor)
}
}
fn gcd(x: u32, y: u32) -> u32 {
match y {
0 => x,
_ => gcd(y, x % y),
}
}
#[derive(Copy, Clone)]
pub struct DisplayInfo {
resolution: Resolution,
refresh_rate: u32,
horizontal_blanking: u16,
vertical_blanking: u16,
horizontal_front: u16,
vertical_front: u16,
horizontal_sync: u16,
vertical_sync: u16,
}
impl DisplayInfo {
/// Only width, height and refresh rate are required for the graphics stack to work, so instead
/// of pulling actual numbers from the system, we just use some typical values to populate other
/// fields for now.
pub fn new(width: u32, height: u32, refresh_rate: u32) -> Self {
Self {
resolution: Resolution::new(width, height),
refresh_rate,
horizontal_blanking: DEFAULT_HORIZONTAL_BLANKING,
vertical_blanking: DEFAULT_VERTICAL_BLANKING,
horizontal_front: DEFAULT_HORIZONTAL_FRONT_PORCH,
vertical_front: DEFAULT_VERTICAL_FRONT_PORCH,
horizontal_sync: DEFAULT_HORIZONTAL_SYNC_PULSE,
vertical_sync: DEFAULT_VERTICAL_SYNC_PULSE,
}
}
pub fn width(&self) -> u32 {
self.resolution.width
}
pub fn height(&self) -> u32 {
self.resolution.height
}
}
impl EdidBytes {
/// Creates a virtual EDID block.
pub fn new(info: &DisplayInfo) -> VirtioGpuResult {
let mut edid: [u8; EDID_DATA_LENGTH] = [0; EDID_DATA_LENGTH];
populate_header(&mut edid);
populate_edid_version(&mut edid);
populate_standard_timings(&mut edid)?;
// 4 available descriptor blocks
let block0 = &mut edid[54..72];
populate_detailed_timing(block0, info);
let block1 = &mut edid[72..90];
populate_display_name(block1);
calculate_checksum(&mut edid);
Ok(OkEdid(Self { bytes: edid }))
}
}
fn populate_display_name(edid_block: &mut [u8]) {
// Display Product Name String Descriptor Tag
edid_block[0..5].clone_from_slice(&[0x00, 0x00, 0x00, 0xFC, 0x00]);
edid_block[5..].clone_from_slice("CrosvmDisplay".as_bytes());
}
fn populate_detailed_timing(edid_block: &mut [u8], info: &DisplayInfo) {
assert_eq!(edid_block.len(), 18);
// Detailed timings
//
// 18 Byte Descriptors - 72 Bytes
// The 72 bytes in this section are divided into four data fields. Each of the four data fields
// are 18 bytes in length. These 18 byte data fields shall contain either detailed timing data
// as described in Section 3.10.2 or other types of data as described in Section 3.10.3. The
// addresses and the contents of the four 18 byte descriptors are shown in Table 3.20.
//
// We leave the bottom 6 bytes of this block purposefully empty.
let horizontal_blanking_lsb: u8 = (info.horizontal_blanking & 0xFF) as u8;
let horizontal_blanking_msb: u8 = ((info.horizontal_blanking >> 8) & 0x0F) as u8;
let vertical_blanking_lsb: u8 = (info.vertical_blanking & 0xFF) as u8;
let vertical_blanking_msb: u8 = ((info.vertical_blanking >> 8) & 0x0F) as u8;
// The pixel clock is what controls the refresh timing information.
//
// The formula for getting refresh rate out of this value is:
// refresh_rate = clk * 10000 / (htotal * vtotal)
// Solving for clk:
// clk = (refresh_rate * htotal * votal) / 10000
//
// where:
// clk - The setting here
// vtotal - Total lines
// htotal - Total pixels per line
//
// Value here is pixel clock + 10,000, in 10khz steps.
//
// Pseudocode of kernel logic for vrefresh:
// vtotal := mode->vtotal;
// calc_val := (clock * 1000) / htotal
// refresh := (calc_val + vtotal / 2) / vtotal
// if flags & INTERLACE: refresh *= 2
// if flags & DBLSCAN: refresh /= 2
// if vscan > 1: refresh /= vscan
//
let htotal = info.width() + (info.horizontal_blanking as u32);
let vtotal = info.height() + (info.vertical_blanking as u32);
let mut clock: u16 = ((info.refresh_rate * htotal * vtotal) / 10000) as u16;
// Round to nearest 10khz.
clock = ((clock + 5) / 10) * 10;
edid_block[0..2].copy_from_slice(&clock.to_le_bytes());
let width_lsb: u8 = (info.width() & 0xFF) as u8;
let width_msb: u8 = ((info.width() >> 8) & 0x0F) as u8;
// Horizointal Addressable Video in pixels.
edid_block[2] = width_lsb;
// Horizontal blanking in pixels.
edid_block[3] = horizontal_blanking_lsb;
// Upper bits of the two above vals.
edid_block[4] = horizontal_blanking_msb | (width_msb << 4) as u8;
let vertical_active: u32 = info.height();
let vertical_active_lsb: u8 = (vertical_active & 0xFF) as u8;
let vertical_active_msb: u8 = ((vertical_active >> 8) & 0x0F) as u8;
// Vertical addressable video in *lines*
edid_block[5] = vertical_active_lsb;
// Vertical blanking in lines
edid_block[6] = vertical_blanking_lsb;
// Sigbits of the above.
edid_block[7] = vertical_blanking_msb | (vertical_active_msb << 4);
let horizontal_front_lsb: u8 = (info.horizontal_front & 0xFF) as u8; // least sig 8 bits
let horizontal_front_msb: u8 = ((info.horizontal_front >> 8) & 0x03) as u8; // most sig 2 bits
let horizontal_sync_lsb: u8 = (info.horizontal_sync & 0xFF) as u8; // least sig 8 bits
let horizontal_sync_msb: u8 = ((info.horizontal_sync >> 8) & 0x03) as u8; // most sig 2 bits
let vertical_front_lsb: u8 = (info.vertical_front & 0x0F) as u8; // least sig 4 bits
let vertical_front_msb: u8 = ((info.vertical_front >> 8) & 0x0F) as u8; // most sig 2 bits
let vertical_sync_lsb: u8 = (info.vertical_sync & 0xFF) as u8; // least sig 4 bits
let vertical_sync_msb: u8 = ((info.vertical_sync >> 8) & 0x0F) as u8; // most sig 2 bits
// Horizontal front porch in pixels.
edid_block[8] = horizontal_front_lsb;
// Horizontal sync pulse width in pixels.
edid_block[9] = horizontal_sync_lsb;
// LSB of vertical front porch and sync pulse
edid_block[10] = vertical_sync_lsb | (vertical_front_lsb << 4);
// Upper 2 bits of these values.
edid_block[11] = vertical_sync_msb
| (vertical_front_msb << 2)
| (horizontal_sync_msb << 4)
| (horizontal_front_msb << 6);
}
// The EDID header. This is defined by the EDID spec.
fn populate_header(edid: &mut [u8]) {
edid[0] = 0x00;
edid[1] = 0xFF;
edid[2] = 0xFF;
edid[3] = 0xFF;
edid[4] = 0xFF;
edid[5] = 0xFF;
edid[6] = 0xFF;
edid[7] = 0x00;
let manufacturer_name: [char; 3] = ['G', 'G', 'L'];
// 00001 -> A, 00010 -> B, etc
let manufacturer_id: u16 = manufacturer_name
.iter()
.map(|c| (*c as u8 - b'A' + 1) & 0x1F)
.fold(0u16, |res, lsb| (res << 5) | (lsb as u16));
edid[8..10].copy_from_slice(&manufacturer_id.to_be_bytes());
let manufacture_product_id: u16 = 1;
edid[10..12].copy_from_slice(&manufacture_product_id.to_le_bytes());
let serial_id: u32 = 1;
edid[12..16].copy_from_slice(&serial_id.to_le_bytes());
let manufacture_week: u8 = 8;
edid[16] = manufacture_week;
let manufacture_year: u32 = 2022;
edid[17] = (manufacture_year - 1990u32) as u8;
}
// The standard timings are 8 timing modes with a lower priority (and different data format)
// than the 4 detailed timing modes.
fn populate_standard_timings(edid: &mut [u8]) -> VirtioGpuResult {
let resolutions = [
Resolution::new(1440, 900),
Resolution::new(1600, 900),
Resolution::new(800, 600),
Resolution::new(1680, 1050),
Resolution::new(1856, 1392),
Resolution::new(1280, 1024),
Resolution::new(1400, 1050),
Resolution::new(1920, 1200),
];
// Index 0 is horizontal pixels / 8 - 31
// Index 1 is a combination of the refresh_rate - 60 (so we are setting to 0, for now) and two
// bits for the aspect ratio.
for (index, r) in resolutions.iter().enumerate() {
edid[0x26 + (index * 2)] = (r.width / 8 - 31) as u8;
let ar_bits = match r.get_aspect_ratio() {
(8, 5) => 0x0,
(4, 3) => 0x1,
(5, 4) => 0x2,
(16, 9) => 0x3,
(x, y) => return Err(ErrEdid(format!("Unsupported aspect ratio: {} {}", x, y))),
};
edid[0x27 + (index * 2)] = ar_bits;
}
Ok(OkNoData)
}
// Per the EDID spec, needs to be 1 and 4.
fn populate_edid_version(edid: &mut [u8]) {
edid[18] = 1;
edid[19] = 4;
}
fn calculate_checksum(edid: &mut [u8]) {
let mut checksum: u8 = 0;
for byte in edid.iter().take(EDID_DATA_LENGTH - 1) {
checksum = checksum.wrapping_add(*byte);
}
if checksum != 0 |
edid[127] = checksum;
}
| {
checksum = 255 - checksum + 1;
} | conditional_block |
off.rs | use petgraph::{graph::NodeIndex, visit::Dfs, Graph};
use std::{collections::HashMap, io::Result, path::Path, str::FromStr};
use super::{Abstract, Concrete, Element, ElementList, Point, Polytope, RankVec};
/// Gets the name for an element with a given rank.
fn element_name(rank: isize) -> String {
match super::ELEMENT_NAMES.get(rank as usize) {
Some(&name) => String::from(name),
None => rank.to_string() + "-elements",
}
}
/// Returns an iterator over the OFF file, with all whitespace and comments
/// removed.
fn data_tokens(src: &str) -> impl Iterator<Item = &str> {
let mut comment = false;
str::split(&src, move |c: char| {
if c == '#' {
comment = true;
} else if c == '\n' {
comment = false;
}
comment || c.is_whitespace()
})
.filter(|s| !s.is_empty())
}
/// Reads the next integer or float from the OFF file.
fn next_tok<'a, T>(toks: &mut impl Iterator<Item = &'a str>) -> T
where
T: FromStr,
<T as FromStr>::Err: std::fmt::Debug,
{
toks.next()
.expect("OFF file ended unexpectedly.")
.parse()
.expect("Could not parse number.")
}
/// Gets the number of elements from the OFF file.
/// This includes components iff dim ≤ 2, as this makes things easier down the
/// line.
fn get_el_nums<'a>(rank: isize, toks: &mut impl Iterator<Item = &'a str>) -> Vec<usize> {
let rank = rank as usize;
let mut el_nums = Vec::with_capacity(rank);
// Reads entries one by one.
for _ in 0..rank {
el_nums.push(next_tok(toks));
}
// A point has a single component (itself)
if rank == 0 {
el_nums.push(1);
}
// A dyad has twice as many vertices as components.
else if rank == 1 {
let comps = el_nums[0] / 2;
el_nums.push(comps);
} else {
// A polygon always has as many vertices as edges.
if rank == 2 {
el_nums.push(el_nums[0]);
}
// 2-elements go before 1-elements, we're undoing that.
el_nums.swap(1, 2);
}
el_nums
}
/// Parses all vertex coordinates from the OFF file.
fn pa | a>(
num: usize,
dim: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> Vec<Point> {
// Reads all vertices.
let mut vertices = Vec::with_capacity(num);
// Add each vertex to the vector.
for _ in 0..num {
let mut vert = Vec::with_capacity(dim);
for _ in 0..dim {
vert.push(next_tok(toks));
}
vertices.push(vert.into());
}
vertices
}
/// Reads the faces from the OFF file and gets the edges and faces from them.
/// Since the OFF file doesn't store edges explicitly, this is harder than reading
/// general elements.
fn parse_edges_and_faces<'a>(
rank: isize,
num_edges: usize,
num_faces: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> (ElementList, ElementList) {
let mut edges = ElementList::with_capacity(num_edges);
let mut faces = ElementList::with_capacity(num_faces);
let mut hash_edges = HashMap::new();
// Add each face to the element list.
for _ in 0..num_faces {
let face_sub_num = next_tok(toks);
let mut face = Element::new();
let mut face_verts = Vec::with_capacity(face_sub_num);
// Reads all vertices of the face.
for _ in 0..face_sub_num {
face_verts.push(next_tok(toks));
}
// Gets all edges of the face.
for i in 0..face_sub_num {
let mut edge = Element {
subs: vec![face_verts[i], face_verts[(i + 1) % face_sub_num]],
};
edge.subs.sort_unstable();
if let Some(idx) = hash_edges.get(&edge) {
face.subs.push(*idx);
} else {
hash_edges.insert(edge.clone(), edges.len());
face.subs.push(edges.len());
edges.push(edge);
}
}
// If these are truly faces and not just components, we add them.
if rank != 2 {
faces.push(face);
}
}
// If this is a polygon, we add a single maximal element as a face.
if rank == 2 {
faces = ElementList::max(edges.len());
}
// The number of edges in the file should match the number of read edges, though this isn't obligatory.
if edges.len() != num_edges {
println!("Edge count doesn't match expected edge count!");
}
(edges, faces)
}
pub fn parse_els<'a>(num_el: usize, toks: &mut impl Iterator<Item = &'a str>) -> ElementList {
let mut els_subs = ElementList::with_capacity(num_el);
// Adds every d-element to the element list.
for _ in 0..num_el {
let el_sub_num = next_tok(toks);
let mut subs = Vec::with_capacity(el_sub_num);
// Reads all sub-elements of the d-element.
for _ in 0..el_sub_num {
let el_sub = toks.next().expect("OFF file ended unexpectedly.");
subs.push(el_sub.parse().expect("Integer parsing failed!"));
}
els_subs.push(Element { subs });
}
els_subs
}
/// Builds a [`Polytope`] from the string representation of an OFF file.
pub fn from_src(src: String) -> Concrete {
let mut toks = data_tokens(&src);
let rank = {
let first = toks.next().expect("OFF file empty");
let rank = first.strip_suffix("OFF").expect("no \"OFF\" detected");
if rank.is_empty() {
3
} else {
rank.parse()
.expect("could not parse dimension as an integer")
}
};
// Deals with dumb degenerate cases.
if rank == -1 {
return Concrete::nullitope();
} else if rank == 0 {
return Concrete::point();
} else if rank == 1 {
return Concrete::dyad();
}
let num_elems = get_el_nums(rank, &mut toks);
let vertices = parse_vertices(num_elems[0], rank as usize, &mut toks);
let mut abs = Abstract::with_rank(rank);
// Adds nullitope and vertices.
abs.push_min();
abs.push_vertices(vertices.len());
// Reads edges and faces.
if rank >= 2 {
let (edges, faces) = parse_edges_and_faces(rank, num_elems[1], num_elems[2], &mut toks);
abs.push(edges);
abs.push(faces);
}
// Adds all higher elements.
for &num_el in num_elems.iter().take(rank as usize).skip(3) {
abs.push(parse_els(num_el, &mut toks));
}
// Caps the abstract polytope, returns the concrete one.
if rank != 2 {
abs.push_max();
}
Concrete { vertices, abs }
}
/// Loads a polytope from a file path.
pub fn from_path(fp: &impl AsRef<Path>) -> Result<Concrete> {
Ok(from_src(String::from_utf8(std::fs::read(fp)?).unwrap()))
}
/// A set of options to be used when saving the OFF file.
#[derive(Clone, Copy)]
pub struct OffOptions {
/// Whether the OFF file should have comments specifying each face type.
pub comments: bool,
}
impl Default for OffOptions {
fn default() -> Self {
OffOptions { comments: true }
}
}
fn write_el_counts(off: &mut String, opt: &OffOptions, mut el_counts: RankVec<usize>) {
let rank = el_counts.rank();
// # Vertices, Faces, Edges, ...
if opt.comments {
off.push_str("\n# Vertices");
let mut element_names = Vec::with_capacity((rank - 1) as usize);
for r in 1..rank {
element_names.push(element_name(r));
}
if element_names.len() >= 2 {
element_names.swap(0, 1);
}
for element_name in element_names {
off.push_str(", ");
off.push_str(&element_name);
}
off.push('\n');
}
// Swaps edges and faces, because OFF format bad.
if rank >= 3 {
el_counts.swap(1, 2);
}
for r in 0..rank {
off.push_str(&el_counts[r].to_string());
off.push(' ');
}
off.push('\n');
}
/// Writes the vertices of a polytope into an OFF file.
fn write_vertices(off: &mut String, opt: &OffOptions, vertices: &[Point]) {
// # Vertices
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(0));
off.push('\n');
}
// Adds the coordinates.
for v in vertices {
for c in v.into_iter() {
off.push_str(&c.to_string());
off.push(' ');
}
off.push('\n');
}
}
/// Gets and writes the faces of a polytope into an OFF file.
fn write_faces(
off: &mut String,
opt: &OffOptions,
rank: usize,
edges: &ElementList,
faces: &ElementList,
) {
// # Faces
if opt.comments {
let el_name = if rank > 2 {
element_name(2)
} else {
super::COMPONENTS.to_string()
};
off.push_str("\n# ");
off.push_str(&el_name);
off.push('\n');
}
// TODO: write components instead of faces in 2D case.
for face in faces.iter() {
off.push_str(&face.subs.len().to_string());
// Maps an OFF index into a graph index.
let mut hash_edges = HashMap::new();
let mut graph = Graph::new_undirected();
// Maps the vertex indices to consecutive integers from 0.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
let mut hash_edge = Vec::with_capacity(2);
for &vertex_idx in &edge.subs {
match hash_edges.get(&vertex_idx) {
Some(&idx) => hash_edge.push(idx),
None => {
let idx = hash_edges.len();
hash_edges.insert(vertex_idx, idx);
hash_edge.push(idx);
graph.add_node(vertex_idx);
}
}
}
}
// There should be as many graph indices as edges on the face.
// Otherwise, something went wrong.
debug_assert_eq!(
hash_edges.len(),
face.subs.len(),
"Faces don't have the same number of edges as there are in the polytope!"
);
// Adds the edges to the graph.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
graph.add_edge(
NodeIndex::new(*hash_edges.get(&edge.subs[0]).unwrap()),
NodeIndex::new(*hash_edges.get(&edge.subs[1]).unwrap()),
(),
);
}
// Retrieves the cycle of vertices.
let mut dfs = Dfs::new(&graph, NodeIndex::new(0));
while let Some(nx) = dfs.next(&graph) {
off.push(' ');
off.push_str(&graph[nx].to_string());
}
off.push('\n');
}
}
/// Writes the n-elements of a polytope into an OFF file.
fn write_els(off: &mut String, opt: &OffOptions, rank: isize, els: &[Element]) {
// # n-elements
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(rank));
off.push('\n');
}
// Adds the elements' indices.
for el in els {
off.push_str(&el.subs.len().to_string());
for &sub in &el.subs {
off.push(' ');
off.push_str(&sub.to_string());
}
off.push('\n');
}
}
/// Converts a polytope into an OFF file.
impl Concrete {
pub fn to_src(&self, opt: OffOptions) -> String {
let rank = self.rank();
let vertices = &self.vertices;
let abs = &self.abs;
let mut off = String::new();
// Blatant advertising.
if opt.comments {
off += &format!(
"# Generated using Miratope v{} (https://github.com/OfficialURL/miratope-rs)\n",
env!("CARGO_PKG_VERSION")
);
}
// Writes header.
if rank != 3 {
off += &rank.to_string();
}
off += "OFF\n";
// If we have a nullitope or point on our hands, that is all.
if rank < 1 {
return off;
}
// Adds the element counts.
write_el_counts(&mut off, &opt, self.el_counts());
// Adds vertex coordinates.
write_vertices(&mut off, &opt, vertices);
// Adds faces.
if rank >= 2 {
write_faces(&mut off, &opt, rank as usize, &abs[1], &abs[2]);
}
// Adds the rest of the elements.
for r in 3..rank {
write_els(&mut off, &opt, r, &abs[r]);
}
off
}
/// Writes a polytope's OFF file in a specified file path.
pub fn to_path(&self, fp: &impl AsRef<Path>, opt: OffOptions) -> Result<()> {
std::fs::write(fp, self.to_src(opt))
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Used to test a particular polytope.
fn test_shape(p: Concrete, el_nums: Vec<usize>) {
// Checks that element counts match up.
assert_eq!(p.el_counts().0, el_nums);
// Checks that the polytope can be reloaded correctly.
assert_eq!(
from_src(p.to_src(OffOptions::default())).el_counts().0,
el_nums
);
}
#[test]
/// Checks that a point has the correct amount of elements.
fn point_nums() {
let point = from_src("0OFF".to_string());
test_shape(point, vec![1, 1])
}
#[test]
/// Checks that a dyad has the correct amount of elements.
fn dyad_nums() {
let dyad = from_src("1OFF 2 -1 1 0 1".to_string());
test_shape(dyad, vec![1, 2, 1])
}
/*
#[test]
/// Checks that a hexagon has the correct amount of elements.
fn hig_nums() {
let hig =from_src(
"2OFF 6 1 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 6 0 1 2 3 4 5".to_string()
);
test_shape(hig, vec![1, 6, 6, 1])
}
#[test]
/// Checks that a hexagram has the correct amount of elements.
fn shig_nums() {
let shig: Concrete = from_src(
"2OFF 6 2 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 3 0 2 4 3 1 3 5".to_string()
).into();
test_shape(shig, vec![1, 6, 6, 1])
}
*/
#[test]
/// Checks that a tetrahedron has the correct amount of elements.
fn tet_nums() {
let tet = from_src(
"OFF 4 4 6 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2".to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
/// Checks that a 2-tetrahedron compund has the correct amount of elements.
fn so_nums() {
let so = from_src(
"OFF 8 8 12 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 -1 -1 -1 -1 1 1 1 -1 1 1 1 -1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2 3 4 5 6 3 7 4 6 3 4 5 7 3 7 5 6 ".to_string(),
);
test_shape(so, vec![1, 8, 12, 8, 1])
}
#[test]
/// Checks that a pentachoron has the correct amount of elements.
fn pen_nums() {
let pen = from_src(
"4OFF 5 10 10 5 0.158113883008419 0.204124145231932 0.288675134594813 0.5 0.158113883008419 0.204124145231932 0.288675134594813 -0.5 0.158113883008419 0.204124145231932 -0.577350269189626 0 0.158113883008419 -0.612372435695794 0 0 -0.632455532033676 0 0 0 3 0 3 4 3 0 2 4 3 2 3 4 3 0 2 3 3 0 1 4 3 1 3 4 3 0 1 3 3 1 2 4 3 0 1 2 3 1 2 3 4 0 1 2 3 4 0 4 5 6 4 1 4 7 8 4 2 5 7 9 4 3 6 8 9"
.to_string(),
);
test_shape(pen, vec![1, 5, 10, 10, 5, 1])
}
#[test]
/// Checks that comments are correctly parsed.
fn comments() {
let tet = from_src(
"# So
OFF # this
4 4 6 # is
# a # test # of
1 1 1 # the 1234 5678
1 -1 -1 # comment 987
-1 1 -1 # removal 654
-1 -1 1 # system 321
3 0 1 2 #let #us #see
3 3 0 2# if
3 0 1 3#it
3 3 1 2#works!#"
.to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
#[should_panic(expected = "OFF file empty")]
fn empty() {
Concrete::from(from_src("".to_string()));
}
#[test]
#[should_panic(expected = "no \"OFF\" detected")]
fn magic_num() {
Concrete::from(from_src("foo bar".to_string()));
}
}
| rse_vertices<' | identifier_name |
off.rs | use petgraph::{graph::NodeIndex, visit::Dfs, Graph};
use std::{collections::HashMap, io::Result, path::Path, str::FromStr};
use super::{Abstract, Concrete, Element, ElementList, Point, Polytope, RankVec};
/// Gets the name for an element with a given rank.
fn element_name(rank: isize) -> String {
match super::ELEMENT_NAMES.get(rank as usize) {
Some(&name) => String::from(name),
None => rank.to_string() + "-elements",
}
}
/// Returns an iterator over the OFF file, with all whitespace and comments
/// removed.
fn data_tokens(src: &str) -> impl Iterator<Item = &str> {
let mut comment = false;
str::split(&src, move |c: char| {
if c == '#' {
comment = true;
} else if c == '\n' {
comment = false;
}
comment || c.is_whitespace()
})
.filter(|s| !s.is_empty())
}
/// Reads the next integer or float from the OFF file.
fn next_tok<'a, T>(toks: &mut impl Iterator<Item = &'a str>) -> T
where
T: FromStr,
<T as FromStr>::Err: std::fmt::Debug,
{
toks.next()
.expect("OFF file ended unexpectedly.")
.parse()
.expect("Could not parse number.")
}
/// Gets the number of elements from the OFF file.
/// This includes components iff dim ≤ 2, as this makes things easier down the
/// line.
fn get_el_nums<'a>(rank: isize, toks: &mut impl Iterator<Item = &'a str>) -> Vec<usize> {
let rank = rank as usize;
let mut el_nums = Vec::with_capacity(rank);
// Reads entries one by one.
for _ in 0..rank {
el_nums.push(next_tok(toks));
}
// A point has a single component (itself)
if rank == 0 {
el_nums.push(1);
}
// A dyad has twice as many vertices as components.
else if rank == 1 {
let comps = el_nums[0] / 2;
el_nums.push(comps);
} else {
// A polygon always has as many vertices as edges.
if rank == 2 {
el_nums.push(el_nums[0]);
}
// 2-elements go before 1-elements, we're undoing that.
el_nums.swap(1, 2);
}
el_nums
}
/// Parses all vertex coordinates from the OFF file.
fn parse_vertices<'a>(
num: usize,
dim: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> Vec<Point> {
// Reads all vertices.
let mut vertices = Vec::with_capacity(num);
// Add each vertex to the vector.
for _ in 0..num {
let mut vert = Vec::with_capacity(dim);
for _ in 0..dim {
vert.push(next_tok(toks));
}
vertices.push(vert.into());
}
vertices
}
/// Reads the faces from the OFF file and gets the edges and faces from them.
/// Since the OFF file doesn't store edges explicitly, this is harder than reading
/// general elements.
fn parse_edges_and_faces<'a>(
rank: isize,
num_edges: usize,
num_faces: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> (ElementList, ElementList) {
let mut edges = ElementList::with_capacity(num_edges);
let mut faces = ElementList::with_capacity(num_faces);
let mut hash_edges = HashMap::new();
// Add each face to the element list.
for _ in 0..num_faces {
let face_sub_num = next_tok(toks);
let mut face = Element::new();
let mut face_verts = Vec::with_capacity(face_sub_num);
// Reads all vertices of the face.
for _ in 0..face_sub_num {
face_verts.push(next_tok(toks));
}
// Gets all edges of the face.
for i in 0..face_sub_num {
let mut edge = Element {
subs: vec![face_verts[i], face_verts[(i + 1) % face_sub_num]],
};
edge.subs.sort_unstable();
if let Some(idx) = hash_edges.get(&edge) {
face.subs.push(*idx);
} else {
hash_edges.insert(edge.clone(), edges.len());
face.subs.push(edges.len());
edges.push(edge);
}
}
// If these are truly faces and not just components, we add them.
if rank != 2 {
faces.push(face);
}
}
// If this is a polygon, we add a single maximal element as a face.
if rank == 2 {
faces = ElementList::max(edges.len());
}
// The number of edges in the file should match the number of read edges, though this isn't obligatory.
if edges.len() != num_edges {
println!("Edge count doesn't match expected edge count!");
}
(edges, faces)
}
pub fn parse_els<'a>(num_el: usize, toks: &mut impl Iterator<Item = &'a str>) -> ElementList {
let mut els_subs = ElementList::with_capacity(num_el);
// Adds every d-element to the element list.
for _ in 0..num_el {
let el_sub_num = next_tok(toks);
let mut subs = Vec::with_capacity(el_sub_num);
// Reads all sub-elements of the d-element.
for _ in 0..el_sub_num {
let el_sub = toks.next().expect("OFF file ended unexpectedly.");
subs.push(el_sub.parse().expect("Integer parsing failed!"));
}
els_subs.push(Element { subs });
}
els_subs
}
/// Builds a [`Polytope`] from the string representation of an OFF file.
pub fn from_src(src: String) -> Concrete {
let mut toks = data_tokens(&src);
let rank = {
let first = toks.next().expect("OFF file empty");
let rank = first.strip_suffix("OFF").expect("no \"OFF\" detected");
if rank.is_empty() {
3
} else {
rank.parse()
.expect("could not parse dimension as an integer")
}
};
// Deals with dumb degenerate cases.
if rank == -1 {
return Concrete::nullitope();
} else if rank == 0 {
return Concrete::point();
} else if rank == 1 {
return Concrete::dyad();
}
let num_elems = get_el_nums(rank, &mut toks);
let vertices = parse_vertices(num_elems[0], rank as usize, &mut toks);
let mut abs = Abstract::with_rank(rank);
// Adds nullitope and vertices.
abs.push_min();
abs.push_vertices(vertices.len());
// Reads edges and faces.
if rank >= 2 {
let (edges, faces) = parse_edges_and_faces(rank, num_elems[1], num_elems[2], &mut toks);
abs.push(edges);
abs.push(faces);
}
// Adds all higher elements.
for &num_el in num_elems.iter().take(rank as usize).skip(3) {
abs.push(parse_els(num_el, &mut toks));
}
// Caps the abstract polytope, returns the concrete one.
if rank != 2 {
abs.push_max();
}
Concrete { vertices, abs }
}
/// Loads a polytope from a file path.
pub fn from_path(fp: &impl AsRef<Path>) -> Result<Concrete> {
| /// A set of options to be used when saving the OFF file.
#[derive(Clone, Copy)]
pub struct OffOptions {
/// Whether the OFF file should have comments specifying each face type.
pub comments: bool,
}
impl Default for OffOptions {
fn default() -> Self {
OffOptions { comments: true }
}
}
fn write_el_counts(off: &mut String, opt: &OffOptions, mut el_counts: RankVec<usize>) {
let rank = el_counts.rank();
// # Vertices, Faces, Edges, ...
if opt.comments {
off.push_str("\n# Vertices");
let mut element_names = Vec::with_capacity((rank - 1) as usize);
for r in 1..rank {
element_names.push(element_name(r));
}
if element_names.len() >= 2 {
element_names.swap(0, 1);
}
for element_name in element_names {
off.push_str(", ");
off.push_str(&element_name);
}
off.push('\n');
}
// Swaps edges and faces, because OFF format bad.
if rank >= 3 {
el_counts.swap(1, 2);
}
for r in 0..rank {
off.push_str(&el_counts[r].to_string());
off.push(' ');
}
off.push('\n');
}
/// Writes the vertices of a polytope into an OFF file.
fn write_vertices(off: &mut String, opt: &OffOptions, vertices: &[Point]) {
// # Vertices
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(0));
off.push('\n');
}
// Adds the coordinates.
for v in vertices {
for c in v.into_iter() {
off.push_str(&c.to_string());
off.push(' ');
}
off.push('\n');
}
}
/// Gets and writes the faces of a polytope into an OFF file.
fn write_faces(
off: &mut String,
opt: &OffOptions,
rank: usize,
edges: &ElementList,
faces: &ElementList,
) {
// # Faces
if opt.comments {
let el_name = if rank > 2 {
element_name(2)
} else {
super::COMPONENTS.to_string()
};
off.push_str("\n# ");
off.push_str(&el_name);
off.push('\n');
}
// TODO: write components instead of faces in 2D case.
for face in faces.iter() {
off.push_str(&face.subs.len().to_string());
// Maps an OFF index into a graph index.
let mut hash_edges = HashMap::new();
let mut graph = Graph::new_undirected();
// Maps the vertex indices to consecutive integers from 0.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
let mut hash_edge = Vec::with_capacity(2);
for &vertex_idx in &edge.subs {
match hash_edges.get(&vertex_idx) {
Some(&idx) => hash_edge.push(idx),
None => {
let idx = hash_edges.len();
hash_edges.insert(vertex_idx, idx);
hash_edge.push(idx);
graph.add_node(vertex_idx);
}
}
}
}
// There should be as many graph indices as edges on the face.
// Otherwise, something went wrong.
debug_assert_eq!(
hash_edges.len(),
face.subs.len(),
"Faces don't have the same number of edges as there are in the polytope!"
);
// Adds the edges to the graph.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
graph.add_edge(
NodeIndex::new(*hash_edges.get(&edge.subs[0]).unwrap()),
NodeIndex::new(*hash_edges.get(&edge.subs[1]).unwrap()),
(),
);
}
// Retrieves the cycle of vertices.
let mut dfs = Dfs::new(&graph, NodeIndex::new(0));
while let Some(nx) = dfs.next(&graph) {
off.push(' ');
off.push_str(&graph[nx].to_string());
}
off.push('\n');
}
}
/// Writes the n-elements of a polytope into an OFF file.
fn write_els(off: &mut String, opt: &OffOptions, rank: isize, els: &[Element]) {
// # n-elements
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(rank));
off.push('\n');
}
// Adds the elements' indices.
for el in els {
off.push_str(&el.subs.len().to_string());
for &sub in &el.subs {
off.push(' ');
off.push_str(&sub.to_string());
}
off.push('\n');
}
}
/// Converts a polytope into an OFF file.
impl Concrete {
pub fn to_src(&self, opt: OffOptions) -> String {
let rank = self.rank();
let vertices = &self.vertices;
let abs = &self.abs;
let mut off = String::new();
// Blatant advertising.
if opt.comments {
off += &format!(
"# Generated using Miratope v{} (https://github.com/OfficialURL/miratope-rs)\n",
env!("CARGO_PKG_VERSION")
);
}
// Writes header.
if rank != 3 {
off += &rank.to_string();
}
off += "OFF\n";
// If we have a nullitope or point on our hands, that is all.
if rank < 1 {
return off;
}
// Adds the element counts.
write_el_counts(&mut off, &opt, self.el_counts());
// Adds vertex coordinates.
write_vertices(&mut off, &opt, vertices);
// Adds faces.
if rank >= 2 {
write_faces(&mut off, &opt, rank as usize, &abs[1], &abs[2]);
}
// Adds the rest of the elements.
for r in 3..rank {
write_els(&mut off, &opt, r, &abs[r]);
}
off
}
/// Writes a polytope's OFF file in a specified file path.
pub fn to_path(&self, fp: &impl AsRef<Path>, opt: OffOptions) -> Result<()> {
std::fs::write(fp, self.to_src(opt))
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Used to test a particular polytope.
fn test_shape(p: Concrete, el_nums: Vec<usize>) {
// Checks that element counts match up.
assert_eq!(p.el_counts().0, el_nums);
// Checks that the polytope can be reloaded correctly.
assert_eq!(
from_src(p.to_src(OffOptions::default())).el_counts().0,
el_nums
);
}
#[test]
/// Checks that a point has the correct amount of elements.
fn point_nums() {
let point = from_src("0OFF".to_string());
test_shape(point, vec![1, 1])
}
#[test]
/// Checks that a dyad has the correct amount of elements.
fn dyad_nums() {
let dyad = from_src("1OFF 2 -1 1 0 1".to_string());
test_shape(dyad, vec![1, 2, 1])
}
/*
#[test]
/// Checks that a hexagon has the correct amount of elements.
fn hig_nums() {
let hig =from_src(
"2OFF 6 1 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 6 0 1 2 3 4 5".to_string()
);
test_shape(hig, vec![1, 6, 6, 1])
}
#[test]
/// Checks that a hexagram has the correct amount of elements.
fn shig_nums() {
let shig: Concrete = from_src(
"2OFF 6 2 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 3 0 2 4 3 1 3 5".to_string()
).into();
test_shape(shig, vec![1, 6, 6, 1])
}
*/
#[test]
/// Checks that a tetrahedron has the correct amount of elements.
fn tet_nums() {
let tet = from_src(
"OFF 4 4 6 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2".to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
/// Checks that a 2-tetrahedron compund has the correct amount of elements.
fn so_nums() {
let so = from_src(
"OFF 8 8 12 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 -1 -1 -1 -1 1 1 1 -1 1 1 1 -1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2 3 4 5 6 3 7 4 6 3 4 5 7 3 7 5 6 ".to_string(),
);
test_shape(so, vec![1, 8, 12, 8, 1])
}
#[test]
/// Checks that a pentachoron has the correct amount of elements.
fn pen_nums() {
let pen = from_src(
"4OFF 5 10 10 5 0.158113883008419 0.204124145231932 0.288675134594813 0.5 0.158113883008419 0.204124145231932 0.288675134594813 -0.5 0.158113883008419 0.204124145231932 -0.577350269189626 0 0.158113883008419 -0.612372435695794 0 0 -0.632455532033676 0 0 0 3 0 3 4 3 0 2 4 3 2 3 4 3 0 2 3 3 0 1 4 3 1 3 4 3 0 1 3 3 1 2 4 3 0 1 2 3 1 2 3 4 0 1 2 3 4 0 4 5 6 4 1 4 7 8 4 2 5 7 9 4 3 6 8 9"
.to_string(),
);
test_shape(pen, vec![1, 5, 10, 10, 5, 1])
}
#[test]
/// Checks that comments are correctly parsed.
fn comments() {
let tet = from_src(
"# So
OFF # this
4 4 6 # is
# a # test # of
1 1 1 # the 1234 5678
1 -1 -1 # comment 987
-1 1 -1 # removal 654
-1 -1 1 # system 321
3 0 1 2 #let #us #see
3 3 0 2# if
3 0 1 3#it
3 3 1 2#works!#"
.to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
#[should_panic(expected = "OFF file empty")]
fn empty() {
Concrete::from(from_src("".to_string()));
}
#[test]
#[should_panic(expected = "no \"OFF\" detected")]
fn magic_num() {
Concrete::from(from_src("foo bar".to_string()));
}
}
| Ok(from_src(String::from_utf8(std::fs::read(fp)?).unwrap()))
}
| identifier_body |
off.rs | use petgraph::{graph::NodeIndex, visit::Dfs, Graph};
use std::{collections::HashMap, io::Result, path::Path, str::FromStr};
use super::{Abstract, Concrete, Element, ElementList, Point, Polytope, RankVec};
/// Gets the name for an element with a given rank.
fn element_name(rank: isize) -> String {
match super::ELEMENT_NAMES.get(rank as usize) {
Some(&name) => String::from(name),
None => rank.to_string() + "-elements",
}
}
/// Returns an iterator over the OFF file, with all whitespace and comments
/// removed.
fn data_tokens(src: &str) -> impl Iterator<Item = &str> {
let mut comment = false;
str::split(&src, move |c: char| {
if c == '#' | else if c == '\n' {
comment = false;
}
comment || c.is_whitespace()
})
.filter(|s| !s.is_empty())
}
/// Reads the next integer or float from the OFF file.
fn next_tok<'a, T>(toks: &mut impl Iterator<Item = &'a str>) -> T
where
T: FromStr,
<T as FromStr>::Err: std::fmt::Debug,
{
toks.next()
.expect("OFF file ended unexpectedly.")
.parse()
.expect("Could not parse number.")
}
/// Gets the number of elements from the OFF file.
/// This includes components iff dim ≤ 2, as this makes things easier down the
/// line.
fn get_el_nums<'a>(rank: isize, toks: &mut impl Iterator<Item = &'a str>) -> Vec<usize> {
let rank = rank as usize;
let mut el_nums = Vec::with_capacity(rank);
// Reads entries one by one.
for _ in 0..rank {
el_nums.push(next_tok(toks));
}
// A point has a single component (itself)
if rank == 0 {
el_nums.push(1);
}
// A dyad has twice as many vertices as components.
else if rank == 1 {
let comps = el_nums[0] / 2;
el_nums.push(comps);
} else {
// A polygon always has as many vertices as edges.
if rank == 2 {
el_nums.push(el_nums[0]);
}
// 2-elements go before 1-elements, we're undoing that.
el_nums.swap(1, 2);
}
el_nums
}
/// Parses all vertex coordinates from the OFF file.
fn parse_vertices<'a>(
num: usize,
dim: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> Vec<Point> {
// Reads all vertices.
let mut vertices = Vec::with_capacity(num);
// Add each vertex to the vector.
for _ in 0..num {
let mut vert = Vec::with_capacity(dim);
for _ in 0..dim {
vert.push(next_tok(toks));
}
vertices.push(vert.into());
}
vertices
}
/// Reads the faces from the OFF file and gets the edges and faces from them.
/// Since the OFF file doesn't store edges explicitly, this is harder than reading
/// general elements.
fn parse_edges_and_faces<'a>(
rank: isize,
num_edges: usize,
num_faces: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> (ElementList, ElementList) {
let mut edges = ElementList::with_capacity(num_edges);
let mut faces = ElementList::with_capacity(num_faces);
let mut hash_edges = HashMap::new();
// Add each face to the element list.
for _ in 0..num_faces {
let face_sub_num = next_tok(toks);
let mut face = Element::new();
let mut face_verts = Vec::with_capacity(face_sub_num);
// Reads all vertices of the face.
for _ in 0..face_sub_num {
face_verts.push(next_tok(toks));
}
// Gets all edges of the face.
for i in 0..face_sub_num {
let mut edge = Element {
subs: vec![face_verts[i], face_verts[(i + 1) % face_sub_num]],
};
edge.subs.sort_unstable();
if let Some(idx) = hash_edges.get(&edge) {
face.subs.push(*idx);
} else {
hash_edges.insert(edge.clone(), edges.len());
face.subs.push(edges.len());
edges.push(edge);
}
}
// If these are truly faces and not just components, we add them.
if rank != 2 {
faces.push(face);
}
}
// If this is a polygon, we add a single maximal element as a face.
if rank == 2 {
faces = ElementList::max(edges.len());
}
// The number of edges in the file should match the number of read edges, though this isn't obligatory.
if edges.len() != num_edges {
println!("Edge count doesn't match expected edge count!");
}
(edges, faces)
}
pub fn parse_els<'a>(num_el: usize, toks: &mut impl Iterator<Item = &'a str>) -> ElementList {
let mut els_subs = ElementList::with_capacity(num_el);
// Adds every d-element to the element list.
for _ in 0..num_el {
let el_sub_num = next_tok(toks);
let mut subs = Vec::with_capacity(el_sub_num);
// Reads all sub-elements of the d-element.
for _ in 0..el_sub_num {
let el_sub = toks.next().expect("OFF file ended unexpectedly.");
subs.push(el_sub.parse().expect("Integer parsing failed!"));
}
els_subs.push(Element { subs });
}
els_subs
}
/// Builds a [`Polytope`] from the string representation of an OFF file.
pub fn from_src(src: String) -> Concrete {
let mut toks = data_tokens(&src);
let rank = {
let first = toks.next().expect("OFF file empty");
let rank = first.strip_suffix("OFF").expect("no \"OFF\" detected");
if rank.is_empty() {
3
} else {
rank.parse()
.expect("could not parse dimension as an integer")
}
};
// Deals with dumb degenerate cases.
if rank == -1 {
return Concrete::nullitope();
} else if rank == 0 {
return Concrete::point();
} else if rank == 1 {
return Concrete::dyad();
}
let num_elems = get_el_nums(rank, &mut toks);
let vertices = parse_vertices(num_elems[0], rank as usize, &mut toks);
let mut abs = Abstract::with_rank(rank);
// Adds nullitope and vertices.
abs.push_min();
abs.push_vertices(vertices.len());
// Reads edges and faces.
if rank >= 2 {
let (edges, faces) = parse_edges_and_faces(rank, num_elems[1], num_elems[2], &mut toks);
abs.push(edges);
abs.push(faces);
}
// Adds all higher elements.
for &num_el in num_elems.iter().take(rank as usize).skip(3) {
abs.push(parse_els(num_el, &mut toks));
}
// Caps the abstract polytope, returns the concrete one.
if rank != 2 {
abs.push_max();
}
Concrete { vertices, abs }
}
/// Loads a polytope from a file path.
pub fn from_path(fp: &impl AsRef<Path>) -> Result<Concrete> {
Ok(from_src(String::from_utf8(std::fs::read(fp)?).unwrap()))
}
/// A set of options to be used when saving the OFF file.
#[derive(Clone, Copy)]
pub struct OffOptions {
/// Whether the OFF file should have comments specifying each face type.
pub comments: bool,
}
impl Default for OffOptions {
fn default() -> Self {
OffOptions { comments: true }
}
}
fn write_el_counts(off: &mut String, opt: &OffOptions, mut el_counts: RankVec<usize>) {
let rank = el_counts.rank();
// # Vertices, Faces, Edges, ...
if opt.comments {
off.push_str("\n# Vertices");
let mut element_names = Vec::with_capacity((rank - 1) as usize);
for r in 1..rank {
element_names.push(element_name(r));
}
if element_names.len() >= 2 {
element_names.swap(0, 1);
}
for element_name in element_names {
off.push_str(", ");
off.push_str(&element_name);
}
off.push('\n');
}
// Swaps edges and faces, because OFF format bad.
if rank >= 3 {
el_counts.swap(1, 2);
}
for r in 0..rank {
off.push_str(&el_counts[r].to_string());
off.push(' ');
}
off.push('\n');
}
/// Writes the vertices of a polytope into an OFF file.
fn write_vertices(off: &mut String, opt: &OffOptions, vertices: &[Point]) {
// # Vertices
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(0));
off.push('\n');
}
// Adds the coordinates.
for v in vertices {
for c in v.into_iter() {
off.push_str(&c.to_string());
off.push(' ');
}
off.push('\n');
}
}
/// Gets and writes the faces of a polytope into an OFF file.
fn write_faces(
off: &mut String,
opt: &OffOptions,
rank: usize,
edges: &ElementList,
faces: &ElementList,
) {
// # Faces
if opt.comments {
let el_name = if rank > 2 {
element_name(2)
} else {
super::COMPONENTS.to_string()
};
off.push_str("\n# ");
off.push_str(&el_name);
off.push('\n');
}
// TODO: write components instead of faces in 2D case.
for face in faces.iter() {
off.push_str(&face.subs.len().to_string());
// Maps an OFF index into a graph index.
let mut hash_edges = HashMap::new();
let mut graph = Graph::new_undirected();
// Maps the vertex indices to consecutive integers from 0.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
let mut hash_edge = Vec::with_capacity(2);
for &vertex_idx in &edge.subs {
match hash_edges.get(&vertex_idx) {
Some(&idx) => hash_edge.push(idx),
None => {
let idx = hash_edges.len();
hash_edges.insert(vertex_idx, idx);
hash_edge.push(idx);
graph.add_node(vertex_idx);
}
}
}
}
// There should be as many graph indices as edges on the face.
// Otherwise, something went wrong.
debug_assert_eq!(
hash_edges.len(),
face.subs.len(),
"Faces don't have the same number of edges as there are in the polytope!"
);
// Adds the edges to the graph.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
graph.add_edge(
NodeIndex::new(*hash_edges.get(&edge.subs[0]).unwrap()),
NodeIndex::new(*hash_edges.get(&edge.subs[1]).unwrap()),
(),
);
}
// Retrieves the cycle of vertices.
let mut dfs = Dfs::new(&graph, NodeIndex::new(0));
while let Some(nx) = dfs.next(&graph) {
off.push(' ');
off.push_str(&graph[nx].to_string());
}
off.push('\n');
}
}
/// Writes the n-elements of a polytope into an OFF file.
fn write_els(off: &mut String, opt: &OffOptions, rank: isize, els: &[Element]) {
// # n-elements
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(rank));
off.push('\n');
}
// Adds the elements' indices.
for el in els {
off.push_str(&el.subs.len().to_string());
for &sub in &el.subs {
off.push(' ');
off.push_str(&sub.to_string());
}
off.push('\n');
}
}
/// Converts a polytope into an OFF file.
impl Concrete {
pub fn to_src(&self, opt: OffOptions) -> String {
let rank = self.rank();
let vertices = &self.vertices;
let abs = &self.abs;
let mut off = String::new();
// Blatant advertising.
if opt.comments {
off += &format!(
"# Generated using Miratope v{} (https://github.com/OfficialURL/miratope-rs)\n",
env!("CARGO_PKG_VERSION")
);
}
// Writes header.
if rank != 3 {
off += &rank.to_string();
}
off += "OFF\n";
// If we have a nullitope or point on our hands, that is all.
if rank < 1 {
return off;
}
// Adds the element counts.
write_el_counts(&mut off, &opt, self.el_counts());
// Adds vertex coordinates.
write_vertices(&mut off, &opt, vertices);
// Adds faces.
if rank >= 2 {
write_faces(&mut off, &opt, rank as usize, &abs[1], &abs[2]);
}
// Adds the rest of the elements.
for r in 3..rank {
write_els(&mut off, &opt, r, &abs[r]);
}
off
}
/// Writes a polytope's OFF file in a specified file path.
pub fn to_path(&self, fp: &impl AsRef<Path>, opt: OffOptions) -> Result<()> {
std::fs::write(fp, self.to_src(opt))
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Used to test a particular polytope.
fn test_shape(p: Concrete, el_nums: Vec<usize>) {
// Checks that element counts match up.
assert_eq!(p.el_counts().0, el_nums);
// Checks that the polytope can be reloaded correctly.
assert_eq!(
from_src(p.to_src(OffOptions::default())).el_counts().0,
el_nums
);
}
#[test]
/// Checks that a point has the correct amount of elements.
fn point_nums() {
let point = from_src("0OFF".to_string());
test_shape(point, vec![1, 1])
}
#[test]
/// Checks that a dyad has the correct amount of elements.
fn dyad_nums() {
let dyad = from_src("1OFF 2 -1 1 0 1".to_string());
test_shape(dyad, vec![1, 2, 1])
}
/*
#[test]
/// Checks that a hexagon has the correct amount of elements.
fn hig_nums() {
let hig =from_src(
"2OFF 6 1 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 6 0 1 2 3 4 5".to_string()
);
test_shape(hig, vec![1, 6, 6, 1])
}
#[test]
/// Checks that a hexagram has the correct amount of elements.
fn shig_nums() {
let shig: Concrete = from_src(
"2OFF 6 2 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 3 0 2 4 3 1 3 5".to_string()
).into();
test_shape(shig, vec![1, 6, 6, 1])
}
*/
#[test]
/// Checks that a tetrahedron has the correct amount of elements.
fn tet_nums() {
let tet = from_src(
"OFF 4 4 6 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2".to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
/// Checks that a 2-tetrahedron compund has the correct amount of elements.
fn so_nums() {
let so = from_src(
"OFF 8 8 12 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 -1 -1 -1 -1 1 1 1 -1 1 1 1 -1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2 3 4 5 6 3 7 4 6 3 4 5 7 3 7 5 6 ".to_string(),
);
test_shape(so, vec![1, 8, 12, 8, 1])
}
#[test]
/// Checks that a pentachoron has the correct amount of elements.
fn pen_nums() {
let pen = from_src(
"4OFF 5 10 10 5 0.158113883008419 0.204124145231932 0.288675134594813 0.5 0.158113883008419 0.204124145231932 0.288675134594813 -0.5 0.158113883008419 0.204124145231932 -0.577350269189626 0 0.158113883008419 -0.612372435695794 0 0 -0.632455532033676 0 0 0 3 0 3 4 3 0 2 4 3 2 3 4 3 0 2 3 3 0 1 4 3 1 3 4 3 0 1 3 3 1 2 4 3 0 1 2 3 1 2 3 4 0 1 2 3 4 0 4 5 6 4 1 4 7 8 4 2 5 7 9 4 3 6 8 9"
.to_string(),
);
test_shape(pen, vec![1, 5, 10, 10, 5, 1])
}
#[test]
/// Checks that comments are correctly parsed.
fn comments() {
let tet = from_src(
"# So
OFF # this
4 4 6 # is
# a # test # of
1 1 1 # the 1234 5678
1 -1 -1 # comment 987
-1 1 -1 # removal 654
-1 -1 1 # system 321
3 0 1 2 #let #us #see
3 3 0 2# if
3 0 1 3#it
3 3 1 2#works!#"
.to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
#[should_panic(expected = "OFF file empty")]
fn empty() {
Concrete::from(from_src("".to_string()));
}
#[test]
#[should_panic(expected = "no \"OFF\" detected")]
fn magic_num() {
Concrete::from(from_src("foo bar".to_string()));
}
}
| {
comment = true;
} | conditional_block |
off.rs | use petgraph::{graph::NodeIndex, visit::Dfs, Graph};
use std::{collections::HashMap, io::Result, path::Path, str::FromStr};
use super::{Abstract, Concrete, Element, ElementList, Point, Polytope, RankVec};
/// Gets the name for an element with a given rank.
fn element_name(rank: isize) -> String {
match super::ELEMENT_NAMES.get(rank as usize) {
Some(&name) => String::from(name),
None => rank.to_string() + "-elements",
}
}
/// Returns an iterator over the OFF file, with all whitespace and comments
/// removed.
fn data_tokens(src: &str) -> impl Iterator<Item = &str> {
let mut comment = false;
str::split(&src, move |c: char| {
if c == '#' {
comment = true;
} else if c == '\n' {
comment = false;
}
comment || c.is_whitespace()
})
.filter(|s| !s.is_empty())
}
/// Reads the next integer or float from the OFF file.
fn next_tok<'a, T>(toks: &mut impl Iterator<Item = &'a str>) -> T
where
T: FromStr,
<T as FromStr>::Err: std::fmt::Debug,
{
toks.next()
.expect("OFF file ended unexpectedly.")
.parse()
.expect("Could not parse number.")
}
/// Gets the number of elements from the OFF file.
/// This includes components iff dim ≤ 2, as this makes things easier down the
/// line.
fn get_el_nums<'a>(rank: isize, toks: &mut impl Iterator<Item = &'a str>) -> Vec<usize> {
let rank = rank as usize;
let mut el_nums = Vec::with_capacity(rank);
// Reads entries one by one.
for _ in 0..rank {
el_nums.push(next_tok(toks));
}
// A point has a single component (itself)
if rank == 0 {
el_nums.push(1);
}
// A dyad has twice as many vertices as components.
else if rank == 1 {
let comps = el_nums[0] / 2;
el_nums.push(comps);
} else {
// A polygon always has as many vertices as edges.
if rank == 2 {
el_nums.push(el_nums[0]);
}
// 2-elements go before 1-elements, we're undoing that.
el_nums.swap(1, 2);
}
el_nums
}
/// Parses all vertex coordinates from the OFF file.
fn parse_vertices<'a>(
num: usize,
dim: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> Vec<Point> {
// Reads all vertices.
let mut vertices = Vec::with_capacity(num);
// Add each vertex to the vector.
for _ in 0..num {
let mut vert = Vec::with_capacity(dim);
for _ in 0..dim {
vert.push(next_tok(toks));
}
vertices.push(vert.into());
}
vertices
}
/// Reads the faces from the OFF file and gets the edges and faces from them.
/// Since the OFF file doesn't store edges explicitly, this is harder than reading
/// general elements.
fn parse_edges_and_faces<'a>(
rank: isize,
num_edges: usize,
num_faces: usize,
toks: &mut impl Iterator<Item = &'a str>,
) -> (ElementList, ElementList) {
let mut edges = ElementList::with_capacity(num_edges);
let mut faces = ElementList::with_capacity(num_faces);
let mut hash_edges = HashMap::new();
// Add each face to the element list.
for _ in 0..num_faces {
let face_sub_num = next_tok(toks);
let mut face = Element::new();
let mut face_verts = Vec::with_capacity(face_sub_num);
// Reads all vertices of the face.
for _ in 0..face_sub_num {
face_verts.push(next_tok(toks));
}
// Gets all edges of the face.
for i in 0..face_sub_num {
let mut edge = Element {
subs: vec![face_verts[i], face_verts[(i + 1) % face_sub_num]],
};
edge.subs.sort_unstable();
if let Some(idx) = hash_edges.get(&edge) {
face.subs.push(*idx);
} else {
hash_edges.insert(edge.clone(), edges.len());
face.subs.push(edges.len());
edges.push(edge);
}
}
// If these are truly faces and not just components, we add them.
if rank != 2 {
faces.push(face);
}
}
// If this is a polygon, we add a single maximal element as a face.
if rank == 2 {
faces = ElementList::max(edges.len());
}
// The number of edges in the file should match the number of read edges, though this isn't obligatory.
if edges.len() != num_edges {
println!("Edge count doesn't match expected edge count!");
}
(edges, faces)
}
pub fn parse_els<'a>(num_el: usize, toks: &mut impl Iterator<Item = &'a str>) -> ElementList {
let mut els_subs = ElementList::with_capacity(num_el);
// Adds every d-element to the element list.
for _ in 0..num_el { |
// Reads all sub-elements of the d-element.
for _ in 0..el_sub_num {
let el_sub = toks.next().expect("OFF file ended unexpectedly.");
subs.push(el_sub.parse().expect("Integer parsing failed!"));
}
els_subs.push(Element { subs });
}
els_subs
}
/// Builds a [`Polytope`] from the string representation of an OFF file.
pub fn from_src(src: String) -> Concrete {
let mut toks = data_tokens(&src);
let rank = {
let first = toks.next().expect("OFF file empty");
let rank = first.strip_suffix("OFF").expect("no \"OFF\" detected");
if rank.is_empty() {
3
} else {
rank.parse()
.expect("could not parse dimension as an integer")
}
};
// Deals with dumb degenerate cases.
if rank == -1 {
return Concrete::nullitope();
} else if rank == 0 {
return Concrete::point();
} else if rank == 1 {
return Concrete::dyad();
}
let num_elems = get_el_nums(rank, &mut toks);
let vertices = parse_vertices(num_elems[0], rank as usize, &mut toks);
let mut abs = Abstract::with_rank(rank);
// Adds nullitope and vertices.
abs.push_min();
abs.push_vertices(vertices.len());
// Reads edges and faces.
if rank >= 2 {
let (edges, faces) = parse_edges_and_faces(rank, num_elems[1], num_elems[2], &mut toks);
abs.push(edges);
abs.push(faces);
}
// Adds all higher elements.
for &num_el in num_elems.iter().take(rank as usize).skip(3) {
abs.push(parse_els(num_el, &mut toks));
}
// Caps the abstract polytope, returns the concrete one.
if rank != 2 {
abs.push_max();
}
Concrete { vertices, abs }
}
/// Loads a polytope from a file path.
pub fn from_path(fp: &impl AsRef<Path>) -> Result<Concrete> {
Ok(from_src(String::from_utf8(std::fs::read(fp)?).unwrap()))
}
/// A set of options to be used when saving the OFF file.
#[derive(Clone, Copy)]
pub struct OffOptions {
/// Whether the OFF file should have comments specifying each face type.
pub comments: bool,
}
impl Default for OffOptions {
fn default() -> Self {
OffOptions { comments: true }
}
}
fn write_el_counts(off: &mut String, opt: &OffOptions, mut el_counts: RankVec<usize>) {
let rank = el_counts.rank();
// # Vertices, Faces, Edges, ...
if opt.comments {
off.push_str("\n# Vertices");
let mut element_names = Vec::with_capacity((rank - 1) as usize);
for r in 1..rank {
element_names.push(element_name(r));
}
if element_names.len() >= 2 {
element_names.swap(0, 1);
}
for element_name in element_names {
off.push_str(", ");
off.push_str(&element_name);
}
off.push('\n');
}
// Swaps edges and faces, because OFF format bad.
if rank >= 3 {
el_counts.swap(1, 2);
}
for r in 0..rank {
off.push_str(&el_counts[r].to_string());
off.push(' ');
}
off.push('\n');
}
/// Writes the vertices of a polytope into an OFF file.
fn write_vertices(off: &mut String, opt: &OffOptions, vertices: &[Point]) {
// # Vertices
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(0));
off.push('\n');
}
// Adds the coordinates.
for v in vertices {
for c in v.into_iter() {
off.push_str(&c.to_string());
off.push(' ');
}
off.push('\n');
}
}
/// Gets and writes the faces of a polytope into an OFF file.
fn write_faces(
off: &mut String,
opt: &OffOptions,
rank: usize,
edges: &ElementList,
faces: &ElementList,
) {
// # Faces
if opt.comments {
let el_name = if rank > 2 {
element_name(2)
} else {
super::COMPONENTS.to_string()
};
off.push_str("\n# ");
off.push_str(&el_name);
off.push('\n');
}
// TODO: write components instead of faces in 2D case.
for face in faces.iter() {
off.push_str(&face.subs.len().to_string());
// Maps an OFF index into a graph index.
let mut hash_edges = HashMap::new();
let mut graph = Graph::new_undirected();
// Maps the vertex indices to consecutive integers from 0.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
let mut hash_edge = Vec::with_capacity(2);
for &vertex_idx in &edge.subs {
match hash_edges.get(&vertex_idx) {
Some(&idx) => hash_edge.push(idx),
None => {
let idx = hash_edges.len();
hash_edges.insert(vertex_idx, idx);
hash_edge.push(idx);
graph.add_node(vertex_idx);
}
}
}
}
// There should be as many graph indices as edges on the face.
// Otherwise, something went wrong.
debug_assert_eq!(
hash_edges.len(),
face.subs.len(),
"Faces don't have the same number of edges as there are in the polytope!"
);
// Adds the edges to the graph.
for &edge_idx in &face.subs {
let edge = &edges[edge_idx];
graph.add_edge(
NodeIndex::new(*hash_edges.get(&edge.subs[0]).unwrap()),
NodeIndex::new(*hash_edges.get(&edge.subs[1]).unwrap()),
(),
);
}
// Retrieves the cycle of vertices.
let mut dfs = Dfs::new(&graph, NodeIndex::new(0));
while let Some(nx) = dfs.next(&graph) {
off.push(' ');
off.push_str(&graph[nx].to_string());
}
off.push('\n');
}
}
/// Writes the n-elements of a polytope into an OFF file.
fn write_els(off: &mut String, opt: &OffOptions, rank: isize, els: &[Element]) {
// # n-elements
if opt.comments {
off.push_str("\n# ");
off.push_str(&element_name(rank));
off.push('\n');
}
// Adds the elements' indices.
for el in els {
off.push_str(&el.subs.len().to_string());
for &sub in &el.subs {
off.push(' ');
off.push_str(&sub.to_string());
}
off.push('\n');
}
}
/// Converts a polytope into an OFF file.
impl Concrete {
pub fn to_src(&self, opt: OffOptions) -> String {
let rank = self.rank();
let vertices = &self.vertices;
let abs = &self.abs;
let mut off = String::new();
// Blatant advertising.
if opt.comments {
off += &format!(
"# Generated using Miratope v{} (https://github.com/OfficialURL/miratope-rs)\n",
env!("CARGO_PKG_VERSION")
);
}
// Writes header.
if rank != 3 {
off += &rank.to_string();
}
off += "OFF\n";
// If we have a nullitope or point on our hands, that is all.
if rank < 1 {
return off;
}
// Adds the element counts.
write_el_counts(&mut off, &opt, self.el_counts());
// Adds vertex coordinates.
write_vertices(&mut off, &opt, vertices);
// Adds faces.
if rank >= 2 {
write_faces(&mut off, &opt, rank as usize, &abs[1], &abs[2]);
}
// Adds the rest of the elements.
for r in 3..rank {
write_els(&mut off, &opt, r, &abs[r]);
}
off
}
/// Writes a polytope's OFF file in a specified file path.
pub fn to_path(&self, fp: &impl AsRef<Path>, opt: OffOptions) -> Result<()> {
std::fs::write(fp, self.to_src(opt))
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Used to test a particular polytope.
fn test_shape(p: Concrete, el_nums: Vec<usize>) {
// Checks that element counts match up.
assert_eq!(p.el_counts().0, el_nums);
// Checks that the polytope can be reloaded correctly.
assert_eq!(
from_src(p.to_src(OffOptions::default())).el_counts().0,
el_nums
);
}
#[test]
/// Checks that a point has the correct amount of elements.
fn point_nums() {
let point = from_src("0OFF".to_string());
test_shape(point, vec![1, 1])
}
#[test]
/// Checks that a dyad has the correct amount of elements.
fn dyad_nums() {
let dyad = from_src("1OFF 2 -1 1 0 1".to_string());
test_shape(dyad, vec![1, 2, 1])
}
/*
#[test]
/// Checks that a hexagon has the correct amount of elements.
fn hig_nums() {
let hig =from_src(
"2OFF 6 1 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 6 0 1 2 3 4 5".to_string()
);
test_shape(hig, vec![1, 6, 6, 1])
}
#[test]
/// Checks that a hexagram has the correct amount of elements.
fn shig_nums() {
let shig: Concrete = from_src(
"2OFF 6 2 1 0 0.5 0.8660254037844386 -0.5 0.8660254037844386 -1 0 -0.5 -0.8660254037844386 0.5 -0.8660254037844386 3 0 2 4 3 1 3 5".to_string()
).into();
test_shape(shig, vec![1, 6, 6, 1])
}
*/
#[test]
/// Checks that a tetrahedron has the correct amount of elements.
fn tet_nums() {
let tet = from_src(
"OFF 4 4 6 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2".to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
/// Checks that a 2-tetrahedron compund has the correct amount of elements.
fn so_nums() {
let so = from_src(
"OFF 8 8 12 1 1 1 1 -1 -1 -1 1 -1 -1 -1 1 -1 -1 -1 -1 1 1 1 -1 1 1 1 -1 3 0 1 2 3 3 0 2 3 0 1 3 3 3 1 2 3 4 5 6 3 7 4 6 3 4 5 7 3 7 5 6 ".to_string(),
);
test_shape(so, vec![1, 8, 12, 8, 1])
}
#[test]
/// Checks that a pentachoron has the correct amount of elements.
fn pen_nums() {
let pen = from_src(
"4OFF 5 10 10 5 0.158113883008419 0.204124145231932 0.288675134594813 0.5 0.158113883008419 0.204124145231932 0.288675134594813 -0.5 0.158113883008419 0.204124145231932 -0.577350269189626 0 0.158113883008419 -0.612372435695794 0 0 -0.632455532033676 0 0 0 3 0 3 4 3 0 2 4 3 2 3 4 3 0 2 3 3 0 1 4 3 1 3 4 3 0 1 3 3 1 2 4 3 0 1 2 3 1 2 3 4 0 1 2 3 4 0 4 5 6 4 1 4 7 8 4 2 5 7 9 4 3 6 8 9"
.to_string(),
);
test_shape(pen, vec![1, 5, 10, 10, 5, 1])
}
#[test]
/// Checks that comments are correctly parsed.
fn comments() {
let tet = from_src(
"# So
OFF # this
4 4 6 # is
# a # test # of
1 1 1 # the 1234 5678
1 -1 -1 # comment 987
-1 1 -1 # removal 654
-1 -1 1 # system 321
3 0 1 2 #let #us #see
3 3 0 2# if
3 0 1 3#it
3 3 1 2#works!#"
.to_string(),
);
test_shape(tet, vec![1, 4, 6, 4, 1])
}
#[test]
#[should_panic(expected = "OFF file empty")]
fn empty() {
Concrete::from(from_src("".to_string()));
}
#[test]
#[should_panic(expected = "no \"OFF\" detected")]
fn magic_num() {
Concrete::from(from_src("foo bar".to_string()));
}
} | let el_sub_num = next_tok(toks);
let mut subs = Vec::with_capacity(el_sub_num); | random_line_split |
plan.py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# to allow for typing to refer to class being defined (Mission)...
from __future__ import annotations
import math
import random
import sys
import warnings
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import numpy as np
from smarts.core.coordinates import Dimensions, Heading, Point, Pose, RefLinePoint
from smarts.core.road_map import RoadMap
from smarts.core.utils.math import min_angles_difference_signed, vec_to_radians
from smarts.sstudio.types import EntryTactic, TrapEntryTactic
MISSING = sys.maxsize
class PlanningError(Exception):
"""Raised in cases when map related planning fails."""
pass
# XXX: consider using smarts.core.coordinates.Pose for this
@dataclass(frozen=True)
class Start:
"""A starting state for a route or mission."""
position: np.ndarray
heading: Heading
from_front_bumper: Optional[bool] = True
@property
def point(self) -> Point:
"""The coordinate of this starting location."""
return Point.from_np_array(self.position)
@classmethod
def from_pose(cls, pose: Pose):
"""Convert to a starting location from a pose."""
return cls(
position=pose.as_position2d(),
heading=pose.heading,
from_front_bumper=False,
)
@dataclass(frozen=True, unsafe_hash=True)
class Goal:
"""Describes an expected end state for a route or mission."""
def is_specific(self) -> bool:
"""If the goal is reachable at a specific position."""
return False
def is_reached(self, vehicle_state) -> bool:
"""If the goal has been completed."""
return False
@dataclass(frozen=True, unsafe_hash=True)
class EndlessGoal(Goal):
"""A goal that can never be completed."""
pass
@dataclass(frozen=True, unsafe_hash=True)
class PositionalGoal(Goal):
"""A goal that can be completed by reaching an end area."""
position: Point
# target_heading: Heading
radius: float
@classmethod
def from_road(
cls,
road_id: str,
road_map: RoadMap,
lane_index: int = 0,
lane_offset: Optional[float] = None,
radius: float = 1,
):
"""Generate the goal ending at the specified road lane."""
road = road_map.road_by_id(road_id)
lane = road.lane_at_index(lane_index)
if lane_offset is None:
# Default to the midpoint safely ensuring we are on the lane and not
# bordering another
lane_offset = lane.length * 0.5
position = lane.from_lane_coord(RefLinePoint(lane_offset))
return cls(position=position, radius=radius)
def is_specific(self) -> bool:
return True
def is_reached(self, vehicle_state) -> bool:
a = vehicle_state.pose.position
b = self.position
sqr_dist = (a[0] - b.x) ** 2 + (a[1] - b.y) ** 2
return sqr_dist <= self.radius**2
@dataclass(frozen=True, unsafe_hash=True)
class TraverseGoal(Goal):
"""A TraverseGoal is satisfied whenever an Agent-driven vehicle
successfully finishes traversing a non-closed (acyclic) map
It's a way for the vehicle to exit the simulation successfully,
for example, driving across from one side to the other on a
straight road and then continuing off the map. This goal is
non-specific about *where* the map is exited, save for that
the vehicle must be going the correct direction in its lane
just prior to doing so."""
road_map: RoadMap
def is_specific(self) -> bool:
return False
def is_reached(self, vehicle_state) -> bool:
pose = vehicle_state.pose
return self._drove_off_map(pose.point, pose.heading)
def _drove_off_map(self, veh_pos: Point, veh_heading: float) -> bool:
# try to determine if the vehicle "exited" the map by driving beyond the end of a dead-end lane.
nearest_lanes = self.road_map.nearest_lanes(veh_pos)
if not nearest_lanes:
return False # we can't tell anything here
nl, dist = nearest_lanes[0]
offset = nl.to_lane_coord(veh_pos).s
nl_width, conf = nl.width_at_offset(offset)
if conf > 0.5:
|
# now check its heading to ensure it was going in roughly the right direction for this lane
end_vec = nl.vector_at_offset(nl.length - 0.1)
end_heading = vec_to_radians(end_vec[:2])
heading_err = min_angles_difference_signed(end_heading, veh_heading)
return abs(heading_err) < math.pi / 6
def default_entry_tactic(default_entry_speed: Optional[float] = None) -> EntryTactic:
"""The default tactic the simulation will use to acquire an actor for an agent."""
return TrapEntryTactic(
start_time=MISSING,
wait_to_hijack_limit_s=0,
exclusion_prefixes=tuple(),
zone=None,
default_entry_speed=default_entry_speed,
)
@dataclass(frozen=True)
class Via:
"""Describes a collectible item that can be used to shape rewards."""
lane_id: str
road_id: str
lane_index: int
position: Tuple[float, float]
hit_distance: float
required_speed: float
@dataclass(frozen=True)
class VehicleSpec:
"""Vehicle specifications"""
veh_id: str
veh_config_type: str
dimensions: Dimensions
@dataclass(frozen=True)
class Mission:
"""A navigation mission describing a desired trip."""
# XXX: Note that this Mission differs from sstudio.types.Mission in that
# this can be less specific as to the particular route taken to the goal,
# whereas sstudio.type.Mission includes a specific, predetermined/static route
# (which might be random, but is still determined before running the scenario).
start: Start
goal: Goal
# An optional list of road IDs between the start and end goal that we want to
# ensure the mission includes
route_vias: Tuple[str, ...] = field(default_factory=tuple)
start_time: float = MISSING
entry_tactic: Optional[EntryTactic] = None
via: Tuple[Via, ...] = ()
# if specified, will use vehicle_spec to build the vehicle (for histories)
vehicle_spec: Optional[VehicleSpec] = None
@property
def requires_route(self) -> bool:
"""If the mission requires a route to be generated."""
return self.goal.is_specific()
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed successfully."""
return self.goal.is_reached(vehicle_state)
@staticmethod
def endless_mission(
start_pose: Pose,
) -> Mission:
"""Generate an endless mission."""
return Mission(
start=Start(start_pose.as_position2d(), start_pose.heading),
goal=EndlessGoal(),
entry_tactic=None,
)
@staticmethod
def random_endless_mission(
road_map: RoadMap,
min_range_along_lane: float = 0.3,
max_range_along_lane: float = 0.9,
) -> Mission:
"""A mission that starts from a random location and continues indefinitely."""
assert min_range_along_lane > 0 # Need to start further than beginning of lane
assert max_range_along_lane < 1 # Cannot start past end of lane
assert min_range_along_lane < max_range_along_lane # Min must be less than max
road = road_map.random_route(1).roads[0]
n_lane = random.choice(road.lanes)
# XXX: The ends of the road are not as useful as starting mission locations.
offset = random.random() * min_range_along_lane + (
max_range_along_lane - min_range_along_lane
)
offset *= n_lane.length
coord = n_lane.from_lane_coord(RefLinePoint(offset))
target_pose = n_lane.center_pose_at_point(coord)
return Mission.endless_mission(start_pose=target_pose)
def __post_init__(self):
if self.entry_tactic is not None and self.entry_tactic.start_time != MISSING:
object.__setattr__(self, "start_time", self.entry_tactic.start_time)
elif self.start_time == MISSING:
object.__setattr__(self, "start_time", 0.1)
@dataclass(frozen=True)
class LapMission(Mission):
"""A mission requiring a number of laps through the goal."""
num_laps: Optional[int] = None # None means infinite # of laps
# If a route was specified in a sstudio.types.LapMission object,
# then this should be set to its road length
route_length: Optional[float] = None
def __post_init__(self):
# TAI: consider allowing LapMissions for TraverseGoal goals (num_laps ~ num_traversals)
assert self.goal.is_specific
if self.route_length is None:
# TAI: could just assert here, but may want to be more clever...
self.route_length = 1
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed."""
return (
self.goal.is_reached(vehicle_state)
and distance_travelled > self.route_length * self.num_laps
)
@dataclass
class PlanFrame:
"""Describes a plan that is serializable."""
road_ids: List[str]
mission: Optional[Mission]
class Plan:
"""Describes a navigation plan (route) to fulfill a mission."""
def __init__(
self,
road_map: RoadMap,
mission: Optional[Mission] = None,
find_route: bool = True,
):
self._road_map = road_map
self._mission = mission
self._route = None
if find_route:
self.create_route(mission)
@property
def route(self) -> Optional[RoadMap.Route]:
"""The route that this plan calls for."""
return self._route
@route.setter
def route(self, route: RoadMap.Route):
# XXX: traffic simulator may also track route
self._route = route
@property
def mission(self) -> Optional[Mission]:
"""The mission this plan is meant to fulfill."""
# XXX: This currently can be `None`
return self._mission
@property
def road_map(self) -> RoadMap:
"""The road map this plan is relative to."""
return self._road_map
def create_route(
self,
mission: Mission,
start_lane_radius: Optional[float] = None,
end_lane_radius: Optional[float] = None,
):
"""Generates a route that conforms to a mission.
Args:
mission (Mission):
A mission the agent should follow. Defaults to endless if `None`.
start_lane_radius (Optional[float]):
Radius (meter) to find the nearest starting lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
end_lane_radius (Optional[float]):
Radius (meter) to find the nearest ending lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
"""
assert not self._route or not len(
self._route.road_ids
), "Already called create_route()."
self._mission = mission or Mission.random_endless_mission(self._road_map)
if not self._mission.requires_route:
self._route = self._road_map.empty_route()
return
assert isinstance(self._mission.goal, PositionalGoal)
start_lanes = self._road_map.nearest_lanes(
self._mission.start.point,
include_junctions=True,
radius=start_lane_radius,
)
if not start_lanes:
self._mission = Mission.endless_mission(Pose.origin())
raise PlanningError("Starting lane not found. Route must start in a lane.")
via_roads = [self._road_map.road_by_id(via) for via in self._mission.route_vias]
end_lanes = self._road_map.nearest_lanes(
self._mission.goal.position,
include_junctions=False,
radius=end_lane_radius,
)
assert end_lanes is not None, "No end lane found. Route must end in a lane."
# When an agent is in an intersection, the `nearest_lanes` method might
# not return the correct road as the first choice. Hence, nearest
# starting lanes are tried in sequence until a route is found or until
# all nearby starting lane options are exhausted.
for end_lane, _ in end_lanes:
for start_lane, _ in start_lanes:
self._route = self._road_map.generate_routes(
start_lane, end_lane, via_roads, 1
)[0]
if self._route.road_length > 0:
break
if self._route.road_length > 0:
break
if len(self._route.roads) == 0:
self._mission = Mission.endless_mission(Pose.origin())
start_road_ids = [start_lane.road.road_id for start_lane, _ in start_lanes]
raise PlanningError(
"Unable to find a route between start={} and end={}. If either of "
"these are junctions (not well supported today) please switch to "
"roads and ensure there is a > 0 offset into the road if it is "
"after a junction.".format(start_road_ids, end_lane.road.road_id)
)
return self._mission
def frame(self) -> PlanFrame:
"""Get the state of this plan."""
assert self._mission
return PlanFrame(
road_ids=self._route.road_ids if self._route else [], mission=self._mission
)
@classmethod
def from_frame(cls, plan_frame: PlanFrame, road_map: RoadMap) -> "Plan":
"""Generate the plan from a frame."""
new_plan = cls(road_map=road_map, mission=plan_frame.mission, find_route=False)
new_plan.route = road_map.route_from_road_ids(plan_frame.road_ids)
return new_plan
| if nl.outgoing_lanes or dist < 0.5 * nl_width + 1e-1:
return False # the last lane it was in was not a dead-end, or it's still in a lane
if offset < nl.length - 2 * nl_width:
return False # it's no where near the end of the lane | conditional_block |
plan.py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# to allow for typing to refer to class being defined (Mission)...
from __future__ import annotations
import math
import random
import sys
import warnings
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import numpy as np
from smarts.core.coordinates import Dimensions, Heading, Point, Pose, RefLinePoint
from smarts.core.road_map import RoadMap
from smarts.core.utils.math import min_angles_difference_signed, vec_to_radians
from smarts.sstudio.types import EntryTactic, TrapEntryTactic
MISSING = sys.maxsize
class PlanningError(Exception):
"""Raised in cases when map related planning fails."""
pass
# XXX: consider using smarts.core.coordinates.Pose for this
@dataclass(frozen=True)
class Start:
"""A starting state for a route or mission."""
position: np.ndarray
heading: Heading
from_front_bumper: Optional[bool] = True
@property
def point(self) -> Point:
"""The coordinate of this starting location."""
return Point.from_np_array(self.position)
@classmethod
def from_pose(cls, pose: Pose):
"""Convert to a starting location from a pose."""
return cls(
position=pose.as_position2d(),
heading=pose.heading,
from_front_bumper=False,
)
@dataclass(frozen=True, unsafe_hash=True)
class Goal:
"""Describes an expected end state for a route or mission."""
def is_specific(self) -> bool:
"""If the goal is reachable at a specific position."""
return False
def is_reached(self, vehicle_state) -> bool:
"""If the goal has been completed."""
return False
@dataclass(frozen=True, unsafe_hash=True)
class EndlessGoal(Goal):
"""A goal that can never be completed."""
pass
@dataclass(frozen=True, unsafe_hash=True)
class PositionalGoal(Goal):
"""A goal that can be completed by reaching an end area."""
position: Point
# target_heading: Heading
radius: float
@classmethod
def from_road(
cls,
road_id: str,
road_map: RoadMap,
lane_index: int = 0,
lane_offset: Optional[float] = None,
radius: float = 1,
):
"""Generate the goal ending at the specified road lane."""
road = road_map.road_by_id(road_id)
lane = road.lane_at_index(lane_index)
if lane_offset is None:
# Default to the midpoint safely ensuring we are on the lane and not
# bordering another
lane_offset = lane.length * 0.5
position = lane.from_lane_coord(RefLinePoint(lane_offset))
return cls(position=position, radius=radius)
def is_specific(self) -> bool:
return True
def is_reached(self, vehicle_state) -> bool:
a = vehicle_state.pose.position
b = self.position
sqr_dist = (a[0] - b.x) ** 2 + (a[1] - b.y) ** 2
return sqr_dist <= self.radius**2
@dataclass(frozen=True, unsafe_hash=True) | class TraverseGoal(Goal):
"""A TraverseGoal is satisfied whenever an Agent-driven vehicle
successfully finishes traversing a non-closed (acyclic) map
It's a way for the vehicle to exit the simulation successfully,
for example, driving across from one side to the other on a
straight road and then continuing off the map. This goal is
non-specific about *where* the map is exited, save for that
the vehicle must be going the correct direction in its lane
just prior to doing so."""
road_map: RoadMap
def is_specific(self) -> bool:
return False
def is_reached(self, vehicle_state) -> bool:
pose = vehicle_state.pose
return self._drove_off_map(pose.point, pose.heading)
def _drove_off_map(self, veh_pos: Point, veh_heading: float) -> bool:
# try to determine if the vehicle "exited" the map by driving beyond the end of a dead-end lane.
nearest_lanes = self.road_map.nearest_lanes(veh_pos)
if not nearest_lanes:
return False # we can't tell anything here
nl, dist = nearest_lanes[0]
offset = nl.to_lane_coord(veh_pos).s
nl_width, conf = nl.width_at_offset(offset)
if conf > 0.5:
if nl.outgoing_lanes or dist < 0.5 * nl_width + 1e-1:
return False # the last lane it was in was not a dead-end, or it's still in a lane
if offset < nl.length - 2 * nl_width:
return False # it's no where near the end of the lane
# now check its heading to ensure it was going in roughly the right direction for this lane
end_vec = nl.vector_at_offset(nl.length - 0.1)
end_heading = vec_to_radians(end_vec[:2])
heading_err = min_angles_difference_signed(end_heading, veh_heading)
return abs(heading_err) < math.pi / 6
def default_entry_tactic(default_entry_speed: Optional[float] = None) -> EntryTactic:
"""The default tactic the simulation will use to acquire an actor for an agent."""
return TrapEntryTactic(
start_time=MISSING,
wait_to_hijack_limit_s=0,
exclusion_prefixes=tuple(),
zone=None,
default_entry_speed=default_entry_speed,
)
@dataclass(frozen=True)
class Via:
"""Describes a collectible item that can be used to shape rewards."""
lane_id: str
road_id: str
lane_index: int
position: Tuple[float, float]
hit_distance: float
required_speed: float
@dataclass(frozen=True)
class VehicleSpec:
"""Vehicle specifications"""
veh_id: str
veh_config_type: str
dimensions: Dimensions
@dataclass(frozen=True)
class Mission:
"""A navigation mission describing a desired trip."""
# XXX: Note that this Mission differs from sstudio.types.Mission in that
# this can be less specific as to the particular route taken to the goal,
# whereas sstudio.type.Mission includes a specific, predetermined/static route
# (which might be random, but is still determined before running the scenario).
start: Start
goal: Goal
# An optional list of road IDs between the start and end goal that we want to
# ensure the mission includes
route_vias: Tuple[str, ...] = field(default_factory=tuple)
start_time: float = MISSING
entry_tactic: Optional[EntryTactic] = None
via: Tuple[Via, ...] = ()
# if specified, will use vehicle_spec to build the vehicle (for histories)
vehicle_spec: Optional[VehicleSpec] = None
@property
def requires_route(self) -> bool:
"""If the mission requires a route to be generated."""
return self.goal.is_specific()
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed successfully."""
return self.goal.is_reached(vehicle_state)
@staticmethod
def endless_mission(
start_pose: Pose,
) -> Mission:
"""Generate an endless mission."""
return Mission(
start=Start(start_pose.as_position2d(), start_pose.heading),
goal=EndlessGoal(),
entry_tactic=None,
)
@staticmethod
def random_endless_mission(
road_map: RoadMap,
min_range_along_lane: float = 0.3,
max_range_along_lane: float = 0.9,
) -> Mission:
"""A mission that starts from a random location and continues indefinitely."""
assert min_range_along_lane > 0 # Need to start further than beginning of lane
assert max_range_along_lane < 1 # Cannot start past end of lane
assert min_range_along_lane < max_range_along_lane # Min must be less than max
road = road_map.random_route(1).roads[0]
n_lane = random.choice(road.lanes)
# XXX: The ends of the road are not as useful as starting mission locations.
offset = random.random() * min_range_along_lane + (
max_range_along_lane - min_range_along_lane
)
offset *= n_lane.length
coord = n_lane.from_lane_coord(RefLinePoint(offset))
target_pose = n_lane.center_pose_at_point(coord)
return Mission.endless_mission(start_pose=target_pose)
def __post_init__(self):
if self.entry_tactic is not None and self.entry_tactic.start_time != MISSING:
object.__setattr__(self, "start_time", self.entry_tactic.start_time)
elif self.start_time == MISSING:
object.__setattr__(self, "start_time", 0.1)
@dataclass(frozen=True)
class LapMission(Mission):
"""A mission requiring a number of laps through the goal."""
num_laps: Optional[int] = None # None means infinite # of laps
# If a route was specified in a sstudio.types.LapMission object,
# then this should be set to its road length
route_length: Optional[float] = None
def __post_init__(self):
# TAI: consider allowing LapMissions for TraverseGoal goals (num_laps ~ num_traversals)
assert self.goal.is_specific
if self.route_length is None:
# TAI: could just assert here, but may want to be more clever...
self.route_length = 1
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed."""
return (
self.goal.is_reached(vehicle_state)
and distance_travelled > self.route_length * self.num_laps
)
@dataclass
class PlanFrame:
"""Describes a plan that is serializable."""
road_ids: List[str]
mission: Optional[Mission]
class Plan:
"""Describes a navigation plan (route) to fulfill a mission."""
def __init__(
self,
road_map: RoadMap,
mission: Optional[Mission] = None,
find_route: bool = True,
):
self._road_map = road_map
self._mission = mission
self._route = None
if find_route:
self.create_route(mission)
@property
def route(self) -> Optional[RoadMap.Route]:
"""The route that this plan calls for."""
return self._route
@route.setter
def route(self, route: RoadMap.Route):
# XXX: traffic simulator may also track route
self._route = route
@property
def mission(self) -> Optional[Mission]:
"""The mission this plan is meant to fulfill."""
# XXX: This currently can be `None`
return self._mission
@property
def road_map(self) -> RoadMap:
"""The road map this plan is relative to."""
return self._road_map
def create_route(
self,
mission: Mission,
start_lane_radius: Optional[float] = None,
end_lane_radius: Optional[float] = None,
):
"""Generates a route that conforms to a mission.
Args:
mission (Mission):
A mission the agent should follow. Defaults to endless if `None`.
start_lane_radius (Optional[float]):
Radius (meter) to find the nearest starting lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
end_lane_radius (Optional[float]):
Radius (meter) to find the nearest ending lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
"""
assert not self._route or not len(
self._route.road_ids
), "Already called create_route()."
self._mission = mission or Mission.random_endless_mission(self._road_map)
if not self._mission.requires_route:
self._route = self._road_map.empty_route()
return
assert isinstance(self._mission.goal, PositionalGoal)
start_lanes = self._road_map.nearest_lanes(
self._mission.start.point,
include_junctions=True,
radius=start_lane_radius,
)
if not start_lanes:
self._mission = Mission.endless_mission(Pose.origin())
raise PlanningError("Starting lane not found. Route must start in a lane.")
via_roads = [self._road_map.road_by_id(via) for via in self._mission.route_vias]
end_lanes = self._road_map.nearest_lanes(
self._mission.goal.position,
include_junctions=False,
radius=end_lane_radius,
)
assert end_lanes is not None, "No end lane found. Route must end in a lane."
# When an agent is in an intersection, the `nearest_lanes` method might
# not return the correct road as the first choice. Hence, nearest
# starting lanes are tried in sequence until a route is found or until
# all nearby starting lane options are exhausted.
for end_lane, _ in end_lanes:
for start_lane, _ in start_lanes:
self._route = self._road_map.generate_routes(
start_lane, end_lane, via_roads, 1
)[0]
if self._route.road_length > 0:
break
if self._route.road_length > 0:
break
if len(self._route.roads) == 0:
self._mission = Mission.endless_mission(Pose.origin())
start_road_ids = [start_lane.road.road_id for start_lane, _ in start_lanes]
raise PlanningError(
"Unable to find a route between start={} and end={}. If either of "
"these are junctions (not well supported today) please switch to "
"roads and ensure there is a > 0 offset into the road if it is "
"after a junction.".format(start_road_ids, end_lane.road.road_id)
)
return self._mission
def frame(self) -> PlanFrame:
"""Get the state of this plan."""
assert self._mission
return PlanFrame(
road_ids=self._route.road_ids if self._route else [], mission=self._mission
)
@classmethod
def from_frame(cls, plan_frame: PlanFrame, road_map: RoadMap) -> "Plan":
"""Generate the plan from a frame."""
new_plan = cls(road_map=road_map, mission=plan_frame.mission, find_route=False)
new_plan.route = road_map.route_from_road_ids(plan_frame.road_ids)
return new_plan | random_line_split | |
plan.py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# to allow for typing to refer to class being defined (Mission)...
from __future__ import annotations
import math
import random
import sys
import warnings
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import numpy as np
from smarts.core.coordinates import Dimensions, Heading, Point, Pose, RefLinePoint
from smarts.core.road_map import RoadMap
from smarts.core.utils.math import min_angles_difference_signed, vec_to_radians
from smarts.sstudio.types import EntryTactic, TrapEntryTactic
MISSING = sys.maxsize
class PlanningError(Exception):
"""Raised in cases when map related planning fails."""
pass
# XXX: consider using smarts.core.coordinates.Pose for this
@dataclass(frozen=True)
class Start:
"""A starting state for a route or mission."""
position: np.ndarray
heading: Heading
from_front_bumper: Optional[bool] = True
@property
def point(self) -> Point:
"""The coordinate of this starting location."""
return Point.from_np_array(self.position)
@classmethod
def from_pose(cls, pose: Pose):
"""Convert to a starting location from a pose."""
return cls(
position=pose.as_position2d(),
heading=pose.heading,
from_front_bumper=False,
)
@dataclass(frozen=True, unsafe_hash=True)
class Goal:
"""Describes an expected end state for a route or mission."""
def is_specific(self) -> bool:
"""If the goal is reachable at a specific position."""
return False
def is_reached(self, vehicle_state) -> bool:
"""If the goal has been completed."""
return False
@dataclass(frozen=True, unsafe_hash=True)
class EndlessGoal(Goal):
"""A goal that can never be completed."""
pass
@dataclass(frozen=True, unsafe_hash=True)
class PositionalGoal(Goal):
"""A goal that can be completed by reaching an end area."""
position: Point
# target_heading: Heading
radius: float
@classmethod
def from_road(
cls,
road_id: str,
road_map: RoadMap,
lane_index: int = 0,
lane_offset: Optional[float] = None,
radius: float = 1,
):
"""Generate the goal ending at the specified road lane."""
road = road_map.road_by_id(road_id)
lane = road.lane_at_index(lane_index)
if lane_offset is None:
# Default to the midpoint safely ensuring we are on the lane and not
# bordering another
lane_offset = lane.length * 0.5
position = lane.from_lane_coord(RefLinePoint(lane_offset))
return cls(position=position, radius=radius)
def is_specific(self) -> bool:
return True
def is_reached(self, vehicle_state) -> bool:
a = vehicle_state.pose.position
b = self.position
sqr_dist = (a[0] - b.x) ** 2 + (a[1] - b.y) ** 2
return sqr_dist <= self.radius**2
@dataclass(frozen=True, unsafe_hash=True)
class TraverseGoal(Goal):
|
def default_entry_tactic(default_entry_speed: Optional[float] = None) -> EntryTactic:
"""The default tactic the simulation will use to acquire an actor for an agent."""
return TrapEntryTactic(
start_time=MISSING,
wait_to_hijack_limit_s=0,
exclusion_prefixes=tuple(),
zone=None,
default_entry_speed=default_entry_speed,
)
@dataclass(frozen=True)
class Via:
"""Describes a collectible item that can be used to shape rewards."""
lane_id: str
road_id: str
lane_index: int
position: Tuple[float, float]
hit_distance: float
required_speed: float
@dataclass(frozen=True)
class VehicleSpec:
"""Vehicle specifications"""
veh_id: str
veh_config_type: str
dimensions: Dimensions
@dataclass(frozen=True)
class Mission:
"""A navigation mission describing a desired trip."""
# XXX: Note that this Mission differs from sstudio.types.Mission in that
# this can be less specific as to the particular route taken to the goal,
# whereas sstudio.type.Mission includes a specific, predetermined/static route
# (which might be random, but is still determined before running the scenario).
start: Start
goal: Goal
# An optional list of road IDs between the start and end goal that we want to
# ensure the mission includes
route_vias: Tuple[str, ...] = field(default_factory=tuple)
start_time: float = MISSING
entry_tactic: Optional[EntryTactic] = None
via: Tuple[Via, ...] = ()
# if specified, will use vehicle_spec to build the vehicle (for histories)
vehicle_spec: Optional[VehicleSpec] = None
@property
def requires_route(self) -> bool:
"""If the mission requires a route to be generated."""
return self.goal.is_specific()
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed successfully."""
return self.goal.is_reached(vehicle_state)
@staticmethod
def endless_mission(
start_pose: Pose,
) -> Mission:
"""Generate an endless mission."""
return Mission(
start=Start(start_pose.as_position2d(), start_pose.heading),
goal=EndlessGoal(),
entry_tactic=None,
)
@staticmethod
def random_endless_mission(
road_map: RoadMap,
min_range_along_lane: float = 0.3,
max_range_along_lane: float = 0.9,
) -> Mission:
"""A mission that starts from a random location and continues indefinitely."""
assert min_range_along_lane > 0 # Need to start further than beginning of lane
assert max_range_along_lane < 1 # Cannot start past end of lane
assert min_range_along_lane < max_range_along_lane # Min must be less than max
road = road_map.random_route(1).roads[0]
n_lane = random.choice(road.lanes)
# XXX: The ends of the road are not as useful as starting mission locations.
offset = random.random() * min_range_along_lane + (
max_range_along_lane - min_range_along_lane
)
offset *= n_lane.length
coord = n_lane.from_lane_coord(RefLinePoint(offset))
target_pose = n_lane.center_pose_at_point(coord)
return Mission.endless_mission(start_pose=target_pose)
def __post_init__(self):
if self.entry_tactic is not None and self.entry_tactic.start_time != MISSING:
object.__setattr__(self, "start_time", self.entry_tactic.start_time)
elif self.start_time == MISSING:
object.__setattr__(self, "start_time", 0.1)
@dataclass(frozen=True)
class LapMission(Mission):
"""A mission requiring a number of laps through the goal."""
num_laps: Optional[int] = None # None means infinite # of laps
# If a route was specified in a sstudio.types.LapMission object,
# then this should be set to its road length
route_length: Optional[float] = None
def __post_init__(self):
# TAI: consider allowing LapMissions for TraverseGoal goals (num_laps ~ num_traversals)
assert self.goal.is_specific
if self.route_length is None:
# TAI: could just assert here, but may want to be more clever...
self.route_length = 1
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed."""
return (
self.goal.is_reached(vehicle_state)
and distance_travelled > self.route_length * self.num_laps
)
@dataclass
class PlanFrame:
"""Describes a plan that is serializable."""
road_ids: List[str]
mission: Optional[Mission]
class Plan:
"""Describes a navigation plan (route) to fulfill a mission."""
def __init__(
self,
road_map: RoadMap,
mission: Optional[Mission] = None,
find_route: bool = True,
):
self._road_map = road_map
self._mission = mission
self._route = None
if find_route:
self.create_route(mission)
@property
def route(self) -> Optional[RoadMap.Route]:
"""The route that this plan calls for."""
return self._route
@route.setter
def route(self, route: RoadMap.Route):
# XXX: traffic simulator may also track route
self._route = route
@property
def mission(self) -> Optional[Mission]:
"""The mission this plan is meant to fulfill."""
# XXX: This currently can be `None`
return self._mission
@property
def road_map(self) -> RoadMap:
"""The road map this plan is relative to."""
return self._road_map
def create_route(
self,
mission: Mission,
start_lane_radius: Optional[float] = None,
end_lane_radius: Optional[float] = None,
):
"""Generates a route that conforms to a mission.
Args:
mission (Mission):
A mission the agent should follow. Defaults to endless if `None`.
start_lane_radius (Optional[float]):
Radius (meter) to find the nearest starting lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
end_lane_radius (Optional[float]):
Radius (meter) to find the nearest ending lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
"""
assert not self._route or not len(
self._route.road_ids
), "Already called create_route()."
self._mission = mission or Mission.random_endless_mission(self._road_map)
if not self._mission.requires_route:
self._route = self._road_map.empty_route()
return
assert isinstance(self._mission.goal, PositionalGoal)
start_lanes = self._road_map.nearest_lanes(
self._mission.start.point,
include_junctions=True,
radius=start_lane_radius,
)
if not start_lanes:
self._mission = Mission.endless_mission(Pose.origin())
raise PlanningError("Starting lane not found. Route must start in a lane.")
via_roads = [self._road_map.road_by_id(via) for via in self._mission.route_vias]
end_lanes = self._road_map.nearest_lanes(
self._mission.goal.position,
include_junctions=False,
radius=end_lane_radius,
)
assert end_lanes is not None, "No end lane found. Route must end in a lane."
# When an agent is in an intersection, the `nearest_lanes` method might
# not return the correct road as the first choice. Hence, nearest
# starting lanes are tried in sequence until a route is found or until
# all nearby starting lane options are exhausted.
for end_lane, _ in end_lanes:
for start_lane, _ in start_lanes:
self._route = self._road_map.generate_routes(
start_lane, end_lane, via_roads, 1
)[0]
if self._route.road_length > 0:
break
if self._route.road_length > 0:
break
if len(self._route.roads) == 0:
self._mission = Mission.endless_mission(Pose.origin())
start_road_ids = [start_lane.road.road_id for start_lane, _ in start_lanes]
raise PlanningError(
"Unable to find a route between start={} and end={}. If either of "
"these are junctions (not well supported today) please switch to "
"roads and ensure there is a > 0 offset into the road if it is "
"after a junction.".format(start_road_ids, end_lane.road.road_id)
)
return self._mission
def frame(self) -> PlanFrame:
"""Get the state of this plan."""
assert self._mission
return PlanFrame(
road_ids=self._route.road_ids if self._route else [], mission=self._mission
)
@classmethod
def from_frame(cls, plan_frame: PlanFrame, road_map: RoadMap) -> "Plan":
"""Generate the plan from a frame."""
new_plan = cls(road_map=road_map, mission=plan_frame.mission, find_route=False)
new_plan.route = road_map.route_from_road_ids(plan_frame.road_ids)
return new_plan
| """A TraverseGoal is satisfied whenever an Agent-driven vehicle
successfully finishes traversing a non-closed (acyclic) map
It's a way for the vehicle to exit the simulation successfully,
for example, driving across from one side to the other on a
straight road and then continuing off the map. This goal is
non-specific about *where* the map is exited, save for that
the vehicle must be going the correct direction in its lane
just prior to doing so."""
road_map: RoadMap
def is_specific(self) -> bool:
return False
def is_reached(self, vehicle_state) -> bool:
pose = vehicle_state.pose
return self._drove_off_map(pose.point, pose.heading)
def _drove_off_map(self, veh_pos: Point, veh_heading: float) -> bool:
# try to determine if the vehicle "exited" the map by driving beyond the end of a dead-end lane.
nearest_lanes = self.road_map.nearest_lanes(veh_pos)
if not nearest_lanes:
return False # we can't tell anything here
nl, dist = nearest_lanes[0]
offset = nl.to_lane_coord(veh_pos).s
nl_width, conf = nl.width_at_offset(offset)
if conf > 0.5:
if nl.outgoing_lanes or dist < 0.5 * nl_width + 1e-1:
return False # the last lane it was in was not a dead-end, or it's still in a lane
if offset < nl.length - 2 * nl_width:
return False # it's no where near the end of the lane
# now check its heading to ensure it was going in roughly the right direction for this lane
end_vec = nl.vector_at_offset(nl.length - 0.1)
end_heading = vec_to_radians(end_vec[:2])
heading_err = min_angles_difference_signed(end_heading, veh_heading)
return abs(heading_err) < math.pi / 6 | identifier_body |
plan.py | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# to allow for typing to refer to class being defined (Mission)...
from __future__ import annotations
import math
import random
import sys
import warnings
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import numpy as np
from smarts.core.coordinates import Dimensions, Heading, Point, Pose, RefLinePoint
from smarts.core.road_map import RoadMap
from smarts.core.utils.math import min_angles_difference_signed, vec_to_radians
from smarts.sstudio.types import EntryTactic, TrapEntryTactic
MISSING = sys.maxsize
class PlanningError(Exception):
"""Raised in cases when map related planning fails."""
pass
# XXX: consider using smarts.core.coordinates.Pose for this
@dataclass(frozen=True)
class Start:
"""A starting state for a route or mission."""
position: np.ndarray
heading: Heading
from_front_bumper: Optional[bool] = True
@property
def point(self) -> Point:
"""The coordinate of this starting location."""
return Point.from_np_array(self.position)
@classmethod
def | (cls, pose: Pose):
"""Convert to a starting location from a pose."""
return cls(
position=pose.as_position2d(),
heading=pose.heading,
from_front_bumper=False,
)
@dataclass(frozen=True, unsafe_hash=True)
class Goal:
"""Describes an expected end state for a route or mission."""
def is_specific(self) -> bool:
"""If the goal is reachable at a specific position."""
return False
def is_reached(self, vehicle_state) -> bool:
"""If the goal has been completed."""
return False
@dataclass(frozen=True, unsafe_hash=True)
class EndlessGoal(Goal):
"""A goal that can never be completed."""
pass
@dataclass(frozen=True, unsafe_hash=True)
class PositionalGoal(Goal):
"""A goal that can be completed by reaching an end area."""
position: Point
# target_heading: Heading
radius: float
@classmethod
def from_road(
cls,
road_id: str,
road_map: RoadMap,
lane_index: int = 0,
lane_offset: Optional[float] = None,
radius: float = 1,
):
"""Generate the goal ending at the specified road lane."""
road = road_map.road_by_id(road_id)
lane = road.lane_at_index(lane_index)
if lane_offset is None:
# Default to the midpoint safely ensuring we are on the lane and not
# bordering another
lane_offset = lane.length * 0.5
position = lane.from_lane_coord(RefLinePoint(lane_offset))
return cls(position=position, radius=radius)
def is_specific(self) -> bool:
return True
def is_reached(self, vehicle_state) -> bool:
a = vehicle_state.pose.position
b = self.position
sqr_dist = (a[0] - b.x) ** 2 + (a[1] - b.y) ** 2
return sqr_dist <= self.radius**2
@dataclass(frozen=True, unsafe_hash=True)
class TraverseGoal(Goal):
"""A TraverseGoal is satisfied whenever an Agent-driven vehicle
successfully finishes traversing a non-closed (acyclic) map
It's a way for the vehicle to exit the simulation successfully,
for example, driving across from one side to the other on a
straight road and then continuing off the map. This goal is
non-specific about *where* the map is exited, save for that
the vehicle must be going the correct direction in its lane
just prior to doing so."""
road_map: RoadMap
def is_specific(self) -> bool:
return False
def is_reached(self, vehicle_state) -> bool:
pose = vehicle_state.pose
return self._drove_off_map(pose.point, pose.heading)
def _drove_off_map(self, veh_pos: Point, veh_heading: float) -> bool:
# try to determine if the vehicle "exited" the map by driving beyond the end of a dead-end lane.
nearest_lanes = self.road_map.nearest_lanes(veh_pos)
if not nearest_lanes:
return False # we can't tell anything here
nl, dist = nearest_lanes[0]
offset = nl.to_lane_coord(veh_pos).s
nl_width, conf = nl.width_at_offset(offset)
if conf > 0.5:
if nl.outgoing_lanes or dist < 0.5 * nl_width + 1e-1:
return False # the last lane it was in was not a dead-end, or it's still in a lane
if offset < nl.length - 2 * nl_width:
return False # it's no where near the end of the lane
# now check its heading to ensure it was going in roughly the right direction for this lane
end_vec = nl.vector_at_offset(nl.length - 0.1)
end_heading = vec_to_radians(end_vec[:2])
heading_err = min_angles_difference_signed(end_heading, veh_heading)
return abs(heading_err) < math.pi / 6
def default_entry_tactic(default_entry_speed: Optional[float] = None) -> EntryTactic:
"""The default tactic the simulation will use to acquire an actor for an agent."""
return TrapEntryTactic(
start_time=MISSING,
wait_to_hijack_limit_s=0,
exclusion_prefixes=tuple(),
zone=None,
default_entry_speed=default_entry_speed,
)
@dataclass(frozen=True)
class Via:
"""Describes a collectible item that can be used to shape rewards."""
lane_id: str
road_id: str
lane_index: int
position: Tuple[float, float]
hit_distance: float
required_speed: float
@dataclass(frozen=True)
class VehicleSpec:
"""Vehicle specifications"""
veh_id: str
veh_config_type: str
dimensions: Dimensions
@dataclass(frozen=True)
class Mission:
"""A navigation mission describing a desired trip."""
# XXX: Note that this Mission differs from sstudio.types.Mission in that
# this can be less specific as to the particular route taken to the goal,
# whereas sstudio.type.Mission includes a specific, predetermined/static route
# (which might be random, but is still determined before running the scenario).
start: Start
goal: Goal
# An optional list of road IDs between the start and end goal that we want to
# ensure the mission includes
route_vias: Tuple[str, ...] = field(default_factory=tuple)
start_time: float = MISSING
entry_tactic: Optional[EntryTactic] = None
via: Tuple[Via, ...] = ()
# if specified, will use vehicle_spec to build the vehicle (for histories)
vehicle_spec: Optional[VehicleSpec] = None
@property
def requires_route(self) -> bool:
"""If the mission requires a route to be generated."""
return self.goal.is_specific()
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed successfully."""
return self.goal.is_reached(vehicle_state)
@staticmethod
def endless_mission(
start_pose: Pose,
) -> Mission:
"""Generate an endless mission."""
return Mission(
start=Start(start_pose.as_position2d(), start_pose.heading),
goal=EndlessGoal(),
entry_tactic=None,
)
@staticmethod
def random_endless_mission(
road_map: RoadMap,
min_range_along_lane: float = 0.3,
max_range_along_lane: float = 0.9,
) -> Mission:
"""A mission that starts from a random location and continues indefinitely."""
assert min_range_along_lane > 0 # Need to start further than beginning of lane
assert max_range_along_lane < 1 # Cannot start past end of lane
assert min_range_along_lane < max_range_along_lane # Min must be less than max
road = road_map.random_route(1).roads[0]
n_lane = random.choice(road.lanes)
# XXX: The ends of the road are not as useful as starting mission locations.
offset = random.random() * min_range_along_lane + (
max_range_along_lane - min_range_along_lane
)
offset *= n_lane.length
coord = n_lane.from_lane_coord(RefLinePoint(offset))
target_pose = n_lane.center_pose_at_point(coord)
return Mission.endless_mission(start_pose=target_pose)
def __post_init__(self):
if self.entry_tactic is not None and self.entry_tactic.start_time != MISSING:
object.__setattr__(self, "start_time", self.entry_tactic.start_time)
elif self.start_time == MISSING:
object.__setattr__(self, "start_time", 0.1)
@dataclass(frozen=True)
class LapMission(Mission):
"""A mission requiring a number of laps through the goal."""
num_laps: Optional[int] = None # None means infinite # of laps
# If a route was specified in a sstudio.types.LapMission object,
# then this should be set to its road length
route_length: Optional[float] = None
def __post_init__(self):
# TAI: consider allowing LapMissions for TraverseGoal goals (num_laps ~ num_traversals)
assert self.goal.is_specific
if self.route_length is None:
# TAI: could just assert here, but may want to be more clever...
self.route_length = 1
def is_complete(self, vehicle_state, distance_travelled: float) -> bool:
"""If the mission has been completed."""
return (
self.goal.is_reached(vehicle_state)
and distance_travelled > self.route_length * self.num_laps
)
@dataclass
class PlanFrame:
"""Describes a plan that is serializable."""
road_ids: List[str]
mission: Optional[Mission]
class Plan:
"""Describes a navigation plan (route) to fulfill a mission."""
def __init__(
self,
road_map: RoadMap,
mission: Optional[Mission] = None,
find_route: bool = True,
):
self._road_map = road_map
self._mission = mission
self._route = None
if find_route:
self.create_route(mission)
@property
def route(self) -> Optional[RoadMap.Route]:
"""The route that this plan calls for."""
return self._route
@route.setter
def route(self, route: RoadMap.Route):
# XXX: traffic simulator may also track route
self._route = route
@property
def mission(self) -> Optional[Mission]:
"""The mission this plan is meant to fulfill."""
# XXX: This currently can be `None`
return self._mission
@property
def road_map(self) -> RoadMap:
"""The road map this plan is relative to."""
return self._road_map
def create_route(
self,
mission: Mission,
start_lane_radius: Optional[float] = None,
end_lane_radius: Optional[float] = None,
):
"""Generates a route that conforms to a mission.
Args:
mission (Mission):
A mission the agent should follow. Defaults to endless if `None`.
start_lane_radius (Optional[float]):
Radius (meter) to find the nearest starting lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
end_lane_radius (Optional[float]):
Radius (meter) to find the nearest ending lane for the given
mission. Defaults to a function of `_default_lane_width` of the
underlying road_map.
"""
assert not self._route or not len(
self._route.road_ids
), "Already called create_route()."
self._mission = mission or Mission.random_endless_mission(self._road_map)
if not self._mission.requires_route:
self._route = self._road_map.empty_route()
return
assert isinstance(self._mission.goal, PositionalGoal)
start_lanes = self._road_map.nearest_lanes(
self._mission.start.point,
include_junctions=True,
radius=start_lane_radius,
)
if not start_lanes:
self._mission = Mission.endless_mission(Pose.origin())
raise PlanningError("Starting lane not found. Route must start in a lane.")
via_roads = [self._road_map.road_by_id(via) for via in self._mission.route_vias]
end_lanes = self._road_map.nearest_lanes(
self._mission.goal.position,
include_junctions=False,
radius=end_lane_radius,
)
assert end_lanes is not None, "No end lane found. Route must end in a lane."
# When an agent is in an intersection, the `nearest_lanes` method might
# not return the correct road as the first choice. Hence, nearest
# starting lanes are tried in sequence until a route is found or until
# all nearby starting lane options are exhausted.
for end_lane, _ in end_lanes:
for start_lane, _ in start_lanes:
self._route = self._road_map.generate_routes(
start_lane, end_lane, via_roads, 1
)[0]
if self._route.road_length > 0:
break
if self._route.road_length > 0:
break
if len(self._route.roads) == 0:
self._mission = Mission.endless_mission(Pose.origin())
start_road_ids = [start_lane.road.road_id for start_lane, _ in start_lanes]
raise PlanningError(
"Unable to find a route between start={} and end={}. If either of "
"these are junctions (not well supported today) please switch to "
"roads and ensure there is a > 0 offset into the road if it is "
"after a junction.".format(start_road_ids, end_lane.road.road_id)
)
return self._mission
def frame(self) -> PlanFrame:
"""Get the state of this plan."""
assert self._mission
return PlanFrame(
road_ids=self._route.road_ids if self._route else [], mission=self._mission
)
@classmethod
def from_frame(cls, plan_frame: PlanFrame, road_map: RoadMap) -> "Plan":
"""Generate the plan from a frame."""
new_plan = cls(road_map=road_map, mission=plan_frame.mission, find_route=False)
new_plan.route = road_map.route_from_road_ids(plan_frame.road_ids)
return new_plan
| from_pose | identifier_name |
__init__.py | class CWrapPlugin(object):
"""Base class from which all cwrap plugins should inherit.
Override any of the following methods to implement the desired wrapping
behavior.
"""
def initialize(self, cwrap):
"""Initialize the Plugin class prior to calling any other functions.
It is used to give the Plugin access to the cwrap object's helper
functions and state.
Args:
cwrap: the cwrap object performing the wrapping.
"""
pass
def get_type_check(self, arg, option):
"""Used to generate code for runtime checks of object types.
The type can be found in arg['type']. For example, it could be
THTensor*. If this Plugin recognizes the type in arg, it should
return a Template string containing code that checks whether a
Python object is of this type. For example, the return type in
this case would be:
Template('(PyObject*)Py_TYPE($arg) == THPTensorClass')
As a simpler example, if the type == 'bool' then we would return:
Template('PyBool_Check($arg)')
Note that the name of the identifier that will be subsituted must be
$arg.
Args:
arg: a Python object with a 'type' field representing the type
to generate a check string for.
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding type check for the passed type.
"""
pass
def get_type_unpack(self, arg, option):
"""Used to generate code unpacking of Python objects into C types.
Similar to get_type_check, but for unpacking Python objects into their |
Template('((THPTensor*)$arg)->cdata')
For a simpler type, such as a long, we could do:
Template('PyLong_AsLong($arg)')
though in practice we will use our own custom unpacking code. Once
again, $arg must be used as the identifier.
Args:
arg: a Python object with a 'type' field representing the type
to generate a unpack string for.
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding type unpack for the passed type.
"""
pass
def get_return_wrapper(self, option):
"""Used to generate code wrapping a function's return value.
Wrapped functions should always return a PyObject *. However,
internally, the code will be working with C objects or primitives.
Therefore, if a function has a return value we need to convert it back
to a PyObject * before the function returns. Plugins can override this
function to generate wrapper code for returning specific C types. The
type is accessible via option['return'].
Continuing on with our THTensor* example, we might do something like:
Template('return THPTensor_(New)($result);')
In general, you want to do return <statement>; In this case, we call
into THP's library routine that takes a THTensor* (the $result
identifier) and returns a PyObject *.
For a bool, we could do Template('return PyBool_FromLong($result);').
Note that in other cases, our logic might be more complicated. For
example, if our return value is also an argument to the function call,
we could need to increase the reference count prior to returning.
Args:
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding return wrapper for the functions return
type or specifier.
"""
pass
def get_wrapper_template(self, declaration):
"""Used to create a code template to wrap the options.
This function returns a Template string that contains the function call
for the overall declaration, including the method definition, opening
and closing brackets, and any additional code within the method body.
Look through the examples to get a sense of what this might look like.
The only requirements are that it contains unsubstituted template
identifiers for anything the cwrap engine expects.
Note that for any declaration only one Plugin can generate the wrapper
template.
Args:
declaration: the declaration for the wrapped method.
Returns:
A template string representing the entire function declaration,
with identifiers as necessary.
"""
pass
def get_assign_args(self, arguments):
"""Used to modify argument metadata prior to assignment.
We have already setup argument checking, and how to unpack arguments.
This function allows you to modify the metadata of an argument prior to
actually performing the assignment. For example, you might want to
check that an argument is of a specific type, but when unpacking it you
might want to treat it as a different type. This function will allow
you to do stuff like that --> e.g. you could set the 'type' field for a
particular argument to be something else.
Args:
arguments: a list of argument metadata dictionaries.
Returns:
The same list of arguments, with any modifications as you see fit.
"""
pass
def get_arg_accessor(self, arg, option):
"""Used to generate a string for accessing the passed arg.
One of the key components of the YAML definition for a method to be
wrapped are the arguments to that method. Override this function to
show how to access that specific arg in the code. For example, you
might do something different if the argument is a keyword argument, or
a constant, or self. The base cwrap plugin has a fallback arg accessor
for loading elements from the args PyObject * tuple passed to the
function.
Its best to look at some of the existing Plugins to get a sense of what
one might do.
Args:
arg: a dictionary specifying attributes of the arg to be accessed
option: dictionary containing the information for this specific
option.
Returns:
A a string (note: not a Template string!) of code that can be used
to access the given arg. If the plugin does not know how to access
the arg, return None.
"""
pass
def process_full_file(self, code):
"""Used to modify the code for the entire output file.
The last thing any plugin can do. Code contains the results of wrapping
all the declarations. The plugin can do things like adding header
guards, include statements, etc.
Args:
code: a string source code for the wrapped declarations.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_single_check(self, code, arg, arg_accessor):
"""Used to postprocess a type check.
Above we defined a function get_type_check that returns a Template
string that allows for type checking a PyObject * for a specific type.
In this function, the passed "code" is a combination of that type check
along with a specific arg_accessor pasted in. For example:
'(PyObject*)Py_TYPE(PyTuple_GET_ITEM(args, 1)) == THPTensorClass'
This function can be overriden to support modifying this check string.
For example, if an argument can be null, we might want to check and see
if the type is Py_None, as well.
Args:
code: The string code representing a type check for a specific
argument being accessed.
arg: dictionary containing properties of that specific argument
arg_accessor: the arg_accessor string for that specific argument.
Note that this is likely also embedded in code, but if you want to
be able to access this arg and throw away the other code, you can
do so.
Returns:
A string representing the processed check/access string for this
arg. If the plugin does not know how to modify a specific input, it
should return the original code.
"""
return code
def process_all_checks(self, code, option):
"""Used to generate additional checks based on all the individual ones.
After individually processing each argument with get_type_check,
get_arg_accessor, process_single_check, this function allows you to
inspect the combined checks and do any additional checking/modify that
string as you see fit. In particular, given code is a string like:
CHECK_TYPE(GET_ARG(0)) && CHECK_TYPE(GET_ARG(1)) && ..
We can process it as we see fit. For example, we may want to add a
check at the beginning that we have the specified number of arguments.
Args:
code: A string representing each argument check separated by an
'&&'. code can be None if there are no arguments to be checked.
option: dictionary containing the information for this specific
option.
Returns:
The modified code string with any additional checks, or just the
existing code if no modifications are to be made.
"""
return code
def process_single_unpack(self, code, arg, arg_accessor):
"""Used to postprocess a type unpack.
Same as process_single_check above, but for type unpacking. E.g. an
example code could be:
PyLong_FromLong(PyTuple_GET_ITEM(args, 0))
And this code could modify that as it sees fit. For example, if the
result of accessing the argument is None, we would not want to call the
unpacking code.
Args:
code: The string code representing a type unpack for a specific
argument being accessed.
arg: dictionary containing properties of that specific argument
arg_accessor: the arg_accessor string for that specific argument.
Note that this is likely also embedded in code, but if you want to
be able to access this arg and throw away the other code, you can
do so.
Returns:
A string representing the processed unpack/access string for this
arg. If the plugin does not know how to modify a specific input, it
should return the original code.
"""
return code
def process_all_call_arg(self, code, option):
"""Used to modify the arguments to the underlying C function call.
Code is the string of comma-separated arguments that will be passed to
the wrapped C function. You can use this function to modify that string
as you see fit. For example, THP prepends the LIBRARY_STATE definition
so that the generated code will follow the conventions it uses for
writing one function for both TH/THC calls.
Args:
code: A string as described above.
option: dictionary containing the information for this specific
option.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_option_code(self, code, option):
"""Used to modify the entire code body for an option.
Code in this case is a string containing the entire generated code for
a specific option. Note that this body includes the checks for each
option, i.e. if (type checks for one permutation) { ... } else if (type
checks for another permutation) { ... } etc.
Args:
code: string representing the generated code for the option
option: dictionary containing the information for this specific
option.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_wrapper(self, code, declaration):
"""Used to modify the entire code body for a declaration.
Code in this case is a string containing the entire generated code for
a specific declaration. This code can be modified as the plugin sees
fit. For example, we might want to wrap the function in preprocessor
guards if it is only enabled for floats.
Args:
code: string representing the generated code for the declaration
declaration: the declaration metadata.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_declarations(self, declarations):
"""Used to process/modify the function's declaration.
Cwrap loads the YAML of a function to be cwrap'd into a dictionary.
This is known as the declaration. The cwrap code sets some defaults as
necessary, and then passes this dictionary to process_declarations.
Overriding this code allows the plugin to modify this declaration as it
sees fit prior to any code generation. The plugin may add, remove or
modify the fields of the declaration dictionary. It can also save state
to the Plugin for use in subsequent function overrides.
Its best to look at some of the existing Plugins to get a sense of what
one might do.
Args:
declarations: a list of declarations, i.e. dictionaries that define
the function(s) being wrapped. Note that this can be plural, so the
function must take care to modify each input declaration.
Returns:
Those same declarations, modified as the Plugin sees fit. Note that
you could insert a declaration, if you wanted to take an input
declaration and e.g. wrap it multiple times.
"""
return declarations
def process_option_code_template(self, template, option):
"""Used to modify the code template for the option.
The "code template" can be thought of the actual body implementing the
wrapped function call --> i.e. it is not the argument check,
assignment, etc. but the actual logic of the function. The template is
a list containing two operations: the $call, and the $return_result.
These represent the "locations" where the function call will happen,
and the function will return.
This function can modify the list to insert arbitrary code around the
$call and $return_result. For example, one might want to wrap the code
in a try/catch, or post-process the result in some way. This allows a
plugin to do that.
Args:
template: a list containing $call and $return_result, in addition
to any arbitrary code inserted by other plugins.
option: dictionary containing the information for this specific
option.
Returns:
The same "code template", possibly modified by this plugin.
"""
return template
def process_pre_arg_assign(self, template, option):
"""Used to include any code before argument assignment.
This function can be used to insert any code that will be part of the
resulting function. The code is inserted after argument checks occur,
but before argument assignment.
Args:
template: String representing the code to be inserted. If other
plugins have included code for pre_arg_assign, it will be included
here.
option: dictionary containing the information for this specific
option.
Returns:
template, with any additional code if needed.
"""
return template
from .StandaloneExtension import StandaloneExtension
from .NullableArguments import NullableArguments
from .OptionalArguments import OptionalArguments
from .ArgcountChecker import ArgcountChecker
from .ArgumentReferences import ArgumentReferences
from .BeforeAfterCall import BeforeAfterCall
from .ConstantArguments import ConstantArguments
from .ReturnArguments import ReturnArguments
from .GILRelease import GILRelease
from .AutoGPU import AutoGPU
from .CuDNNPlugin import CuDNNPlugin
from .GenericNN import GenericNN
from .WrapDim import WrapDim
from .Broadcast import Broadcast | corresponding C types. The type is once again accessible via
arg['type']. This time we return a Template string that unpacks an
object. For a THTensor*, we know that the corresponding PyTorch type is
a THPTensor*, so we need to get the cdata from the object. So we would
return: | random_line_split |
__init__.py |
class CWrapPlugin(object):
"""Base class from which all cwrap plugins should inherit.
Override any of the following methods to implement the desired wrapping
behavior.
"""
def initialize(self, cwrap):
"""Initialize the Plugin class prior to calling any other functions.
It is used to give the Plugin access to the cwrap object's helper
functions and state.
Args:
cwrap: the cwrap object performing the wrapping.
"""
pass
def get_type_check(self, arg, option):
"""Used to generate code for runtime checks of object types.
The type can be found in arg['type']. For example, it could be
THTensor*. If this Plugin recognizes the type in arg, it should
return a Template string containing code that checks whether a
Python object is of this type. For example, the return type in
this case would be:
Template('(PyObject*)Py_TYPE($arg) == THPTensorClass')
As a simpler example, if the type == 'bool' then we would return:
Template('PyBool_Check($arg)')
Note that the name of the identifier that will be subsituted must be
$arg.
Args:
arg: a Python object with a 'type' field representing the type
to generate a check string for.
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding type check for the passed type.
"""
pass
def get_type_unpack(self, arg, option):
"""Used to generate code unpacking of Python objects into C types.
Similar to get_type_check, but for unpacking Python objects into their
corresponding C types. The type is once again accessible via
arg['type']. This time we return a Template string that unpacks an
object. For a THTensor*, we know that the corresponding PyTorch type is
a THPTensor*, so we need to get the cdata from the object. So we would
return:
Template('((THPTensor*)$arg)->cdata')
For a simpler type, such as a long, we could do:
Template('PyLong_AsLong($arg)')
though in practice we will use our own custom unpacking code. Once
again, $arg must be used as the identifier.
Args:
arg: a Python object with a 'type' field representing the type
to generate a unpack string for.
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding type unpack for the passed type.
"""
pass
def get_return_wrapper(self, option):
"""Used to generate code wrapping a function's return value.
Wrapped functions should always return a PyObject *. However,
internally, the code will be working with C objects or primitives.
Therefore, if a function has a return value we need to convert it back
to a PyObject * before the function returns. Plugins can override this
function to generate wrapper code for returning specific C types. The
type is accessible via option['return'].
Continuing on with our THTensor* example, we might do something like:
Template('return THPTensor_(New)($result);')
In general, you want to do return <statement>; In this case, we call
into THP's library routine that takes a THTensor* (the $result
identifier) and returns a PyObject *.
For a bool, we could do Template('return PyBool_FromLong($result);').
Note that in other cases, our logic might be more complicated. For
example, if our return value is also an argument to the function call,
we could need to increase the reference count prior to returning.
Args:
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding return wrapper for the functions return
type or specifier.
"""
pass
def get_wrapper_template(self, declaration):
"""Used to create a code template to wrap the options.
This function returns a Template string that contains the function call
for the overall declaration, including the method definition, opening
and closing brackets, and any additional code within the method body.
Look through the examples to get a sense of what this might look like.
The only requirements are that it contains unsubstituted template
identifiers for anything the cwrap engine expects.
Note that for any declaration only one Plugin can generate the wrapper
template.
Args:
declaration: the declaration for the wrapped method.
Returns:
A template string representing the entire function declaration,
with identifiers as necessary.
"""
pass
def get_assign_args(self, arguments):
"""Used to modify argument metadata prior to assignment.
We have already setup argument checking, and how to unpack arguments.
This function allows you to modify the metadata of an argument prior to
actually performing the assignment. For example, you might want to
check that an argument is of a specific type, but when unpacking it you
might want to treat it as a different type. This function will allow
you to do stuff like that --> e.g. you could set the 'type' field for a
particular argument to be something else.
Args:
arguments: a list of argument metadata dictionaries.
Returns:
The same list of arguments, with any modifications as you see fit.
"""
pass
def get_arg_accessor(self, arg, option):
"""Used to generate a string for accessing the passed arg.
One of the key components of the YAML definition for a method to be
wrapped are the arguments to that method. Override this function to
show how to access that specific arg in the code. For example, you
might do something different if the argument is a keyword argument, or
a constant, or self. The base cwrap plugin has a fallback arg accessor
for loading elements from the args PyObject * tuple passed to the
function.
Its best to look at some of the existing Plugins to get a sense of what
one might do.
Args:
arg: a dictionary specifying attributes of the arg to be accessed
option: dictionary containing the information for this specific
option.
Returns:
A a string (note: not a Template string!) of code that can be used
to access the given arg. If the plugin does not know how to access
the arg, return None.
"""
pass
def process_full_file(self, code):
|
def process_single_check(self, code, arg, arg_accessor):
"""Used to postprocess a type check.
Above we defined a function get_type_check that returns a Template
string that allows for type checking a PyObject * for a specific type.
In this function, the passed "code" is a combination of that type check
along with a specific arg_accessor pasted in. For example:
'(PyObject*)Py_TYPE(PyTuple_GET_ITEM(args, 1)) == THPTensorClass'
This function can be overriden to support modifying this check string.
For example, if an argument can be null, we might want to check and see
if the type is Py_None, as well.
Args:
code: The string code representing a type check for a specific
argument being accessed.
arg: dictionary containing properties of that specific argument
arg_accessor: the arg_accessor string for that specific argument.
Note that this is likely also embedded in code, but if you want to
be able to access this arg and throw away the other code, you can
do so.
Returns:
A string representing the processed check/access string for this
arg. If the plugin does not know how to modify a specific input, it
should return the original code.
"""
return code
def process_all_checks(self, code, option):
"""Used to generate additional checks based on all the individual ones.
After individually processing each argument with get_type_check,
get_arg_accessor, process_single_check, this function allows you to
inspect the combined checks and do any additional checking/modify that
string as you see fit. In particular, given code is a string like:
CHECK_TYPE(GET_ARG(0)) && CHECK_TYPE(GET_ARG(1)) && ..
We can process it as we see fit. For example, we may want to add a
check at the beginning that we have the specified number of arguments.
Args:
code: A string representing each argument check separated by an
'&&'. code can be None if there are no arguments to be checked.
option: dictionary containing the information for this specific
option.
Returns:
The modified code string with any additional checks, or just the
existing code if no modifications are to be made.
"""
return code
def process_single_unpack(self, code, arg, arg_accessor):
"""Used to postprocess a type unpack.
Same as process_single_check above, but for type unpacking. E.g. an
example code could be:
PyLong_FromLong(PyTuple_GET_ITEM(args, 0))
And this code could modify that as it sees fit. For example, if the
result of accessing the argument is None, we would not want to call the
unpacking code.
Args:
code: The string code representing a type unpack for a specific
argument being accessed.
arg: dictionary containing properties of that specific argument
arg_accessor: the arg_accessor string for that specific argument.
Note that this is likely also embedded in code, but if you want to
be able to access this arg and throw away the other code, you can
do so.
Returns:
A string representing the processed unpack/access string for this
arg. If the plugin does not know how to modify a specific input, it
should return the original code.
"""
return code
def process_all_call_arg(self, code, option):
"""Used to modify the arguments to the underlying C function call.
Code is the string of comma-separated arguments that will be passed to
the wrapped C function. You can use this function to modify that string
as you see fit. For example, THP prepends the LIBRARY_STATE definition
so that the generated code will follow the conventions it uses for
writing one function for both TH/THC calls.
Args:
code: A string as described above.
option: dictionary containing the information for this specific
option.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_option_code(self, code, option):
"""Used to modify the entire code body for an option.
Code in this case is a string containing the entire generated code for
a specific option. Note that this body includes the checks for each
option, i.e. if (type checks for one permutation) { ... } else if (type
checks for another permutation) { ... } etc.
Args:
code: string representing the generated code for the option
option: dictionary containing the information for this specific
option.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_wrapper(self, code, declaration):
"""Used to modify the entire code body for a declaration.
Code in this case is a string containing the entire generated code for
a specific declaration. This code can be modified as the plugin sees
fit. For example, we might want to wrap the function in preprocessor
guards if it is only enabled for floats.
Args:
code: string representing the generated code for the declaration
declaration: the declaration metadata.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_declarations(self, declarations):
"""Used to process/modify the function's declaration.
Cwrap loads the YAML of a function to be cwrap'd into a dictionary.
This is known as the declaration. The cwrap code sets some defaults as
necessary, and then passes this dictionary to process_declarations.
Overriding this code allows the plugin to modify this declaration as it
sees fit prior to any code generation. The plugin may add, remove or
modify the fields of the declaration dictionary. It can also save state
to the Plugin for use in subsequent function overrides.
Its best to look at some of the existing Plugins to get a sense of what
one might do.
Args:
declarations: a list of declarations, i.e. dictionaries that define
the function(s) being wrapped. Note that this can be plural, so the
function must take care to modify each input declaration.
Returns:
Those same declarations, modified as the Plugin sees fit. Note that
you could insert a declaration, if you wanted to take an input
declaration and e.g. wrap it multiple times.
"""
return declarations
def process_option_code_template(self, template, option):
"""Used to modify the code template for the option.
The "code template" can be thought of the actual body implementing the
wrapped function call --> i.e. it is not the argument check,
assignment, etc. but the actual logic of the function. The template is
a list containing two operations: the $call, and the $return_result.
These represent the "locations" where the function call will happen,
and the function will return.
This function can modify the list to insert arbitrary code around the
$call and $return_result. For example, one might want to wrap the code
in a try/catch, or post-process the result in some way. This allows a
plugin to do that.
Args:
template: a list containing $call and $return_result, in addition
to any arbitrary code inserted by other plugins.
option: dictionary containing the information for this specific
option.
Returns:
The same "code template", possibly modified by this plugin.
"""
return template
def process_pre_arg_assign(self, template, option):
"""Used to include any code before argument assignment.
This function can be used to insert any code that will be part of the
resulting function. The code is inserted after argument checks occur,
but before argument assignment.
Args:
template: String representing the code to be inserted. If other
plugins have included code for pre_arg_assign, it will be included
here.
option: dictionary containing the information for this specific
option.
Returns:
template, with any additional code if needed.
"""
return template
from .StandaloneExtension import StandaloneExtension
from .NullableArguments import NullableArguments
from .OptionalArguments import OptionalArguments
from .ArgcountChecker import ArgcountChecker
from .ArgumentReferences import ArgumentReferences
from .BeforeAfterCall import BeforeAfterCall
from .ConstantArguments import ConstantArguments
from .ReturnArguments import ReturnArguments
from .GILRelease import GILRelease
from .AutoGPU import AutoGPU
from .CuDNNPlugin import CuDNNPlugin
from .GenericNN import GenericNN
from .WrapDim import WrapDim
from .Broadcast import Broadcast
| """Used to modify the code for the entire output file.
The last thing any plugin can do. Code contains the results of wrapping
all the declarations. The plugin can do things like adding header
guards, include statements, etc.
Args:
code: a string source code for the wrapped declarations.
Returns:
The same code, modified as the plugin sees fit.
"""
return code | identifier_body |
__init__.py |
class CWrapPlugin(object):
"""Base class from which all cwrap plugins should inherit.
Override any of the following methods to implement the desired wrapping
behavior.
"""
def initialize(self, cwrap):
"""Initialize the Plugin class prior to calling any other functions.
It is used to give the Plugin access to the cwrap object's helper
functions and state.
Args:
cwrap: the cwrap object performing the wrapping.
"""
pass
def get_type_check(self, arg, option):
"""Used to generate code for runtime checks of object types.
The type can be found in arg['type']. For example, it could be
THTensor*. If this Plugin recognizes the type in arg, it should
return a Template string containing code that checks whether a
Python object is of this type. For example, the return type in
this case would be:
Template('(PyObject*)Py_TYPE($arg) == THPTensorClass')
As a simpler example, if the type == 'bool' then we would return:
Template('PyBool_Check($arg)')
Note that the name of the identifier that will be subsituted must be
$arg.
Args:
arg: a Python object with a 'type' field representing the type
to generate a check string for.
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding type check for the passed type.
"""
pass
def get_type_unpack(self, arg, option):
"""Used to generate code unpacking of Python objects into C types.
Similar to get_type_check, but for unpacking Python objects into their
corresponding C types. The type is once again accessible via
arg['type']. This time we return a Template string that unpacks an
object. For a THTensor*, we know that the corresponding PyTorch type is
a THPTensor*, so we need to get the cdata from the object. So we would
return:
Template('((THPTensor*)$arg)->cdata')
For a simpler type, such as a long, we could do:
Template('PyLong_AsLong($arg)')
though in practice we will use our own custom unpacking code. Once
again, $arg must be used as the identifier.
Args:
arg: a Python object with a 'type' field representing the type
to generate a unpack string for.
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding type unpack for the passed type.
"""
pass
def get_return_wrapper(self, option):
"""Used to generate code wrapping a function's return value.
Wrapped functions should always return a PyObject *. However,
internally, the code will be working with C objects or primitives.
Therefore, if a function has a return value we need to convert it back
to a PyObject * before the function returns. Plugins can override this
function to generate wrapper code for returning specific C types. The
type is accessible via option['return'].
Continuing on with our THTensor* example, we might do something like:
Template('return THPTensor_(New)($result);')
In general, you want to do return <statement>; In this case, we call
into THP's library routine that takes a THTensor* (the $result
identifier) and returns a PyObject *.
For a bool, we could do Template('return PyBool_FromLong($result);').
Note that in other cases, our logic might be more complicated. For
example, if our return value is also an argument to the function call,
we could need to increase the reference count prior to returning.
Args:
option: dictionary containing the information for this specific
option.
Returns:
A Template string as described above, or None if this Plugin does
not have a corresponding return wrapper for the functions return
type or specifier.
"""
pass
def | (self, declaration):
"""Used to create a code template to wrap the options.
This function returns a Template string that contains the function call
for the overall declaration, including the method definition, opening
and closing brackets, and any additional code within the method body.
Look through the examples to get a sense of what this might look like.
The only requirements are that it contains unsubstituted template
identifiers for anything the cwrap engine expects.
Note that for any declaration only one Plugin can generate the wrapper
template.
Args:
declaration: the declaration for the wrapped method.
Returns:
A template string representing the entire function declaration,
with identifiers as necessary.
"""
pass
def get_assign_args(self, arguments):
"""Used to modify argument metadata prior to assignment.
We have already setup argument checking, and how to unpack arguments.
This function allows you to modify the metadata of an argument prior to
actually performing the assignment. For example, you might want to
check that an argument is of a specific type, but when unpacking it you
might want to treat it as a different type. This function will allow
you to do stuff like that --> e.g. you could set the 'type' field for a
particular argument to be something else.
Args:
arguments: a list of argument metadata dictionaries.
Returns:
The same list of arguments, with any modifications as you see fit.
"""
pass
def get_arg_accessor(self, arg, option):
"""Used to generate a string for accessing the passed arg.
One of the key components of the YAML definition for a method to be
wrapped are the arguments to that method. Override this function to
show how to access that specific arg in the code. For example, you
might do something different if the argument is a keyword argument, or
a constant, or self. The base cwrap plugin has a fallback arg accessor
for loading elements from the args PyObject * tuple passed to the
function.
Its best to look at some of the existing Plugins to get a sense of what
one might do.
Args:
arg: a dictionary specifying attributes of the arg to be accessed
option: dictionary containing the information for this specific
option.
Returns:
A a string (note: not a Template string!) of code that can be used
to access the given arg. If the plugin does not know how to access
the arg, return None.
"""
pass
def process_full_file(self, code):
"""Used to modify the code for the entire output file.
The last thing any plugin can do. Code contains the results of wrapping
all the declarations. The plugin can do things like adding header
guards, include statements, etc.
Args:
code: a string source code for the wrapped declarations.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_single_check(self, code, arg, arg_accessor):
"""Used to postprocess a type check.
Above we defined a function get_type_check that returns a Template
string that allows for type checking a PyObject * for a specific type.
In this function, the passed "code" is a combination of that type check
along with a specific arg_accessor pasted in. For example:
'(PyObject*)Py_TYPE(PyTuple_GET_ITEM(args, 1)) == THPTensorClass'
This function can be overriden to support modifying this check string.
For example, if an argument can be null, we might want to check and see
if the type is Py_None, as well.
Args:
code: The string code representing a type check for a specific
argument being accessed.
arg: dictionary containing properties of that specific argument
arg_accessor: the arg_accessor string for that specific argument.
Note that this is likely also embedded in code, but if you want to
be able to access this arg and throw away the other code, you can
do so.
Returns:
A string representing the processed check/access string for this
arg. If the plugin does not know how to modify a specific input, it
should return the original code.
"""
return code
def process_all_checks(self, code, option):
"""Used to generate additional checks based on all the individual ones.
After individually processing each argument with get_type_check,
get_arg_accessor, process_single_check, this function allows you to
inspect the combined checks and do any additional checking/modify that
string as you see fit. In particular, given code is a string like:
CHECK_TYPE(GET_ARG(0)) && CHECK_TYPE(GET_ARG(1)) && ..
We can process it as we see fit. For example, we may want to add a
check at the beginning that we have the specified number of arguments.
Args:
code: A string representing each argument check separated by an
'&&'. code can be None if there are no arguments to be checked.
option: dictionary containing the information for this specific
option.
Returns:
The modified code string with any additional checks, or just the
existing code if no modifications are to be made.
"""
return code
def process_single_unpack(self, code, arg, arg_accessor):
"""Used to postprocess a type unpack.
Same as process_single_check above, but for type unpacking. E.g. an
example code could be:
PyLong_FromLong(PyTuple_GET_ITEM(args, 0))
And this code could modify that as it sees fit. For example, if the
result of accessing the argument is None, we would not want to call the
unpacking code.
Args:
code: The string code representing a type unpack for a specific
argument being accessed.
arg: dictionary containing properties of that specific argument
arg_accessor: the arg_accessor string for that specific argument.
Note that this is likely also embedded in code, but if you want to
be able to access this arg and throw away the other code, you can
do so.
Returns:
A string representing the processed unpack/access string for this
arg. If the plugin does not know how to modify a specific input, it
should return the original code.
"""
return code
def process_all_call_arg(self, code, option):
"""Used to modify the arguments to the underlying C function call.
Code is the string of comma-separated arguments that will be passed to
the wrapped C function. You can use this function to modify that string
as you see fit. For example, THP prepends the LIBRARY_STATE definition
so that the generated code will follow the conventions it uses for
writing one function for both TH/THC calls.
Args:
code: A string as described above.
option: dictionary containing the information for this specific
option.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_option_code(self, code, option):
"""Used to modify the entire code body for an option.
Code in this case is a string containing the entire generated code for
a specific option. Note that this body includes the checks for each
option, i.e. if (type checks for one permutation) { ... } else if (type
checks for another permutation) { ... } etc.
Args:
code: string representing the generated code for the option
option: dictionary containing the information for this specific
option.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_wrapper(self, code, declaration):
"""Used to modify the entire code body for a declaration.
Code in this case is a string containing the entire generated code for
a specific declaration. This code can be modified as the plugin sees
fit. For example, we might want to wrap the function in preprocessor
guards if it is only enabled for floats.
Args:
code: string representing the generated code for the declaration
declaration: the declaration metadata.
Returns:
The same code, modified as the plugin sees fit.
"""
return code
def process_declarations(self, declarations):
"""Used to process/modify the function's declaration.
Cwrap loads the YAML of a function to be cwrap'd into a dictionary.
This is known as the declaration. The cwrap code sets some defaults as
necessary, and then passes this dictionary to process_declarations.
Overriding this code allows the plugin to modify this declaration as it
sees fit prior to any code generation. The plugin may add, remove or
modify the fields of the declaration dictionary. It can also save state
to the Plugin for use in subsequent function overrides.
Its best to look at some of the existing Plugins to get a sense of what
one might do.
Args:
declarations: a list of declarations, i.e. dictionaries that define
the function(s) being wrapped. Note that this can be plural, so the
function must take care to modify each input declaration.
Returns:
Those same declarations, modified as the Plugin sees fit. Note that
you could insert a declaration, if you wanted to take an input
declaration and e.g. wrap it multiple times.
"""
return declarations
def process_option_code_template(self, template, option):
"""Used to modify the code template for the option.
The "code template" can be thought of the actual body implementing the
wrapped function call --> i.e. it is not the argument check,
assignment, etc. but the actual logic of the function. The template is
a list containing two operations: the $call, and the $return_result.
These represent the "locations" where the function call will happen,
and the function will return.
This function can modify the list to insert arbitrary code around the
$call and $return_result. For example, one might want to wrap the code
in a try/catch, or post-process the result in some way. This allows a
plugin to do that.
Args:
template: a list containing $call and $return_result, in addition
to any arbitrary code inserted by other plugins.
option: dictionary containing the information for this specific
option.
Returns:
The same "code template", possibly modified by this plugin.
"""
return template
def process_pre_arg_assign(self, template, option):
"""Used to include any code before argument assignment.
This function can be used to insert any code that will be part of the
resulting function. The code is inserted after argument checks occur,
but before argument assignment.
Args:
template: String representing the code to be inserted. If other
plugins have included code for pre_arg_assign, it will be included
here.
option: dictionary containing the information for this specific
option.
Returns:
template, with any additional code if needed.
"""
return template
from .StandaloneExtension import StandaloneExtension
from .NullableArguments import NullableArguments
from .OptionalArguments import OptionalArguments
from .ArgcountChecker import ArgcountChecker
from .ArgumentReferences import ArgumentReferences
from .BeforeAfterCall import BeforeAfterCall
from .ConstantArguments import ConstantArguments
from .ReturnArguments import ReturnArguments
from .GILRelease import GILRelease
from .AutoGPU import AutoGPU
from .CuDNNPlugin import CuDNNPlugin
from .GenericNN import GenericNN
from .WrapDim import WrapDim
from .Broadcast import Broadcast
| get_wrapper_template | identifier_name |
lib.rs | /*!
# Strip MapMaking library
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur facilisis consectetur arcu. Etiam semper, sem sit amet lacinia dignissim, mauris eros rutrum massa, a imperdiet orci urna vel elit. Nulla at sagittis lacus. Curabitur eu gravida turpis. Mauris blandit porta orci. Aliquam fringilla felis a sem aliquet rhoncus. Suspendisse porta, mi vel euismod porta, mi ex cursus diam, quis iaculis sapien massa eget massa. Fusce sit amet neque vel turpis interdum tempus et non nisl. Nunc aliquam nunc vitae justo accumsan pretium. Morbi eget urna quis ex pellentesque molestie. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer vehicula vehicula tortor sit amet dignissim. Duis finibus, felis ut fringilla tincidunt, mi lectus fermentum eros, ut laoreet justo lacus id urna.
Duis iaculis faucibus mollis. Maecenas dignissim efficitur ex. Sed pulvinar justo a arcu lobortis imperdiet. Suspendisse placerat venenatis volutpat. Aenean eu nulla vitae libero porta dignissim ut sit amet ante. Vestibulum porttitor sodales nibh, nec imperdiet tortor accumsan quis. Ut sagittis arcu eu efficitur varius. Etiam at ex condimentum, volutpat ipsum sed, posuere nibh. Sed posuere fringilla mi in commodo. Ut sodales, elit volutpat finibus dapibus, dui lacus porttitor enim, ac placerat erat ligula quis ipsum. Morbi sagittis et nisl mollis fringilla. Praesent commodo faucibus erat, nec congue lectus finibus vitae. Sed eu ipsum in lorem congue vehicula.
# Using the Strip MapMaking
Duis iaculis faucibus mollis. Maecenas dignissim efficitur ex. Sed pulvinar justo a arcu lobortis imperdiet. Suspendisse placerat venenatis volutpat. Aenean eu nulla vitae libero porta dignissim ut sit amet ante. Vestibulum porttitor sodales nibh, nec imperdiet tortor accumsan quis. Ut sagittis arcu eu efficitur varius. Etiam at ex condimentum, volutpat ipsum sed, posuere nibh. Sed posuere fringilla mi in commodo. Ut sodales, elit volutpat finibus dapibus, dui lacus porttitor enim, ac placerat erat ligula quis ipsum. Morbi sagittis et nisl mollis fringilla. Praesent commodo faucibus erat, nec congue lectus finibus vitae. Sed eu ipsum in lorem congue vehicula.
*/
extern crate rustfft;
pub mod directory;
pub mod iteratorscustom;
pub mod sky;
pub mod threadpool;
pub mod misc;
pub mod noisemodel;
pub mod plot_suite;
pub mod conjugategradient;
use threadpool::ThreadPool;
use std::{fs::File, io::Write, sync::mpsc, usize, vec};
use colored::Colorize;
use conjugategradient::conjgrad;
use iteratorscustom::FloatIterator;
use num::{ToPrimitive, complex::Complex32};
use rustfft::{FftPlanner, num_complex::Complex};
// use rustfft::algorithm::Radix4;
// use noisemodel::NoiseModel;
// use std::time::Instant;
// use gnuplot::*;
use std::marker::PhantomData;
#[derive(Debug)]
pub struct Obs <'a> {
start: String,
stop: String,
detector: Vec<String>,
mc_id: u8,
alpha: f32,
f_knee: f32,
pix: Vec<Vec<i32>>,
tod: Vec<Vec<f32>>,
sky_t: Vec<f32>,
phantom: PhantomData<&'a f32>,
}
// ```
// Documentation
// Creation function
// ```
impl <'a> Obs <'a> {
pub fn new(
start: String,
stop: String,
detector: Vec<String>,
mc_id: u8,
alpha: f32,
f_knee: f32,
tod: Vec<Vec<f32>>,
sky: Vec<f32>,
pix: Vec<Vec<i32>> ) -> Self
{
let mut tod_final: Vec<Vec<f32>> = Vec::new();
for (i, j) in tod.iter().zip(pix.iter()){
//let noise = NoiseModel::new(50.0, 7e9, 1.0/20.0, 0.1, 1.0, 123, i.len());
//let tod_noise = noise.get_noise_tod();
let mut tmp: Vec<f32> = Vec::new();
for (_n, (k, l)) in i.into_iter().zip(j.iter()).enumerate(){
let t_sky = sky[match l.to_usize() {Some(p) => p, None=>0}];
//let r = tod_noise[_n];
tmp.push(0.54*k + t_sky);
}
tod_final.push(tmp);
}
return Obs {
start,
stop,
detector,
mc_id,
alpha,
f_knee,
pix,
tod: tod_final,
sky_t: sky,
phantom: PhantomData,
}
}
}
// The `get` methods
impl <'a> Obs <'a>{
pub fn get_start(&self) -> &String {
&self.start
}
pub fn get_stop(&self) -> &String {
&self.stop
}
pub fn get_detector(&self) -> &Vec<String> {
&self.detector
}
pub fn get_mcid(&self) -> &u8 |
pub fn get_pix(&self) -> &Vec<Vec<i32>> {
&self.pix
}
pub fn get_tod(&self) -> &Vec<Vec<f32>> {
&self.tod
}
}
// Mitigation of the systematic effects
// Starting from the binning, to the
// implementation of a de_noise model
impl <'a> Obs <'a>{
pub fn binning(&self) -> (Vec<f32>, Vec<i32>) {
println!("");
println!("Start {}", "binning".bright_blue().bold());
const NSIDE: usize = 128;
const NUM_PIX: usize = NSIDE*NSIDE*12;
let mut signal_maps: Vec<Vec<f32>> = Vec::new();
let mut hit_maps: Vec<Vec<i32>> = Vec::new();
let pix = &self.pix;
let tod = &self.tod;
let num_threads = num_cpus::get();
let bin_pool = ThreadPool::new(num_threads);
let (tx, rx) = mpsc::channel();
for i in 0..tod.len() {
let t = tod[i].clone();
let p = pix[i].clone();
let tx = tx.clone();
bin_pool.execute(move ||{
let (sig_par, hit_par) = bin_map(t.clone(), p.clone(), 128);
tx.send((sig_par, hit_par)).unwrap();
});
}
for _i in 0..tod.len() {
let rec = rx.recv().unwrap();
signal_maps.push(rec.0.clone());
hit_maps.push(rec.1.clone());
}
let mut final_sig: Vec<f32> = vec![0.0; NUM_PIX];
let mut final_hit: Vec<i32> = vec![0; NUM_PIX];
for idx in 0..signal_maps.len() {
let s = signal_maps[idx].clone();
let h = hit_maps[idx].clone();
for pidx in 0..NUM_PIX{
let signal = s[pidx];
let hits = h[pidx];
final_sig[pidx] += signal;
final_hit[pidx] += hits;
}
}
println!("{}", "COMPLETED".bright_green());
/***PRINT ON FILE */
println!("");
let id_number = self.get_mcid();
let file_name = format!("binned_{}.dat", id_number);
println!("Print maps on file: {}", file_name.bright_green().bold());
let mut f = File::create(file_name).unwrap();
let hit: Vec<String> = final_hit.iter().map(|a| a.to_string()).collect();
let sig: Vec<String> = final_sig.iter().map(|a| a.to_string()).collect();
for (i,j) in hit.iter().zip(sig.iter()) {
writeln!(f, "{}\t{}",i, j).unwrap();
}
println!("{}", "COMPLETED".bright_green());
(final_sig, final_hit)
}
}
// GLS DENOISE
impl <'a> Obs <'a>{
pub fn gls_denoise(&self, _tol: f32, _maxiter: usize, _nside: usize){
println!("{}", "Execution of the gls_denoise".bright_blue().bold());
const NUM_PIX: usize = 12*128*128;
let _x: Vec<f32> = vec![0.0; NUM_PIX];
let (tx, rx) = mpsc::channel();
let tods = &self.tod;
let pixs = &self.pix;
//println!("Len TODs: {}", tods.len()); // 55 * n_hour
let mut partial_maps: Vec<Vec<f32>> = Vec::new();
let num_threads = num_cpus::get();
let my_pool_b = ThreadPool::new(num_threads);
for idx_th in 0..tods.len() {
let t = tods[idx_th].clone();
let p = pixs[idx_th].clone();
let tx = tx.clone();
my_pool_b.execute(move || {
let b = get_b(t, p, 128);
tx.send(b).expect("channel will be there waiting for the pool");
});
}
for _ in 0..tods.len(){
partial_maps.push(rx.recv().unwrap());
}
let mut b: Vec<f32> = vec![0.0; 12*128*128];
for i in partial_maps.iter(){
for (n,j) in i.iter().enumerate(){
b[n] += j;
}
}
let map = conjgrad(a(), b, _tol, _maxiter, p(), pixs.clone());
/***PRINT ON FILE */
println!("");
let id_number = self.get_mcid();
let file_name = format!("gls_{}.dat", id_number);
println!("Print maps on file: {}", file_name.bright_green().bold());
let mut f = File::create(file_name).unwrap();
let sig: Vec<String> = map.iter().map(|a| a.to_string()).collect();
for i in sig.iter() {
writeln!(f, "{}",i).unwrap();
}
println!("{}", "COMPLETED".bright_green());
}// End of GLS_DENOISE
}
// UTILS functions
pub fn fn_noise_prior(f: f32, alpha: f32, f_k: f32, sigma: f32, _n: f32) -> f32 {
let mut _np: f32 = 0.0;
if f > 0.0 {
let _np_g = f32::exp( -((10.0 - f) * (10.0-f)) / (2.0 * 0.0002));
_np = sigma * f32::powf( 1.0 + f_k/(10.0-f), alpha.clone()) + 8E8 * _np_g ;
} else {
let _np_g = f32::exp( -((10.0 + f) * (10.0-f)) / (2.0 * 0.0002));
_np = sigma*sigma * f32::powf( 1.0 + f_k/(10.0+f), alpha.clone()) + 8E8 * _np_g;
}
_np
}
pub fn kaiser(beta: f32, length: i32) -> Vec<f32> {
use crate::misc::bessel_i0 as bessel;
let mut window: Vec<f32> = Vec::new();
let start: f32 = (-(length - 1) / 2) as f32;
let end: f32 = ((length - 1) / 2) as f32;
let n_idx = iteratorscustom::FloatIterator::new(start, end, match length.to_u32(){Some(p) => p, None => 0});
for n in n_idx {
let m = length as f32;
window.push(bessel(beta * (1. - (n / (m / 2.)).powi(2)).sqrt()) / bessel(beta))
}// 70-80
window
}
pub fn hann(length: i32) -> Vec<f32> {
let mut window: Vec<f32> = Vec::new();
let n_idx = iteratorscustom::FloatIterator::new(0.0, length as f32, match length.to_u32(){Some(p) => p, None => 0});
for n in n_idx {
window.push(f32::powi( f32::sin(3.141592 * n / (length as f32)), 2));
}
window
}
pub fn denoise(tod: Vec<f32>, _alpha: f32, _f_k: f32, _sigma: f32, _fs: f32) -> Vec<f32> {
let win: Vec<f32> = kaiser(5.0, match tod.len().to_i32(){Some(p)=>p, None=> 0});//70!!!!!
//let win: Vec<f32> = hann( match tod.len().to_i32(){Some(p)=>p, None=> 0});
// let mut fg = Figure::new();
// fg.axes2d().lines(0..win.len(), win.clone(), &[Caption("Kaiser")]).
// lines(0..win.len(), win_h, &[Caption("Hanning")]);
// fg.show().unwrap();
//let now = Instant::now();
let mut input: Vec<Complex<f32>> = tod.iter().zip(win.iter()).map(|x| Complex32::new(
*x.0 *x.1,
0.0)).
collect(); // ~0
//println!("Denoise process: {}", now.elapsed().as_millis());
let mut planner = FftPlanner::new();
let fft = planner.plan_fft_forward(tod.len()); // 10
//let fft = Radix4::new()
fft.process(&mut input); // 50
let mut noise_p: Vec<f32> = Vec::new();
let freq_iter: FloatIterator = FloatIterator::new(-10.0, 10.0, match input.len().to_u32() {Some(p) => p, None => 0});
let mut freq: Vec<f32> = Vec::new();
for f in freq_iter {
noise_p.push(fn_noise_prior(f, _alpha, _f_k, _sigma, match tod.len().to_f32(){Some(p)=>p, None=>0.0} ));
freq.push(f);
}
// Denoise
let mut tod_denoised: Vec<Complex32> = input.iter().zip(noise_p.iter()).map(|(a, b)| {
let (_, angle) = a.to_polar();
let module = a.norm()/b;
Complex32::from_polar(module, angle)
}).collect();
// let mut fg = Figure::new();
// fg.axes2d().points(0..input.len()/2, input.iter().map(|t| t.re).collect::<Vec<f32>>(), &[Caption("FFT - raw")]).set_x_log(Some(10.0)).set_y_log(Some(10.0)).
// points(0..tod_denoised.len()/2, tod_denoised.iter().map(|f| f.re).collect::<Vec<f32>>(), &[Caption("FFT - denoised")]).set_x_log(Some(10.0)).set_y_log(Some(10.0)).
// lines(0..noise_p.len()/2, noise_p, &[Caption("Noise model")]);
// fg.show().unwrap();
let mut planner = FftPlanner::new(); // 60
let ifft = planner.plan_fft_inverse(tod.len());
ifft.process(&mut tod_denoised);
let tod_denoised: Vec<Complex32> = tod_denoised.iter().map(|c| {
let (module, angle) = c.to_polar();
let module2 = module / (tod.len() as f32);
Complex32::from_polar(module2, angle)
}).collect();
let tod_real: Vec<f32> = tod_denoised.iter().zip( win.iter()).map(|t| t.0.re / t.1 ).collect();
// let mut fg = Figure::new();
// fg.axes2d().lines(0..tod_real.len(), tod_real.clone(), &[Caption("TOD denoised")]).lines(0..tod.len(), tod, &[Caption("TOD RAW")]);
// fg.show().unwrap();
tod_real
}
pub fn get_b(tod: Vec<f32>, pix: Vec<i32>, nside: usize) -> Vec<f32> {
let mut b: Vec<f32> = vec![0.0; 12*128*128];
let tod_n = denoise(tod.clone(), 4.0/3.0, 7.0, 30.0, 20.0);
let (map, _) = bin_map(tod_n.clone(), pix, nside);
for i in 0..12*nside*nside {
b[i] += map[i];
}
b
}
fn a() -> Box<dyn Fn(Vec<f32>, Vec<Vec<i32>>) -> Vec<f32>> {
Box::new(|_x: Vec<f32>, pointings: Vec<Vec<i32>>| {
let mut temp_maps: Vec<Vec<f32>> = Vec::new();
let mut res: Vec<f32> = vec![0.0; 12*128*128];
let num_threads = num_cpus::get();
let pool_denoise = ThreadPool::new(num_threads);
let (tx, rx) = mpsc::channel();
for i_det in pointings.iter() {
let mut tmp: Vec<f32> = Vec::new();
let tx = tx.clone();
let point_i = i_det.clone();
let x = _x.clone();
pool_denoise.execute(move ||{
for i in point_i.iter() {
tmp.push(x[*i as usize]);
}
let tmp_denoised = denoise(tmp.clone(), 4.0/3.0, 7.0, 30.0, 20.0);
let (map, _) = bin_map(tmp_denoised.clone(), point_i.clone(), 128);
// let mut final_map = Vec::new();
// for (m, h) in map.iter().zip(hit.iter()) {
// final_map.push(m.clone()/(h.clone() as f32));
// }
tx.send(map).unwrap();
});
}
for _i in 0..pointings.len() {
temp_maps.push(rx.recv().unwrap());
}
for map in temp_maps.iter() {
res = res.iter().zip(map.iter()).map(|(r, m)| r+m).collect::<Vec<f32>>();
}
res
})
}
pub fn p() -> Box<dyn Fn(Vec<f32>) -> Vec<f32>> {
Box::new(|m| m.iter().map(|m| { 1.0*m } ).collect::<Vec<f32>>())
}
pub fn bin_map(tod: Vec<f32>, pix: Vec<i32>, nside: usize) -> (Vec<f32>, Vec<i32>) {
let num_pixs: usize = 12*nside*nside;
let mut signal_map: Vec<f32> = vec![0.0; num_pixs];
let mut hit_map: Vec<i32> = vec![0; num_pixs];
let mut iterator: usize = 0;
for i in pix.iter() {
let pixel = match i.to_usize(){Some(p)=> p, None=> 0};
hit_map[pixel] += 1;
signal_map[pixel] += tod[iterator];
iterator += 1;
}
(signal_map, hit_map)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_create_obs() {
let nside = 128;
let sky = vec![0.0; 12*nside*nside];
let obs = Obs::new(
String::from("start"),
String::from("stop"),
vec![String::from("det1")],
1,
1.0,
0.01,
vec![vec![300.0, 300.0, 300.0]],
sky,
vec![vec![1, 1, 3]]
);
let bin_map = obs.binning();
let sig_1_map = bin_map.0[1];
let pix_1_map = bin_map.1[1];
assert_eq!(sig_1_map, 0.54*(300.0+300.0));
assert_eq!(pix_1_map, 2);
}
} | {
&self.mc_id
} | identifier_body |
lib.rs | /*!
# Strip MapMaking library
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur facilisis consectetur arcu. Etiam semper, sem sit amet lacinia dignissim, mauris eros rutrum massa, a imperdiet orci urna vel elit. Nulla at sagittis lacus. Curabitur eu gravida turpis. Mauris blandit porta orci. Aliquam fringilla felis a sem aliquet rhoncus. Suspendisse porta, mi vel euismod porta, mi ex cursus diam, quis iaculis sapien massa eget massa. Fusce sit amet neque vel turpis interdum tempus et non nisl. Nunc aliquam nunc vitae justo accumsan pretium. Morbi eget urna quis ex pellentesque molestie. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer vehicula vehicula tortor sit amet dignissim. Duis finibus, felis ut fringilla tincidunt, mi lectus fermentum eros, ut laoreet justo lacus id urna.
Duis iaculis faucibus mollis. Maecenas dignissim efficitur ex. Sed pulvinar justo a arcu lobortis imperdiet. Suspendisse placerat venenatis volutpat. Aenean eu nulla vitae libero porta dignissim ut sit amet ante. Vestibulum porttitor sodales nibh, nec imperdiet tortor accumsan quis. Ut sagittis arcu eu efficitur varius. Etiam at ex condimentum, volutpat ipsum sed, posuere nibh. Sed posuere fringilla mi in commodo. Ut sodales, elit volutpat finibus dapibus, dui lacus porttitor enim, ac placerat erat ligula quis ipsum. Morbi sagittis et nisl mollis fringilla. Praesent commodo faucibus erat, nec congue lectus finibus vitae. Sed eu ipsum in lorem congue vehicula.
# Using the Strip MapMaking
Duis iaculis faucibus mollis. Maecenas dignissim efficitur ex. Sed pulvinar justo a arcu lobortis imperdiet. Suspendisse placerat venenatis volutpat. Aenean eu nulla vitae libero porta dignissim ut sit amet ante. Vestibulum porttitor sodales nibh, nec imperdiet tortor accumsan quis. Ut sagittis arcu eu efficitur varius. Etiam at ex condimentum, volutpat ipsum sed, posuere nibh. Sed posuere fringilla mi in commodo. Ut sodales, elit volutpat finibus dapibus, dui lacus porttitor enim, ac placerat erat ligula quis ipsum. Morbi sagittis et nisl mollis fringilla. Praesent commodo faucibus erat, nec congue lectus finibus vitae. Sed eu ipsum in lorem congue vehicula.
*/
extern crate rustfft;
pub mod directory;
pub mod iteratorscustom;
pub mod sky;
pub mod threadpool;
pub mod misc;
pub mod noisemodel;
pub mod plot_suite;
pub mod conjugategradient;
use threadpool::ThreadPool;
use std::{fs::File, io::Write, sync::mpsc, usize, vec};
use colored::Colorize;
use conjugategradient::conjgrad;
use iteratorscustom::FloatIterator;
use num::{ToPrimitive, complex::Complex32};
use rustfft::{FftPlanner, num_complex::Complex};
// use rustfft::algorithm::Radix4;
// use noisemodel::NoiseModel;
// use std::time::Instant;
// use gnuplot::*;
use std::marker::PhantomData;
#[derive(Debug)]
pub struct Obs <'a> {
start: String,
stop: String,
detector: Vec<String>,
mc_id: u8,
alpha: f32,
f_knee: f32,
pix: Vec<Vec<i32>>,
tod: Vec<Vec<f32>>,
sky_t: Vec<f32>,
phantom: PhantomData<&'a f32>,
}
// ```
// Documentation
// Creation function
// ```
impl <'a> Obs <'a> {
pub fn new(
start: String,
stop: String,
detector: Vec<String>,
mc_id: u8,
alpha: f32,
f_knee: f32,
tod: Vec<Vec<f32>>,
sky: Vec<f32>,
pix: Vec<Vec<i32>> ) -> Self
{
let mut tod_final: Vec<Vec<f32>> = Vec::new();
for (i, j) in tod.iter().zip(pix.iter()){
//let noise = NoiseModel::new(50.0, 7e9, 1.0/20.0, 0.1, 1.0, 123, i.len());
//let tod_noise = noise.get_noise_tod();
let mut tmp: Vec<f32> = Vec::new();
for (_n, (k, l)) in i.into_iter().zip(j.iter()).enumerate(){
let t_sky = sky[match l.to_usize() {Some(p) => p, None=>0}];
//let r = tod_noise[_n];
tmp.push(0.54*k + t_sky);
}
tod_final.push(tmp);
}
return Obs {
start,
stop,
detector,
mc_id,
alpha,
f_knee,
pix,
tod: tod_final,
sky_t: sky,
phantom: PhantomData,
}
}
}
// The `get` methods
impl <'a> Obs <'a>{
pub fn get_start(&self) -> &String {
&self.start
}
pub fn get_stop(&self) -> &String {
&self.stop
}
pub fn get_detector(&self) -> &Vec<String> {
&self.detector
}
pub fn get_mcid(&self) -> &u8 {
&self.mc_id
}
pub fn | (&self) -> &Vec<Vec<i32>> {
&self.pix
}
pub fn get_tod(&self) -> &Vec<Vec<f32>> {
&self.tod
}
}
// Mitigation of the systematic effects
// Starting from the binning, to the
// implementation of a de_noise model
impl <'a> Obs <'a>{
pub fn binning(&self) -> (Vec<f32>, Vec<i32>) {
println!("");
println!("Start {}", "binning".bright_blue().bold());
const NSIDE: usize = 128;
const NUM_PIX: usize = NSIDE*NSIDE*12;
let mut signal_maps: Vec<Vec<f32>> = Vec::new();
let mut hit_maps: Vec<Vec<i32>> = Vec::new();
let pix = &self.pix;
let tod = &self.tod;
let num_threads = num_cpus::get();
let bin_pool = ThreadPool::new(num_threads);
let (tx, rx) = mpsc::channel();
for i in 0..tod.len() {
let t = tod[i].clone();
let p = pix[i].clone();
let tx = tx.clone();
bin_pool.execute(move ||{
let (sig_par, hit_par) = bin_map(t.clone(), p.clone(), 128);
tx.send((sig_par, hit_par)).unwrap();
});
}
for _i in 0..tod.len() {
let rec = rx.recv().unwrap();
signal_maps.push(rec.0.clone());
hit_maps.push(rec.1.clone());
}
let mut final_sig: Vec<f32> = vec![0.0; NUM_PIX];
let mut final_hit: Vec<i32> = vec![0; NUM_PIX];
for idx in 0..signal_maps.len() {
let s = signal_maps[idx].clone();
let h = hit_maps[idx].clone();
for pidx in 0..NUM_PIX{
let signal = s[pidx];
let hits = h[pidx];
final_sig[pidx] += signal;
final_hit[pidx] += hits;
}
}
println!("{}", "COMPLETED".bright_green());
/***PRINT ON FILE */
println!("");
let id_number = self.get_mcid();
let file_name = format!("binned_{}.dat", id_number);
println!("Print maps on file: {}", file_name.bright_green().bold());
let mut f = File::create(file_name).unwrap();
let hit: Vec<String> = final_hit.iter().map(|a| a.to_string()).collect();
let sig: Vec<String> = final_sig.iter().map(|a| a.to_string()).collect();
for (i,j) in hit.iter().zip(sig.iter()) {
writeln!(f, "{}\t{}",i, j).unwrap();
}
println!("{}", "COMPLETED".bright_green());
(final_sig, final_hit)
}
}
// GLS DENOISE
impl <'a> Obs <'a>{
pub fn gls_denoise(&self, _tol: f32, _maxiter: usize, _nside: usize){
println!("{}", "Execution of the gls_denoise".bright_blue().bold());
const NUM_PIX: usize = 12*128*128;
let _x: Vec<f32> = vec![0.0; NUM_PIX];
let (tx, rx) = mpsc::channel();
let tods = &self.tod;
let pixs = &self.pix;
//println!("Len TODs: {}", tods.len()); // 55 * n_hour
let mut partial_maps: Vec<Vec<f32>> = Vec::new();
let num_threads = num_cpus::get();
let my_pool_b = ThreadPool::new(num_threads);
for idx_th in 0..tods.len() {
let t = tods[idx_th].clone();
let p = pixs[idx_th].clone();
let tx = tx.clone();
my_pool_b.execute(move || {
let b = get_b(t, p, 128);
tx.send(b).expect("channel will be there waiting for the pool");
});
}
for _ in 0..tods.len(){
partial_maps.push(rx.recv().unwrap());
}
let mut b: Vec<f32> = vec![0.0; 12*128*128];
for i in partial_maps.iter(){
for (n,j) in i.iter().enumerate(){
b[n] += j;
}
}
let map = conjgrad(a(), b, _tol, _maxiter, p(), pixs.clone());
/***PRINT ON FILE */
println!("");
let id_number = self.get_mcid();
let file_name = format!("gls_{}.dat", id_number);
println!("Print maps on file: {}", file_name.bright_green().bold());
let mut f = File::create(file_name).unwrap();
let sig: Vec<String> = map.iter().map(|a| a.to_string()).collect();
for i in sig.iter() {
writeln!(f, "{}",i).unwrap();
}
println!("{}", "COMPLETED".bright_green());
}// End of GLS_DENOISE
}
// UTILS functions
pub fn fn_noise_prior(f: f32, alpha: f32, f_k: f32, sigma: f32, _n: f32) -> f32 {
let mut _np: f32 = 0.0;
if f > 0.0 {
let _np_g = f32::exp( -((10.0 - f) * (10.0-f)) / (2.0 * 0.0002));
_np = sigma * f32::powf( 1.0 + f_k/(10.0-f), alpha.clone()) + 8E8 * _np_g ;
} else {
let _np_g = f32::exp( -((10.0 + f) * (10.0-f)) / (2.0 * 0.0002));
_np = sigma*sigma * f32::powf( 1.0 + f_k/(10.0+f), alpha.clone()) + 8E8 * _np_g;
}
_np
}
pub fn kaiser(beta: f32, length: i32) -> Vec<f32> {
use crate::misc::bessel_i0 as bessel;
let mut window: Vec<f32> = Vec::new();
let start: f32 = (-(length - 1) / 2) as f32;
let end: f32 = ((length - 1) / 2) as f32;
let n_idx = iteratorscustom::FloatIterator::new(start, end, match length.to_u32(){Some(p) => p, None => 0});
for n in n_idx {
let m = length as f32;
window.push(bessel(beta * (1. - (n / (m / 2.)).powi(2)).sqrt()) / bessel(beta))
}// 70-80
window
}
pub fn hann(length: i32) -> Vec<f32> {
let mut window: Vec<f32> = Vec::new();
let n_idx = iteratorscustom::FloatIterator::new(0.0, length as f32, match length.to_u32(){Some(p) => p, None => 0});
for n in n_idx {
window.push(f32::powi( f32::sin(3.141592 * n / (length as f32)), 2));
}
window
}
pub fn denoise(tod: Vec<f32>, _alpha: f32, _f_k: f32, _sigma: f32, _fs: f32) -> Vec<f32> {
let win: Vec<f32> = kaiser(5.0, match tod.len().to_i32(){Some(p)=>p, None=> 0});//70!!!!!
//let win: Vec<f32> = hann( match tod.len().to_i32(){Some(p)=>p, None=> 0});
// let mut fg = Figure::new();
// fg.axes2d().lines(0..win.len(), win.clone(), &[Caption("Kaiser")]).
// lines(0..win.len(), win_h, &[Caption("Hanning")]);
// fg.show().unwrap();
//let now = Instant::now();
let mut input: Vec<Complex<f32>> = tod.iter().zip(win.iter()).map(|x| Complex32::new(
*x.0 *x.1,
0.0)).
collect(); // ~0
//println!("Denoise process: {}", now.elapsed().as_millis());
let mut planner = FftPlanner::new();
let fft = planner.plan_fft_forward(tod.len()); // 10
//let fft = Radix4::new()
fft.process(&mut input); // 50
let mut noise_p: Vec<f32> = Vec::new();
let freq_iter: FloatIterator = FloatIterator::new(-10.0, 10.0, match input.len().to_u32() {Some(p) => p, None => 0});
let mut freq: Vec<f32> = Vec::new();
for f in freq_iter {
noise_p.push(fn_noise_prior(f, _alpha, _f_k, _sigma, match tod.len().to_f32(){Some(p)=>p, None=>0.0} ));
freq.push(f);
}
// Denoise
let mut tod_denoised: Vec<Complex32> = input.iter().zip(noise_p.iter()).map(|(a, b)| {
let (_, angle) = a.to_polar();
let module = a.norm()/b;
Complex32::from_polar(module, angle)
}).collect();
// let mut fg = Figure::new();
// fg.axes2d().points(0..input.len()/2, input.iter().map(|t| t.re).collect::<Vec<f32>>(), &[Caption("FFT - raw")]).set_x_log(Some(10.0)).set_y_log(Some(10.0)).
// points(0..tod_denoised.len()/2, tod_denoised.iter().map(|f| f.re).collect::<Vec<f32>>(), &[Caption("FFT - denoised")]).set_x_log(Some(10.0)).set_y_log(Some(10.0)).
// lines(0..noise_p.len()/2, noise_p, &[Caption("Noise model")]);
// fg.show().unwrap();
let mut planner = FftPlanner::new(); // 60
let ifft = planner.plan_fft_inverse(tod.len());
ifft.process(&mut tod_denoised);
let tod_denoised: Vec<Complex32> = tod_denoised.iter().map(|c| {
let (module, angle) = c.to_polar();
let module2 = module / (tod.len() as f32);
Complex32::from_polar(module2, angle)
}).collect();
let tod_real: Vec<f32> = tod_denoised.iter().zip( win.iter()).map(|t| t.0.re / t.1 ).collect();
// let mut fg = Figure::new();
// fg.axes2d().lines(0..tod_real.len(), tod_real.clone(), &[Caption("TOD denoised")]).lines(0..tod.len(), tod, &[Caption("TOD RAW")]);
// fg.show().unwrap();
tod_real
}
pub fn get_b(tod: Vec<f32>, pix: Vec<i32>, nside: usize) -> Vec<f32> {
let mut b: Vec<f32> = vec![0.0; 12*128*128];
let tod_n = denoise(tod.clone(), 4.0/3.0, 7.0, 30.0, 20.0);
let (map, _) = bin_map(tod_n.clone(), pix, nside);
for i in 0..12*nside*nside {
b[i] += map[i];
}
b
}
fn a() -> Box<dyn Fn(Vec<f32>, Vec<Vec<i32>>) -> Vec<f32>> {
Box::new(|_x: Vec<f32>, pointings: Vec<Vec<i32>>| {
let mut temp_maps: Vec<Vec<f32>> = Vec::new();
let mut res: Vec<f32> = vec![0.0; 12*128*128];
let num_threads = num_cpus::get();
let pool_denoise = ThreadPool::new(num_threads);
let (tx, rx) = mpsc::channel();
for i_det in pointings.iter() {
let mut tmp: Vec<f32> = Vec::new();
let tx = tx.clone();
let point_i = i_det.clone();
let x = _x.clone();
pool_denoise.execute(move ||{
for i in point_i.iter() {
tmp.push(x[*i as usize]);
}
let tmp_denoised = denoise(tmp.clone(), 4.0/3.0, 7.0, 30.0, 20.0);
let (map, _) = bin_map(tmp_denoised.clone(), point_i.clone(), 128);
// let mut final_map = Vec::new();
// for (m, h) in map.iter().zip(hit.iter()) {
// final_map.push(m.clone()/(h.clone() as f32));
// }
tx.send(map).unwrap();
});
}
for _i in 0..pointings.len() {
temp_maps.push(rx.recv().unwrap());
}
for map in temp_maps.iter() {
res = res.iter().zip(map.iter()).map(|(r, m)| r+m).collect::<Vec<f32>>();
}
res
})
}
pub fn p() -> Box<dyn Fn(Vec<f32>) -> Vec<f32>> {
Box::new(|m| m.iter().map(|m| { 1.0*m } ).collect::<Vec<f32>>())
}
pub fn bin_map(tod: Vec<f32>, pix: Vec<i32>, nside: usize) -> (Vec<f32>, Vec<i32>) {
let num_pixs: usize = 12*nside*nside;
let mut signal_map: Vec<f32> = vec![0.0; num_pixs];
let mut hit_map: Vec<i32> = vec![0; num_pixs];
let mut iterator: usize = 0;
for i in pix.iter() {
let pixel = match i.to_usize(){Some(p)=> p, None=> 0};
hit_map[pixel] += 1;
signal_map[pixel] += tod[iterator];
iterator += 1;
}
(signal_map, hit_map)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_create_obs() {
let nside = 128;
let sky = vec![0.0; 12*nside*nside];
let obs = Obs::new(
String::from("start"),
String::from("stop"),
vec![String::from("det1")],
1,
1.0,
0.01,
vec![vec![300.0, 300.0, 300.0]],
sky,
vec![vec![1, 1, 3]]
);
let bin_map = obs.binning();
let sig_1_map = bin_map.0[1];
let pix_1_map = bin_map.1[1];
assert_eq!(sig_1_map, 0.54*(300.0+300.0));
assert_eq!(pix_1_map, 2);
}
} | get_pix | identifier_name |
lib.rs | /*!
# Strip MapMaking library
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur facilisis consectetur arcu. Etiam semper, sem sit amet lacinia dignissim, mauris eros rutrum massa, a imperdiet orci urna vel elit. Nulla at sagittis lacus. Curabitur eu gravida turpis. Mauris blandit porta orci. Aliquam fringilla felis a sem aliquet rhoncus. Suspendisse porta, mi vel euismod porta, mi ex cursus diam, quis iaculis sapien massa eget massa. Fusce sit amet neque vel turpis interdum tempus et non nisl. Nunc aliquam nunc vitae justo accumsan pretium. Morbi eget urna quis ex pellentesque molestie. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer vehicula vehicula tortor sit amet dignissim. Duis finibus, felis ut fringilla tincidunt, mi lectus fermentum eros, ut laoreet justo lacus id urna.
Duis iaculis faucibus mollis. Maecenas dignissim efficitur ex. Sed pulvinar justo a arcu lobortis imperdiet. Suspendisse placerat venenatis volutpat. Aenean eu nulla vitae libero porta dignissim ut sit amet ante. Vestibulum porttitor sodales nibh, nec imperdiet tortor accumsan quis. Ut sagittis arcu eu efficitur varius. Etiam at ex condimentum, volutpat ipsum sed, posuere nibh. Sed posuere fringilla mi in commodo. Ut sodales, elit volutpat finibus dapibus, dui lacus porttitor enim, ac placerat erat ligula quis ipsum. Morbi sagittis et nisl mollis fringilla. Praesent commodo faucibus erat, nec congue lectus finibus vitae. Sed eu ipsum in lorem congue vehicula.
# Using the Strip MapMaking
Duis iaculis faucibus mollis. Maecenas dignissim efficitur ex. Sed pulvinar justo a arcu lobortis imperdiet. Suspendisse placerat venenatis volutpat. Aenean eu nulla vitae libero porta dignissim ut sit amet ante. Vestibulum porttitor sodales nibh, nec imperdiet tortor accumsan quis. Ut sagittis arcu eu efficitur varius. Etiam at ex condimentum, volutpat ipsum sed, posuere nibh. Sed posuere fringilla mi in commodo. Ut sodales, elit volutpat finibus dapibus, dui lacus porttitor enim, ac placerat erat ligula quis ipsum. Morbi sagittis et nisl mollis fringilla. Praesent commodo faucibus erat, nec congue lectus finibus vitae. Sed eu ipsum in lorem congue vehicula.
*/
extern crate rustfft;
pub mod directory;
pub mod iteratorscustom;
pub mod sky;
pub mod threadpool;
pub mod misc;
pub mod noisemodel;
pub mod plot_suite;
pub mod conjugategradient;
use threadpool::ThreadPool;
use std::{fs::File, io::Write, sync::mpsc, usize, vec};
use colored::Colorize;
use conjugategradient::conjgrad;
use iteratorscustom::FloatIterator;
use num::{ToPrimitive, complex::Complex32};
use rustfft::{FftPlanner, num_complex::Complex};
// use rustfft::algorithm::Radix4;
// use noisemodel::NoiseModel;
// use std::time::Instant;
// use gnuplot::*;
use std::marker::PhantomData;
#[derive(Debug)]
pub struct Obs <'a> {
start: String,
stop: String,
detector: Vec<String>,
mc_id: u8,
alpha: f32,
f_knee: f32,
pix: Vec<Vec<i32>>,
tod: Vec<Vec<f32>>,
sky_t: Vec<f32>,
phantom: PhantomData<&'a f32>,
}
// ```
// Documentation
// Creation function
// ```
impl <'a> Obs <'a> {
pub fn new(
start: String,
stop: String,
detector: Vec<String>,
mc_id: u8,
alpha: f32,
f_knee: f32,
tod: Vec<Vec<f32>>,
sky: Vec<f32>,
pix: Vec<Vec<i32>> ) -> Self
{
let mut tod_final: Vec<Vec<f32>> = Vec::new();
for (i, j) in tod.iter().zip(pix.iter()){
//let noise = NoiseModel::new(50.0, 7e9, 1.0/20.0, 0.1, 1.0, 123, i.len());
//let tod_noise = noise.get_noise_tod();
let mut tmp: Vec<f32> = Vec::new();
for (_n, (k, l)) in i.into_iter().zip(j.iter()).enumerate(){
let t_sky = sky[match l.to_usize() {Some(p) => p, None=>0}];
//let r = tod_noise[_n];
tmp.push(0.54*k + t_sky);
} | }
return Obs {
start,
stop,
detector,
mc_id,
alpha,
f_knee,
pix,
tod: tod_final,
sky_t: sky,
phantom: PhantomData,
}
}
}
// The `get` methods
impl <'a> Obs <'a>{
pub fn get_start(&self) -> &String {
&self.start
}
pub fn get_stop(&self) -> &String {
&self.stop
}
pub fn get_detector(&self) -> &Vec<String> {
&self.detector
}
pub fn get_mcid(&self) -> &u8 {
&self.mc_id
}
pub fn get_pix(&self) -> &Vec<Vec<i32>> {
&self.pix
}
pub fn get_tod(&self) -> &Vec<Vec<f32>> {
&self.tod
}
}
// Mitigation of the systematic effects
// Starting from the binning, to the
// implementation of a de_noise model
impl <'a> Obs <'a>{
pub fn binning(&self) -> (Vec<f32>, Vec<i32>) {
println!("");
println!("Start {}", "binning".bright_blue().bold());
const NSIDE: usize = 128;
const NUM_PIX: usize = NSIDE*NSIDE*12;
let mut signal_maps: Vec<Vec<f32>> = Vec::new();
let mut hit_maps: Vec<Vec<i32>> = Vec::new();
let pix = &self.pix;
let tod = &self.tod;
let num_threads = num_cpus::get();
let bin_pool = ThreadPool::new(num_threads);
let (tx, rx) = mpsc::channel();
for i in 0..tod.len() {
let t = tod[i].clone();
let p = pix[i].clone();
let tx = tx.clone();
bin_pool.execute(move ||{
let (sig_par, hit_par) = bin_map(t.clone(), p.clone(), 128);
tx.send((sig_par, hit_par)).unwrap();
});
}
for _i in 0..tod.len() {
let rec = rx.recv().unwrap();
signal_maps.push(rec.0.clone());
hit_maps.push(rec.1.clone());
}
let mut final_sig: Vec<f32> = vec![0.0; NUM_PIX];
let mut final_hit: Vec<i32> = vec![0; NUM_PIX];
for idx in 0..signal_maps.len() {
let s = signal_maps[idx].clone();
let h = hit_maps[idx].clone();
for pidx in 0..NUM_PIX{
let signal = s[pidx];
let hits = h[pidx];
final_sig[pidx] += signal;
final_hit[pidx] += hits;
}
}
println!("{}", "COMPLETED".bright_green());
/***PRINT ON FILE */
println!("");
let id_number = self.get_mcid();
let file_name = format!("binned_{}.dat", id_number);
println!("Print maps on file: {}", file_name.bright_green().bold());
let mut f = File::create(file_name).unwrap();
let hit: Vec<String> = final_hit.iter().map(|a| a.to_string()).collect();
let sig: Vec<String> = final_sig.iter().map(|a| a.to_string()).collect();
for (i,j) in hit.iter().zip(sig.iter()) {
writeln!(f, "{}\t{}",i, j).unwrap();
}
println!("{}", "COMPLETED".bright_green());
(final_sig, final_hit)
}
}
// GLS DENOISE
impl <'a> Obs <'a>{
pub fn gls_denoise(&self, _tol: f32, _maxiter: usize, _nside: usize){
println!("{}", "Execution of the gls_denoise".bright_blue().bold());
const NUM_PIX: usize = 12*128*128;
let _x: Vec<f32> = vec![0.0; NUM_PIX];
let (tx, rx) = mpsc::channel();
let tods = &self.tod;
let pixs = &self.pix;
//println!("Len TODs: {}", tods.len()); // 55 * n_hour
let mut partial_maps: Vec<Vec<f32>> = Vec::new();
let num_threads = num_cpus::get();
let my_pool_b = ThreadPool::new(num_threads);
for idx_th in 0..tods.len() {
let t = tods[idx_th].clone();
let p = pixs[idx_th].clone();
let tx = tx.clone();
my_pool_b.execute(move || {
let b = get_b(t, p, 128);
tx.send(b).expect("channel will be there waiting for the pool");
});
}
for _ in 0..tods.len(){
partial_maps.push(rx.recv().unwrap());
}
let mut b: Vec<f32> = vec![0.0; 12*128*128];
for i in partial_maps.iter(){
for (n,j) in i.iter().enumerate(){
b[n] += j;
}
}
let map = conjgrad(a(), b, _tol, _maxiter, p(), pixs.clone());
/***PRINT ON FILE */
println!("");
let id_number = self.get_mcid();
let file_name = format!("gls_{}.dat", id_number);
println!("Print maps on file: {}", file_name.bright_green().bold());
let mut f = File::create(file_name).unwrap();
let sig: Vec<String> = map.iter().map(|a| a.to_string()).collect();
for i in sig.iter() {
writeln!(f, "{}",i).unwrap();
}
println!("{}", "COMPLETED".bright_green());
}// End of GLS_DENOISE
}
// UTILS functions
pub fn fn_noise_prior(f: f32, alpha: f32, f_k: f32, sigma: f32, _n: f32) -> f32 {
let mut _np: f32 = 0.0;
if f > 0.0 {
let _np_g = f32::exp( -((10.0 - f) * (10.0-f)) / (2.0 * 0.0002));
_np = sigma * f32::powf( 1.0 + f_k/(10.0-f), alpha.clone()) + 8E8 * _np_g ;
} else {
let _np_g = f32::exp( -((10.0 + f) * (10.0-f)) / (2.0 * 0.0002));
_np = sigma*sigma * f32::powf( 1.0 + f_k/(10.0+f), alpha.clone()) + 8E8 * _np_g;
}
_np
}
pub fn kaiser(beta: f32, length: i32) -> Vec<f32> {
use crate::misc::bessel_i0 as bessel;
let mut window: Vec<f32> = Vec::new();
let start: f32 = (-(length - 1) / 2) as f32;
let end: f32 = ((length - 1) / 2) as f32;
let n_idx = iteratorscustom::FloatIterator::new(start, end, match length.to_u32(){Some(p) => p, None => 0});
for n in n_idx {
let m = length as f32;
window.push(bessel(beta * (1. - (n / (m / 2.)).powi(2)).sqrt()) / bessel(beta))
}// 70-80
window
}
pub fn hann(length: i32) -> Vec<f32> {
let mut window: Vec<f32> = Vec::new();
let n_idx = iteratorscustom::FloatIterator::new(0.0, length as f32, match length.to_u32(){Some(p) => p, None => 0});
for n in n_idx {
window.push(f32::powi( f32::sin(3.141592 * n / (length as f32)), 2));
}
window
}
pub fn denoise(tod: Vec<f32>, _alpha: f32, _f_k: f32, _sigma: f32, _fs: f32) -> Vec<f32> {
let win: Vec<f32> = kaiser(5.0, match tod.len().to_i32(){Some(p)=>p, None=> 0});//70!!!!!
//let win: Vec<f32> = hann( match tod.len().to_i32(){Some(p)=>p, None=> 0});
// let mut fg = Figure::new();
// fg.axes2d().lines(0..win.len(), win.clone(), &[Caption("Kaiser")]).
// lines(0..win.len(), win_h, &[Caption("Hanning")]);
// fg.show().unwrap();
//let now = Instant::now();
let mut input: Vec<Complex<f32>> = tod.iter().zip(win.iter()).map(|x| Complex32::new(
*x.0 *x.1,
0.0)).
collect(); // ~0
//println!("Denoise process: {}", now.elapsed().as_millis());
let mut planner = FftPlanner::new();
let fft = planner.plan_fft_forward(tod.len()); // 10
//let fft = Radix4::new()
fft.process(&mut input); // 50
let mut noise_p: Vec<f32> = Vec::new();
let freq_iter: FloatIterator = FloatIterator::new(-10.0, 10.0, match input.len().to_u32() {Some(p) => p, None => 0});
let mut freq: Vec<f32> = Vec::new();
for f in freq_iter {
noise_p.push(fn_noise_prior(f, _alpha, _f_k, _sigma, match tod.len().to_f32(){Some(p)=>p, None=>0.0} ));
freq.push(f);
}
// Denoise
let mut tod_denoised: Vec<Complex32> = input.iter().zip(noise_p.iter()).map(|(a, b)| {
let (_, angle) = a.to_polar();
let module = a.norm()/b;
Complex32::from_polar(module, angle)
}).collect();
// let mut fg = Figure::new();
// fg.axes2d().points(0..input.len()/2, input.iter().map(|t| t.re).collect::<Vec<f32>>(), &[Caption("FFT - raw")]).set_x_log(Some(10.0)).set_y_log(Some(10.0)).
// points(0..tod_denoised.len()/2, tod_denoised.iter().map(|f| f.re).collect::<Vec<f32>>(), &[Caption("FFT - denoised")]).set_x_log(Some(10.0)).set_y_log(Some(10.0)).
// lines(0..noise_p.len()/2, noise_p, &[Caption("Noise model")]);
// fg.show().unwrap();
let mut planner = FftPlanner::new(); // 60
let ifft = planner.plan_fft_inverse(tod.len());
ifft.process(&mut tod_denoised);
let tod_denoised: Vec<Complex32> = tod_denoised.iter().map(|c| {
let (module, angle) = c.to_polar();
let module2 = module / (tod.len() as f32);
Complex32::from_polar(module2, angle)
}).collect();
let tod_real: Vec<f32> = tod_denoised.iter().zip( win.iter()).map(|t| t.0.re / t.1 ).collect();
// let mut fg = Figure::new();
// fg.axes2d().lines(0..tod_real.len(), tod_real.clone(), &[Caption("TOD denoised")]).lines(0..tod.len(), tod, &[Caption("TOD RAW")]);
// fg.show().unwrap();
tod_real
}
pub fn get_b(tod: Vec<f32>, pix: Vec<i32>, nside: usize) -> Vec<f32> {
let mut b: Vec<f32> = vec![0.0; 12*128*128];
let tod_n = denoise(tod.clone(), 4.0/3.0, 7.0, 30.0, 20.0);
let (map, _) = bin_map(tod_n.clone(), pix, nside);
for i in 0..12*nside*nside {
b[i] += map[i];
}
b
}
fn a() -> Box<dyn Fn(Vec<f32>, Vec<Vec<i32>>) -> Vec<f32>> {
Box::new(|_x: Vec<f32>, pointings: Vec<Vec<i32>>| {
let mut temp_maps: Vec<Vec<f32>> = Vec::new();
let mut res: Vec<f32> = vec![0.0; 12*128*128];
let num_threads = num_cpus::get();
let pool_denoise = ThreadPool::new(num_threads);
let (tx, rx) = mpsc::channel();
for i_det in pointings.iter() {
let mut tmp: Vec<f32> = Vec::new();
let tx = tx.clone();
let point_i = i_det.clone();
let x = _x.clone();
pool_denoise.execute(move ||{
for i in point_i.iter() {
tmp.push(x[*i as usize]);
}
let tmp_denoised = denoise(tmp.clone(), 4.0/3.0, 7.0, 30.0, 20.0);
let (map, _) = bin_map(tmp_denoised.clone(), point_i.clone(), 128);
// let mut final_map = Vec::new();
// for (m, h) in map.iter().zip(hit.iter()) {
// final_map.push(m.clone()/(h.clone() as f32));
// }
tx.send(map).unwrap();
});
}
for _i in 0..pointings.len() {
temp_maps.push(rx.recv().unwrap());
}
for map in temp_maps.iter() {
res = res.iter().zip(map.iter()).map(|(r, m)| r+m).collect::<Vec<f32>>();
}
res
})
}
pub fn p() -> Box<dyn Fn(Vec<f32>) -> Vec<f32>> {
Box::new(|m| m.iter().map(|m| { 1.0*m } ).collect::<Vec<f32>>())
}
pub fn bin_map(tod: Vec<f32>, pix: Vec<i32>, nside: usize) -> (Vec<f32>, Vec<i32>) {
let num_pixs: usize = 12*nside*nside;
let mut signal_map: Vec<f32> = vec![0.0; num_pixs];
let mut hit_map: Vec<i32> = vec![0; num_pixs];
let mut iterator: usize = 0;
for i in pix.iter() {
let pixel = match i.to_usize(){Some(p)=> p, None=> 0};
hit_map[pixel] += 1;
signal_map[pixel] += tod[iterator];
iterator += 1;
}
(signal_map, hit_map)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_create_obs() {
let nside = 128;
let sky = vec![0.0; 12*nside*nside];
let obs = Obs::new(
String::from("start"),
String::from("stop"),
vec![String::from("det1")],
1,
1.0,
0.01,
vec![vec![300.0, 300.0, 300.0]],
sky,
vec![vec![1, 1, 3]]
);
let bin_map = obs.binning();
let sig_1_map = bin_map.0[1];
let pix_1_map = bin_map.1[1];
assert_eq!(sig_1_map, 0.54*(300.0+300.0));
assert_eq!(pix_1_map, 2);
}
} | tod_final.push(tmp); | random_line_split |
timeline.js | var Timeline = (function(){
var
// The current offset into the total
// playlist, in seconds
_offset = 0,
// The total duration of all the tracks
// to be played.
_totalRuntime,
_isPlaying = true,
_earlyLoad = false,
_offsetRequest = false,
_backup = {},
_template = {},
_rateWindow = [],
// preferred quality
_quality,
Player = {
controls: false,
eventList: [
'StateChange',
'PlaybackQualityChange',
'PlaybackRateChange',
'Error',
'ApiChange'
],
Quality: {
set: function(what) {
if(what < 0 || !_.isNumber(what)) { what = 0; }
_quality = QUALITY_LEVELS[what % QUALITY_LEVELS.length];
$("#quality-down")[ (_quality == _.last(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
$("#quality-up")[ (_quality == _.first(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
// This function is invoked at startup without arguments.
if(arguments.length) {
Toolbar.status("Set preferred quality to " + _quality);
}
ev('quality', _quality);
return _quality;
},
down: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) + 1),
up: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) - 1),
},
Play: function(){
ev.isset('player_load', function(){
if(!_isPlaying) {
_isPlaying = true;
Player.active.playVideo();
$(".pause-play").html('<i class="fa fa-stop"></i>');
}
});
}
};
Player.Quality.set();
// The "current" list of videos is the same as all.
// the current could point to some other database entirely
_db.byId = _db.ALL = _db.view('id');
_db.current = _db;
_backup = {
start: 0,
off: function(){
if(Player.active.off) {
$("#backupPlayer").html('');
Player.active = Player.controls;
}
return Player.active;
},
getCurrentTime: function(){
return Player.offset + ((new Date()) - _backup._start) / 1000;
},
getDuration: () => Player.activeData.length,
pauseVideo: function() {
Player.offset = Player.active.getCurrentTime();
$("#backupPlayer").html('');
},
seekTo: function(what) {
Player.active.pauseVideo();
Player.offset = what;
Player.active.playVideo();
},
playVideo: function(){
// We pad for load time
Player.active._start = (+new Date()) + 2000;
$("#backupPlayer").html(
Template.backup({
offset: Math.floor(Player.offset),
ytid: Player.activeData.ytid
})
);
},
getPlaybackQuality: () => _quality,
on: function() {
Player.offset = Player.offset || 0;
Player.active = _backup;
Player.active.playVideo();
}
};
function updateytplayer() {
// This is the "clock" that everything is rated by.
var
// The tick is the clock-time ... this is different from the
// _offset which is where we should be in the playlist
tick = ev.incr('tick'),
scrubberPosition = 0;
// Make sure we aren't the backup player
if(Player.active && !Player.active.on && Player.active.getVideoBytesLoaded && Player.activeData) {
var rateStart = 1e10,
stats,
dtime,
ctime,
frac;
if(!isMobile) {
dtime = Player.active.getDuration() || 0;
ctime = Player.active.getCurrentTime() || 0;
frac = Player.active.getVideoLoadedFraction() || 0;
stats = [
dtime.toFixed(3),
ctime.toFixed(3),
Player.activeData.length,
Player.active.getPlayerState(),
// How far in
(
Player.active.getCurrentTime() /
Player.active.getDuration()
).toFixed(3),
// How much do we have
frac.toFixed(3)
];
debug(stats);
}
}
// The mechanics for moving the centroid
if(Player.active.getCurrentTime) |
Scrubber.real.dom.css({ left: scrubberPosition + "%"});
}
_.each(Player.eventList, function(what) {
self['ytDebug_' + what] = function(that) {
ev.set("yt-" + what, that);
log(what, that);
}
});
function ytDebugHook() {
_.each(Player.eventList, function(what) {
Player.controls.addEventListener("on" + what, 'ytDebug_' + what);
});
}
self.onPlayerStateChange = function(e) {
if(e.target === Player.eager) {
return;
}
if(e.data === 1) {
if(_offsetRequest) {
if( e.target.getVideoUrl &&
e.target.getVideoUrl().search(_offsetRequest.id) !== -1 &&
Math.abs(_offsetRequest.offset - e.target.getCurrentTime()) > 10) {
e.target.seekTo(_offsetRequest.offset);
}
_offsetRequest = false;
}
if(!Player.activeData.length) {
Player.activeData.length = Timeline.player.controls.getDuration();
Timeline.updateOffset();
}
}
}
self.onYouTubePlayerAPIReady = function() {
Player.controls = new YT.Player('player-iframe-0', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
Player.eager = new YT.Player('player-iframe-1', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
when(() => Player.controls.loadVideoById).run(function(){
//ytDebugHook();
Player.active = Player.controls;
setInterval(updateytplayer, CLOCK_FREQ);
ev.set('player_load');
ev.fire('volume');
});
}
// When the player is loaded all the way
// then we can set the first player to be called
// the active player, which is the one that we will
// use for the most part.
//
// This is also when we start our polling function
// that updates the scrubber and the status of
// where we currently are.
ev('app_state', function(value) {
if (value == 'main') {
_totalRuntime = Utils.runtime(_db.byId);
var parts = (localStorage[ev.db.id + 'offset'] || "").split(' '), _off
if(parts.length == 2) {
Timeline.updateOffset();
_off = _db.current.findFirst('ytid', parts[0]).offset;
Timeline.seekTo(_off + parseFloat(parts[1]));
} else {
Timeline.seekTo((0.001 * (-_epoch + (+new Date()))) % _totalRuntime);
}
ev.isset("player_load", Results.scrollTo);
}
});
// If there's an error loading the video (usually due to
// embed restrictions), we have a backup player that can
// be used. There's significantly less control over this
// player so it's the backup plan.
ev('yt-Error', function(what) {
log("yt-error", what);
if(what == 100) {
Toolbar.status("Video not working; skipping");
replace(Timeline.current().id, true);
Timeline.next();
} else if(what != 150) {
//_backup.on();
} else {
Toolbar.status("Copyright issue; skipping");
// set the current track as unplayable
remote('updateTrack', Timeline.current().ytid, 'playable', false);
Timeline.next();
}
});
ev('volume', function(volume){
Toolbar.status("Set volume to " + volume.toFixed());
if(volume > 0 && Player.active.isMuted()) {
Player.active.unMute();
}
Player.active.setVolume(volume);
});
self.Player = Player;
return {
player: Player,
backup: _backup,
load: function(id) {
ev('app_state', 'main');
Store.get(id).then(getMissingDurations);
},
// the current track
current: () => Player.activeData,
earlyLoad: (obj, delay) => {
let localId = _earlyLoad = setTimeout(() => {
if(_earlyLoad == localId) {
let off = Scrubber.phantom.container ?
.95 * Scrubber.phantom.offset * obj.length : 0;
if(!obj) { console.log("no object", obj); return }
log(`Eager Loading ${obj.ytid} (${off})`);
Player.eager.loadVideoById({
videoId: obj.ytid,
startSeconds: off
});
Player.eager.pauseVideo();
}
}, delay || 500);
return localId;
},
remove: function(ytid){
var obj = _db.findFirst('ytid', ytid);
Toolbar.status("Removed " + obj.title);
Scrubber.real.remove();
// we should store that it was removed
ev.setadd('blacklist', obj.ytid);
// we need to be aware of our current.
_db.find('ytid', obj.ytid).remove();
_db.current.find('ytid', obj.ytid).remove();
Timeline.updateOffset();
Store.saveTracks();
ev.set('request_gen', {force: true});
},
pause: function(){
ev.isset('player_load', function(){
_isPlaying = false;
Player.active.pauseVideo();
$(".pause-play").html('<i class="fa fa-play"></i>');
});
},
pauseplay: function(){
if(_isPlaying) {
Timeline.pause();
} else {
Player.Play();
}
return _isPlaying;
},
updateOffset: function(){
var
index,
aggregate = 0,
order = 0,
prevIndex = false;
_totalRuntime = Utils.runtime(_db.byId);
for(index in _db.byId) {
if(prevIndex !== false) {
_db.byId[prevIndex].next = index;
_db.byId[index].previous = prevIndex;
}
prevIndex = index;
_db.byId[index].offset = aggregate;
aggregate += (parseInt(_db.byId[index].length) || 0);
}
// This final next pointer will enable wraparound
if(index) {
_db.byId[index].next = 0;
// TODO: Sometimes _db.byId[0] is undefined. I have to figure out
// how this offset problem occurs.
for(var ix = 0; !_db.byId[ix]; ix++);
_db.byId[ix].previous = index;
}
// we need to repoint this.
if(Player.activeData) {
var rec = _db.current.first({ytid: Player.activeData.ytid});
// if this track is in our active track then we assign it
if(rec) {
Player.activeData = rec;
} else {
// otherwise we make its next track the first track
Player.activeData.next = Player.activeData.previous = 0;
}
}
},
play: function(ytid, offset) {
if(!arguments.length) {
return Player.Play();
}
offset = offset || 0;
// Only run when the controller has been loaded
ev.isset('player_load', function(){
if(!Player.activeData || Player.activeData.ytid != ytid) {
// NOTE:
//
// This is the only entry point for loading and playing a video
// There are other references to playing and pausing, but this
// is the only line that activley loads the id and offset into
// the player. This is because there has to be an activeData in
// order to go forward.
if(Player.activeData) {
// Increment this count by 1 -- we only want to do it on the case of moving to a new track.
// This is so there's no misreporting of average listen time of view count based on the reloads
// that happen during development
var duration_listened = parseInt(Player.listen_total, 10);
// if it's zero, we listened to none of it, so we should ignore it.
if(duration_listened > 0) {
remote({
func: 'addListen',
id: Player.activeData.ytid,
title: Player.activeData.title,
length: Player.activeData.length
});
remote('updateDuration', Player.activeData.ytid, duration_listened);
}
}
Player.activeData = _db.first({ytid: ytid});
Player.listen_total = 0;
// After the assignment, then we add it to the userhistory
Timeline.playById(Player.active, Player.activeData.ytid, offset);
if(isNaN(Player.activeData.length)) {
log("No length for: ", Player.activeData);
// This should be a reference to everything...
Player.activeData.length = Player.controls.getDuration();
Store.saveTracks();
}
// At this point there is now active data, so anything depending
// on that can run.
ev('active_track', Player.activeData);
ev.set('active_data');
//log("Playing " + Player.activeData.ytid + Player.activeData.title);
} else {
Timeline.seekTo(offset, {isTrackRelative:true});
}
});
},
playById: function(object, id, offset){
var opts = {
videoId: id,
startSeconds: offset,
suggestedQuality: ev('quality')
};
Player.offset = offset;
let active = Timeline
.backup
.off(object);
let eagerVid = ( Player.eager && Player.eager.getVideoUrl ) ? Player.eager.getVideoUrl() : false;
if(eagerVid && eagerVid.search(id) !== -1) {
log("Eager loading");
Player.active.stopVideo();
active = Player.eager;
Player.eager = Player.active;
Player.active = active;
Player.active.seekTo(offset);
_offsetRequest = { id, offset };
_isPlaying = false;
} else {
active.loadVideoById(opts);
}
Player.Play();
// TODO: This feels like a bad place to do this.
// There should probably be a more abstract and less
// explicit way to handle this.
ev.set('deadair', 0);
Timeline.earlyLoad(
// this search is needed for some reason ... the next
// pointer at this point of the code is incorrect for
// filtered lists.
_db.byId[
_db.current.findFirst({ytid: Player.activeData.ytid}).next
],
4000
);
},
next: function(){
Timeline.seekTo(_db.byId[Player.activeData.next].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
prev: function(){
Timeline.seekTo(_db.byId[Player.activeData.previous].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
seekTo: function(offset, opts) {
opts = opts || {};
if(!offset) {
offset = _offset;
}
if (opts.isTrackRelative) {
offset += Player.activeData.offset;
}
if (opts.isOffsetRelative) {
offset += _offset;
}
Timeline.updateOffset();
// If it's between 0 and 1, we assume it's a relative offset ... otherwise we
// take it as the absolute; and we modulus it by the runtime to permit wrap-around
var absolute = ((offset < 1) ? offset * _totalRuntime : offset) % _totalRuntime;
absolute = Math.max(0, absolute);
absolute = Math.min(_totalRuntime, absolute);
//log("Seeking to " + absolute);
var track = _db.current.findFirst(function(row) {
return (row.offset < absolute && (row.offset + row.length) > absolute)
});
if(!track) {
track = _db.current.findFirst();
}
//log("Seeked to " + track.title);
if(track) {
if(!Player.activeData || (track.ytid != Player.activeData.ytid)) {
Timeline.play(track.ytid, absolute - track.offset);
} else {
Player.offset = absolute - track.offset;
Player.active.seekTo(absolute - track.offset);
// TODO: This feels like a bad place to do this.
ev.set('deadair', 0);
}
}
},
debug: function() {
var stats = {};
_.each([
'getAvailablePlaybackRates',
'getAvailableQualityLevels',
'getCurrentTime',
'getDuration',
'getPlaybackQuality',
'getPlaybackRate',
'getPlayerState',
'getVideoBytesLoaded',
'getVideoBytesTotal',
'getVideoEmbedCode',
'getVideoLoadedFraction',
'getVideoStartBytes',
'getVideoUrl',
'getVolume',
'isMuted'
], function(what){
stats[what] = Player.active[what]();
});
log(stats);
},
init: function() {
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/player_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// This doesn't reflect the filtered view ... it would be nice to know what the
// "previous" and "next" track is effeciently with a filter.
// The controls in the upper left of the timeline
$(".previous-track").click(Timeline.prev);
$(".next-track").click(Timeline.next);
$(".pause-play").click(Timeline.pauseplay);
$("#quality-down").click(Player.Quality.down);
$("#quality-up").click(Player.Quality.up);
}
};
})();
| {
var time = Player.active.getCurrentTime(),
prevOffset = _offset;
if(Player.activeData) {
localStorage[ev.db.id + 'offset'] = [Player.activeData.ytid, time].join(' ');
}
if (time > 0 && Player.activeData) {
// This generates the scrubber in the results tab below.
// We first check to see if the video is in the viewport window
if(Results.viewable[Player.activeData.ytid]) {
// And if so we get its dom and other necessary things.
var entry = Results.viewable[Player.activeData.ytid];
// If we are in the purview of the track, then we can move on.
// Otherwise, place ourselves underneath it so that the percentage
// calculations will work out.
scrubberPosition = time * 100 / Player.active.getDuration();
if(Scrubber.real.attach(entry.jquery.timeline)) {
entry.jquery.timeline.css('display','block');
}
} else {
Scrubber.real.remove();
}
// For some reason it appears that this value can
// toggle back to different quality sometimes. So
// we check to see where it's at.
/*
if( Player.active.getAvailableQualityLevels().indexOf(_quality) !== -1 &&
Player.active.getPlaybackQuality() != _quality) {
log('set-quality');
/Player.active.setPlaybackQuality(_quality);
}
*/
// There's this YouTube bug (2013/05/11) that can sometimes report
// totally incorrect values for the duration. If it's more than
// 20 seconds off and greater than 0, then we try to reload the
// video. This bug seems to have been around for almost a year or
// so? Simply loading the video again appears to fix it.
if(Player.active.getDuration() > 30 && (Player.active.getDuration() + 20 < Player.activeData.length)) {
debug("reload " + new Date());
}
// If the player is active and we are at the end of a song, then move ahead.
if(time > 0 && Player.active.getDuration() > 0 && (Player.active.getDuration() - time <= 0)) {
_offset += 1;
debug("seeking " + new Date());
Timeline.seekTo(_offset);
} else {
_offset = Player.activeData.offset + time;
}
// If we are supposed to be playing
if (_isPlaying) {
// And we haven't moved forward
if (_offset - prevOffset == 0) {
// This means there's been dead-air for a few seconds.
if ( ev.incr('deadair', CLOCK_FREQ) > RELOAD_THRESHOLD ) {
//UserHistory.reload();
}
} else {
// this means we are playing so we should increment the total
// time we are listening
Player.listen_total += CLOCK_FREQ / 1000;
}
}
}
} | conditional_block |
timeline.js | var Timeline = (function(){
var
// The current offset into the total
// playlist, in seconds
_offset = 0,
// The total duration of all the tracks
// to be played.
_totalRuntime,
_isPlaying = true,
_earlyLoad = false,
_offsetRequest = false,
_backup = {},
_template = {},
_rateWindow = [],
// preferred quality
_quality,
Player = {
controls: false,
eventList: [
'StateChange',
'PlaybackQualityChange',
'PlaybackRateChange',
'Error',
'ApiChange'
],
Quality: {
set: function(what) {
if(what < 0 || !_.isNumber(what)) { what = 0; }
_quality = QUALITY_LEVELS[what % QUALITY_LEVELS.length];
$("#quality-down")[ (_quality == _.last(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
$("#quality-up")[ (_quality == _.first(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
// This function is invoked at startup without arguments.
if(arguments.length) {
Toolbar.status("Set preferred quality to " + _quality);
}
ev('quality', _quality);
return _quality;
},
down: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) + 1),
up: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) - 1),
},
Play: function(){
ev.isset('player_load', function(){
if(!_isPlaying) {
_isPlaying = true;
Player.active.playVideo();
$(".pause-play").html('<i class="fa fa-stop"></i>');
}
});
}
};
Player.Quality.set();
// The "current" list of videos is the same as all.
// the current could point to some other database entirely
_db.byId = _db.ALL = _db.view('id');
_db.current = _db;
_backup = {
start: 0,
off: function(){
if(Player.active.off) {
$("#backupPlayer").html('');
Player.active = Player.controls;
}
return Player.active;
},
getCurrentTime: function(){
return Player.offset + ((new Date()) - _backup._start) / 1000;
},
getDuration: () => Player.activeData.length,
pauseVideo: function() {
Player.offset = Player.active.getCurrentTime();
$("#backupPlayer").html('');
},
seekTo: function(what) {
Player.active.pauseVideo();
Player.offset = what;
Player.active.playVideo();
},
playVideo: function(){
// We pad for load time
Player.active._start = (+new Date()) + 2000;
$("#backupPlayer").html(
Template.backup({
offset: Math.floor(Player.offset),
ytid: Player.activeData.ytid
})
);
},
getPlaybackQuality: () => _quality,
on: function() {
Player.offset = Player.offset || 0;
Player.active = _backup;
Player.active.playVideo();
}
};
function | () {
// This is the "clock" that everything is rated by.
var
// The tick is the clock-time ... this is different from the
// _offset which is where we should be in the playlist
tick = ev.incr('tick'),
scrubberPosition = 0;
// Make sure we aren't the backup player
if(Player.active && !Player.active.on && Player.active.getVideoBytesLoaded && Player.activeData) {
var rateStart = 1e10,
stats,
dtime,
ctime,
frac;
if(!isMobile) {
dtime = Player.active.getDuration() || 0;
ctime = Player.active.getCurrentTime() || 0;
frac = Player.active.getVideoLoadedFraction() || 0;
stats = [
dtime.toFixed(3),
ctime.toFixed(3),
Player.activeData.length,
Player.active.getPlayerState(),
// How far in
(
Player.active.getCurrentTime() /
Player.active.getDuration()
).toFixed(3),
// How much do we have
frac.toFixed(3)
];
debug(stats);
}
}
// The mechanics for moving the centroid
if(Player.active.getCurrentTime) {
var time = Player.active.getCurrentTime(),
prevOffset = _offset;
if(Player.activeData) {
localStorage[ev.db.id + 'offset'] = [Player.activeData.ytid, time].join(' ');
}
if (time > 0 && Player.activeData) {
// This generates the scrubber in the results tab below.
// We first check to see if the video is in the viewport window
if(Results.viewable[Player.activeData.ytid]) {
// And if so we get its dom and other necessary things.
var entry = Results.viewable[Player.activeData.ytid];
// If we are in the purview of the track, then we can move on.
// Otherwise, place ourselves underneath it so that the percentage
// calculations will work out.
scrubberPosition = time * 100 / Player.active.getDuration();
if(Scrubber.real.attach(entry.jquery.timeline)) {
entry.jquery.timeline.css('display','block');
}
} else {
Scrubber.real.remove();
}
// For some reason it appears that this value can
// toggle back to different quality sometimes. So
// we check to see where it's at.
/*
if( Player.active.getAvailableQualityLevels().indexOf(_quality) !== -1 &&
Player.active.getPlaybackQuality() != _quality) {
log('set-quality');
/Player.active.setPlaybackQuality(_quality);
}
*/
// There's this YouTube bug (2013/05/11) that can sometimes report
// totally incorrect values for the duration. If it's more than
// 20 seconds off and greater than 0, then we try to reload the
// video. This bug seems to have been around for almost a year or
// so? Simply loading the video again appears to fix it.
if(Player.active.getDuration() > 30 && (Player.active.getDuration() + 20 < Player.activeData.length)) {
debug("reload " + new Date());
}
// If the player is active and we are at the end of a song, then move ahead.
if(time > 0 && Player.active.getDuration() > 0 && (Player.active.getDuration() - time <= 0)) {
_offset += 1;
debug("seeking " + new Date());
Timeline.seekTo(_offset);
} else {
_offset = Player.activeData.offset + time;
}
// If we are supposed to be playing
if (_isPlaying) {
// And we haven't moved forward
if (_offset - prevOffset == 0) {
// This means there's been dead-air for a few seconds.
if ( ev.incr('deadair', CLOCK_FREQ) > RELOAD_THRESHOLD ) {
//UserHistory.reload();
}
} else {
// this means we are playing so we should increment the total
// time we are listening
Player.listen_total += CLOCK_FREQ / 1000;
}
}
}
}
Scrubber.real.dom.css({ left: scrubberPosition + "%"});
}
_.each(Player.eventList, function(what) {
self['ytDebug_' + what] = function(that) {
ev.set("yt-" + what, that);
log(what, that);
}
});
function ytDebugHook() {
_.each(Player.eventList, function(what) {
Player.controls.addEventListener("on" + what, 'ytDebug_' + what);
});
}
self.onPlayerStateChange = function(e) {
if(e.target === Player.eager) {
return;
}
if(e.data === 1) {
if(_offsetRequest) {
if( e.target.getVideoUrl &&
e.target.getVideoUrl().search(_offsetRequest.id) !== -1 &&
Math.abs(_offsetRequest.offset - e.target.getCurrentTime()) > 10) {
e.target.seekTo(_offsetRequest.offset);
}
_offsetRequest = false;
}
if(!Player.activeData.length) {
Player.activeData.length = Timeline.player.controls.getDuration();
Timeline.updateOffset();
}
}
}
self.onYouTubePlayerAPIReady = function() {
Player.controls = new YT.Player('player-iframe-0', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
Player.eager = new YT.Player('player-iframe-1', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
when(() => Player.controls.loadVideoById).run(function(){
//ytDebugHook();
Player.active = Player.controls;
setInterval(updateytplayer, CLOCK_FREQ);
ev.set('player_load');
ev.fire('volume');
});
}
// When the player is loaded all the way
// then we can set the first player to be called
// the active player, which is the one that we will
// use for the most part.
//
// This is also when we start our polling function
// that updates the scrubber and the status of
// where we currently are.
ev('app_state', function(value) {
if (value == 'main') {
_totalRuntime = Utils.runtime(_db.byId);
var parts = (localStorage[ev.db.id + 'offset'] || "").split(' '), _off
if(parts.length == 2) {
Timeline.updateOffset();
_off = _db.current.findFirst('ytid', parts[0]).offset;
Timeline.seekTo(_off + parseFloat(parts[1]));
} else {
Timeline.seekTo((0.001 * (-_epoch + (+new Date()))) % _totalRuntime);
}
ev.isset("player_load", Results.scrollTo);
}
});
// If there's an error loading the video (usually due to
// embed restrictions), we have a backup player that can
// be used. There's significantly less control over this
// player so it's the backup plan.
ev('yt-Error', function(what) {
log("yt-error", what);
if(what == 100) {
Toolbar.status("Video not working; skipping");
replace(Timeline.current().id, true);
Timeline.next();
} else if(what != 150) {
//_backup.on();
} else {
Toolbar.status("Copyright issue; skipping");
// set the current track as unplayable
remote('updateTrack', Timeline.current().ytid, 'playable', false);
Timeline.next();
}
});
ev('volume', function(volume){
Toolbar.status("Set volume to " + volume.toFixed());
if(volume > 0 && Player.active.isMuted()) {
Player.active.unMute();
}
Player.active.setVolume(volume);
});
self.Player = Player;
return {
player: Player,
backup: _backup,
load: function(id) {
ev('app_state', 'main');
Store.get(id).then(getMissingDurations);
},
// the current track
current: () => Player.activeData,
earlyLoad: (obj, delay) => {
let localId = _earlyLoad = setTimeout(() => {
if(_earlyLoad == localId) {
let off = Scrubber.phantom.container ?
.95 * Scrubber.phantom.offset * obj.length : 0;
if(!obj) { console.log("no object", obj); return }
log(`Eager Loading ${obj.ytid} (${off})`);
Player.eager.loadVideoById({
videoId: obj.ytid,
startSeconds: off
});
Player.eager.pauseVideo();
}
}, delay || 500);
return localId;
},
remove: function(ytid){
var obj = _db.findFirst('ytid', ytid);
Toolbar.status("Removed " + obj.title);
Scrubber.real.remove();
// we should store that it was removed
ev.setadd('blacklist', obj.ytid);
// we need to be aware of our current.
_db.find('ytid', obj.ytid).remove();
_db.current.find('ytid', obj.ytid).remove();
Timeline.updateOffset();
Store.saveTracks();
ev.set('request_gen', {force: true});
},
pause: function(){
ev.isset('player_load', function(){
_isPlaying = false;
Player.active.pauseVideo();
$(".pause-play").html('<i class="fa fa-play"></i>');
});
},
pauseplay: function(){
if(_isPlaying) {
Timeline.pause();
} else {
Player.Play();
}
return _isPlaying;
},
updateOffset: function(){
var
index,
aggregate = 0,
order = 0,
prevIndex = false;
_totalRuntime = Utils.runtime(_db.byId);
for(index in _db.byId) {
if(prevIndex !== false) {
_db.byId[prevIndex].next = index;
_db.byId[index].previous = prevIndex;
}
prevIndex = index;
_db.byId[index].offset = aggregate;
aggregate += (parseInt(_db.byId[index].length) || 0);
}
// This final next pointer will enable wraparound
if(index) {
_db.byId[index].next = 0;
// TODO: Sometimes _db.byId[0] is undefined. I have to figure out
// how this offset problem occurs.
for(var ix = 0; !_db.byId[ix]; ix++);
_db.byId[ix].previous = index;
}
// we need to repoint this.
if(Player.activeData) {
var rec = _db.current.first({ytid: Player.activeData.ytid});
// if this track is in our active track then we assign it
if(rec) {
Player.activeData = rec;
} else {
// otherwise we make its next track the first track
Player.activeData.next = Player.activeData.previous = 0;
}
}
},
play: function(ytid, offset) {
if(!arguments.length) {
return Player.Play();
}
offset = offset || 0;
// Only run when the controller has been loaded
ev.isset('player_load', function(){
if(!Player.activeData || Player.activeData.ytid != ytid) {
// NOTE:
//
// This is the only entry point for loading and playing a video
// There are other references to playing and pausing, but this
// is the only line that activley loads the id and offset into
// the player. This is because there has to be an activeData in
// order to go forward.
if(Player.activeData) {
// Increment this count by 1 -- we only want to do it on the case of moving to a new track.
// This is so there's no misreporting of average listen time of view count based on the reloads
// that happen during development
var duration_listened = parseInt(Player.listen_total, 10);
// if it's zero, we listened to none of it, so we should ignore it.
if(duration_listened > 0) {
remote({
func: 'addListen',
id: Player.activeData.ytid,
title: Player.activeData.title,
length: Player.activeData.length
});
remote('updateDuration', Player.activeData.ytid, duration_listened);
}
}
Player.activeData = _db.first({ytid: ytid});
Player.listen_total = 0;
// After the assignment, then we add it to the userhistory
Timeline.playById(Player.active, Player.activeData.ytid, offset);
if(isNaN(Player.activeData.length)) {
log("No length for: ", Player.activeData);
// This should be a reference to everything...
Player.activeData.length = Player.controls.getDuration();
Store.saveTracks();
}
// At this point there is now active data, so anything depending
// on that can run.
ev('active_track', Player.activeData);
ev.set('active_data');
//log("Playing " + Player.activeData.ytid + Player.activeData.title);
} else {
Timeline.seekTo(offset, {isTrackRelative:true});
}
});
},
playById: function(object, id, offset){
var opts = {
videoId: id,
startSeconds: offset,
suggestedQuality: ev('quality')
};
Player.offset = offset;
let active = Timeline
.backup
.off(object);
let eagerVid = ( Player.eager && Player.eager.getVideoUrl ) ? Player.eager.getVideoUrl() : false;
if(eagerVid && eagerVid.search(id) !== -1) {
log("Eager loading");
Player.active.stopVideo();
active = Player.eager;
Player.eager = Player.active;
Player.active = active;
Player.active.seekTo(offset);
_offsetRequest = { id, offset };
_isPlaying = false;
} else {
active.loadVideoById(opts);
}
Player.Play();
// TODO: This feels like a bad place to do this.
// There should probably be a more abstract and less
// explicit way to handle this.
ev.set('deadair', 0);
Timeline.earlyLoad(
// this search is needed for some reason ... the next
// pointer at this point of the code is incorrect for
// filtered lists.
_db.byId[
_db.current.findFirst({ytid: Player.activeData.ytid}).next
],
4000
);
},
next: function(){
Timeline.seekTo(_db.byId[Player.activeData.next].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
prev: function(){
Timeline.seekTo(_db.byId[Player.activeData.previous].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
seekTo: function(offset, opts) {
opts = opts || {};
if(!offset) {
offset = _offset;
}
if (opts.isTrackRelative) {
offset += Player.activeData.offset;
}
if (opts.isOffsetRelative) {
offset += _offset;
}
Timeline.updateOffset();
// If it's between 0 and 1, we assume it's a relative offset ... otherwise we
// take it as the absolute; and we modulus it by the runtime to permit wrap-around
var absolute = ((offset < 1) ? offset * _totalRuntime : offset) % _totalRuntime;
absolute = Math.max(0, absolute);
absolute = Math.min(_totalRuntime, absolute);
//log("Seeking to " + absolute);
var track = _db.current.findFirst(function(row) {
return (row.offset < absolute && (row.offset + row.length) > absolute)
});
if(!track) {
track = _db.current.findFirst();
}
//log("Seeked to " + track.title);
if(track) {
if(!Player.activeData || (track.ytid != Player.activeData.ytid)) {
Timeline.play(track.ytid, absolute - track.offset);
} else {
Player.offset = absolute - track.offset;
Player.active.seekTo(absolute - track.offset);
// TODO: This feels like a bad place to do this.
ev.set('deadair', 0);
}
}
},
debug: function() {
var stats = {};
_.each([
'getAvailablePlaybackRates',
'getAvailableQualityLevels',
'getCurrentTime',
'getDuration',
'getPlaybackQuality',
'getPlaybackRate',
'getPlayerState',
'getVideoBytesLoaded',
'getVideoBytesTotal',
'getVideoEmbedCode',
'getVideoLoadedFraction',
'getVideoStartBytes',
'getVideoUrl',
'getVolume',
'isMuted'
], function(what){
stats[what] = Player.active[what]();
});
log(stats);
},
init: function() {
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/player_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// This doesn't reflect the filtered view ... it would be nice to know what the
// "previous" and "next" track is effeciently with a filter.
// The controls in the upper left of the timeline
$(".previous-track").click(Timeline.prev);
$(".next-track").click(Timeline.next);
$(".pause-play").click(Timeline.pauseplay);
$("#quality-down").click(Player.Quality.down);
$("#quality-up").click(Player.Quality.up);
}
};
})();
| updateytplayer | identifier_name |
timeline.js | var Timeline = (function(){
var
// The current offset into the total
// playlist, in seconds
_offset = 0,
// The total duration of all the tracks
// to be played.
_totalRuntime,
_isPlaying = true,
_earlyLoad = false,
_offsetRequest = false,
_backup = {},
_template = {},
_rateWindow = [],
// preferred quality
_quality,
Player = {
controls: false,
eventList: [
'StateChange',
'PlaybackQualityChange',
'PlaybackRateChange',
'Error',
'ApiChange'
],
Quality: {
set: function(what) {
if(what < 0 || !_.isNumber(what)) { what = 0; }
_quality = QUALITY_LEVELS[what % QUALITY_LEVELS.length];
$("#quality-down")[ (_quality == _.last(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
$("#quality-up")[ (_quality == _.first(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
// This function is invoked at startup without arguments.
if(arguments.length) {
Toolbar.status("Set preferred quality to " + _quality);
}
ev('quality', _quality);
return _quality;
},
down: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) + 1),
up: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) - 1),
},
Play: function(){
ev.isset('player_load', function(){
if(!_isPlaying) {
_isPlaying = true;
Player.active.playVideo();
$(".pause-play").html('<i class="fa fa-stop"></i>');
}
});
}
};
Player.Quality.set();
// The "current" list of videos is the same as all.
// the current could point to some other database entirely
_db.byId = _db.ALL = _db.view('id');
_db.current = _db;
_backup = {
start: 0,
off: function(){
if(Player.active.off) {
$("#backupPlayer").html('');
Player.active = Player.controls;
}
return Player.active;
},
getCurrentTime: function(){
return Player.offset + ((new Date()) - _backup._start) / 1000;
},
getDuration: () => Player.activeData.length,
pauseVideo: function() {
Player.offset = Player.active.getCurrentTime();
$("#backupPlayer").html('');
},
seekTo: function(what) {
Player.active.pauseVideo();
Player.offset = what;
Player.active.playVideo();
},
playVideo: function(){
// We pad for load time
Player.active._start = (+new Date()) + 2000;
$("#backupPlayer").html(
Template.backup({
offset: Math.floor(Player.offset),
ytid: Player.activeData.ytid
})
);
},
getPlaybackQuality: () => _quality,
on: function() {
Player.offset = Player.offset || 0;
Player.active = _backup;
Player.active.playVideo();
}
};
function updateytplayer() {
// This is the "clock" that everything is rated by.
var
// The tick is the clock-time ... this is different from the
// _offset which is where we should be in the playlist
tick = ev.incr('tick'),
scrubberPosition = 0;
// Make sure we aren't the backup player
if(Player.active && !Player.active.on && Player.active.getVideoBytesLoaded && Player.activeData) {
var rateStart = 1e10,
stats,
dtime,
ctime,
frac;
if(!isMobile) {
dtime = Player.active.getDuration() || 0;
ctime = Player.active.getCurrentTime() || 0;
frac = Player.active.getVideoLoadedFraction() || 0;
stats = [
dtime.toFixed(3),
ctime.toFixed(3),
Player.activeData.length,
Player.active.getPlayerState(),
// How far in
(
Player.active.getCurrentTime() /
Player.active.getDuration()
).toFixed(3),
// How much do we have
frac.toFixed(3)
];
debug(stats);
}
}
// The mechanics for moving the centroid
if(Player.active.getCurrentTime) {
var time = Player.active.getCurrentTime(),
prevOffset = _offset;
if(Player.activeData) {
localStorage[ev.db.id + 'offset'] = [Player.activeData.ytid, time].join(' ');
}
if (time > 0 && Player.activeData) {
// This generates the scrubber in the results tab below.
// We first check to see if the video is in the viewport window
if(Results.viewable[Player.activeData.ytid]) {
// And if so we get its dom and other necessary things.
var entry = Results.viewable[Player.activeData.ytid];
// If we are in the purview of the track, then we can move on.
// Otherwise, place ourselves underneath it so that the percentage
// calculations will work out.
scrubberPosition = time * 100 / Player.active.getDuration();
if(Scrubber.real.attach(entry.jquery.timeline)) {
entry.jquery.timeline.css('display','block');
}
} else {
Scrubber.real.remove();
}
// For some reason it appears that this value can
// toggle back to different quality sometimes. So
// we check to see where it's at.
/*
if( Player.active.getAvailableQualityLevels().indexOf(_quality) !== -1 &&
Player.active.getPlaybackQuality() != _quality) {
| log('set-quality');
/Player.active.setPlaybackQuality(_quality);
}
*/
// There's this YouTube bug (2013/05/11) that can sometimes report
// totally incorrect values for the duration. If it's more than
// 20 seconds off and greater than 0, then we try to reload the
// video. This bug seems to have been around for almost a year or
// so? Simply loading the video again appears to fix it.
if(Player.active.getDuration() > 30 && (Player.active.getDuration() + 20 < Player.activeData.length)) {
debug("reload " + new Date());
}
// If the player is active and we are at the end of a song, then move ahead.
if(time > 0 && Player.active.getDuration() > 0 && (Player.active.getDuration() - time <= 0)) {
_offset += 1;
debug("seeking " + new Date());
Timeline.seekTo(_offset);
} else {
_offset = Player.activeData.offset + time;
}
// If we are supposed to be playing
if (_isPlaying) {
// And we haven't moved forward
if (_offset - prevOffset == 0) {
// This means there's been dead-air for a few seconds.
if ( ev.incr('deadair', CLOCK_FREQ) > RELOAD_THRESHOLD ) {
//UserHistory.reload();
}
} else {
// this means we are playing so we should increment the total
// time we are listening
Player.listen_total += CLOCK_FREQ / 1000;
}
}
}
}
Scrubber.real.dom.css({ left: scrubberPosition + "%"});
}
_.each(Player.eventList, function(what) {
self['ytDebug_' + what] = function(that) {
ev.set("yt-" + what, that);
log(what, that);
}
});
function ytDebugHook() {
_.each(Player.eventList, function(what) {
Player.controls.addEventListener("on" + what, 'ytDebug_' + what);
});
}
self.onPlayerStateChange = function(e) {
if(e.target === Player.eager) {
return;
}
if(e.data === 1) {
if(_offsetRequest) {
if( e.target.getVideoUrl &&
e.target.getVideoUrl().search(_offsetRequest.id) !== -1 &&
Math.abs(_offsetRequest.offset - e.target.getCurrentTime()) > 10) {
e.target.seekTo(_offsetRequest.offset);
}
_offsetRequest = false;
}
if(!Player.activeData.length) {
Player.activeData.length = Timeline.player.controls.getDuration();
Timeline.updateOffset();
}
}
}
self.onYouTubePlayerAPIReady = function() {
Player.controls = new YT.Player('player-iframe-0', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
Player.eager = new YT.Player('player-iframe-1', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
when(() => Player.controls.loadVideoById).run(function(){
//ytDebugHook();
Player.active = Player.controls;
setInterval(updateytplayer, CLOCK_FREQ);
ev.set('player_load');
ev.fire('volume');
});
}
// When the player is loaded all the way
// then we can set the first player to be called
// the active player, which is the one that we will
// use for the most part.
//
// This is also when we start our polling function
// that updates the scrubber and the status of
// where we currently are.
ev('app_state', function(value) {
if (value == 'main') {
_totalRuntime = Utils.runtime(_db.byId);
var parts = (localStorage[ev.db.id + 'offset'] || "").split(' '), _off
if(parts.length == 2) {
Timeline.updateOffset();
_off = _db.current.findFirst('ytid', parts[0]).offset;
Timeline.seekTo(_off + parseFloat(parts[1]));
} else {
Timeline.seekTo((0.001 * (-_epoch + (+new Date()))) % _totalRuntime);
}
ev.isset("player_load", Results.scrollTo);
}
});
// If there's an error loading the video (usually due to
// embed restrictions), we have a backup player that can
// be used. There's significantly less control over this
// player so it's the backup plan.
ev('yt-Error', function(what) {
log("yt-error", what);
if(what == 100) {
Toolbar.status("Video not working; skipping");
replace(Timeline.current().id, true);
Timeline.next();
} else if(what != 150) {
//_backup.on();
} else {
Toolbar.status("Copyright issue; skipping");
// set the current track as unplayable
remote('updateTrack', Timeline.current().ytid, 'playable', false);
Timeline.next();
}
});
ev('volume', function(volume){
Toolbar.status("Set volume to " + volume.toFixed());
if(volume > 0 && Player.active.isMuted()) {
Player.active.unMute();
}
Player.active.setVolume(volume);
});
self.Player = Player;
return {
player: Player,
backup: _backup,
load: function(id) {
ev('app_state', 'main');
Store.get(id).then(getMissingDurations);
},
// the current track
current: () => Player.activeData,
earlyLoad: (obj, delay) => {
let localId = _earlyLoad = setTimeout(() => {
if(_earlyLoad == localId) {
let off = Scrubber.phantom.container ?
.95 * Scrubber.phantom.offset * obj.length : 0;
if(!obj) { console.log("no object", obj); return }
log(`Eager Loading ${obj.ytid} (${off})`);
Player.eager.loadVideoById({
videoId: obj.ytid,
startSeconds: off
});
Player.eager.pauseVideo();
}
}, delay || 500);
return localId;
},
remove: function(ytid){
var obj = _db.findFirst('ytid', ytid);
Toolbar.status("Removed " + obj.title);
Scrubber.real.remove();
// we should store that it was removed
ev.setadd('blacklist', obj.ytid);
// we need to be aware of our current.
_db.find('ytid', obj.ytid).remove();
_db.current.find('ytid', obj.ytid).remove();
Timeline.updateOffset();
Store.saveTracks();
ev.set('request_gen', {force: true});
},
pause: function(){
ev.isset('player_load', function(){
_isPlaying = false;
Player.active.pauseVideo();
$(".pause-play").html('<i class="fa fa-play"></i>');
});
},
pauseplay: function(){
if(_isPlaying) {
Timeline.pause();
} else {
Player.Play();
}
return _isPlaying;
},
updateOffset: function(){
var
index,
aggregate = 0,
order = 0,
prevIndex = false;
_totalRuntime = Utils.runtime(_db.byId);
for(index in _db.byId) {
if(prevIndex !== false) {
_db.byId[prevIndex].next = index;
_db.byId[index].previous = prevIndex;
}
prevIndex = index;
_db.byId[index].offset = aggregate;
aggregate += (parseInt(_db.byId[index].length) || 0);
}
// This final next pointer will enable wraparound
if(index) {
_db.byId[index].next = 0;
// TODO: Sometimes _db.byId[0] is undefined. I have to figure out
// how this offset problem occurs.
for(var ix = 0; !_db.byId[ix]; ix++);
_db.byId[ix].previous = index;
}
// we need to repoint this.
if(Player.activeData) {
var rec = _db.current.first({ytid: Player.activeData.ytid});
// if this track is in our active track then we assign it
if(rec) {
Player.activeData = rec;
} else {
// otherwise we make its next track the first track
Player.activeData.next = Player.activeData.previous = 0;
}
}
},
play: function(ytid, offset) {
if(!arguments.length) {
return Player.Play();
}
offset = offset || 0;
// Only run when the controller has been loaded
ev.isset('player_load', function(){
if(!Player.activeData || Player.activeData.ytid != ytid) {
// NOTE:
//
// This is the only entry point for loading and playing a video
// There are other references to playing and pausing, but this
// is the only line that activley loads the id and offset into
// the player. This is because there has to be an activeData in
// order to go forward.
if(Player.activeData) {
// Increment this count by 1 -- we only want to do it on the case of moving to a new track.
// This is so there's no misreporting of average listen time of view count based on the reloads
// that happen during development
var duration_listened = parseInt(Player.listen_total, 10);
// if it's zero, we listened to none of it, so we should ignore it.
if(duration_listened > 0) {
remote({
func: 'addListen',
id: Player.activeData.ytid,
title: Player.activeData.title,
length: Player.activeData.length
});
remote('updateDuration', Player.activeData.ytid, duration_listened);
}
}
Player.activeData = _db.first({ytid: ytid});
Player.listen_total = 0;
// After the assignment, then we add it to the userhistory
Timeline.playById(Player.active, Player.activeData.ytid, offset);
if(isNaN(Player.activeData.length)) {
log("No length for: ", Player.activeData);
// This should be a reference to everything...
Player.activeData.length = Player.controls.getDuration();
Store.saveTracks();
}
// At this point there is now active data, so anything depending
// on that can run.
ev('active_track', Player.activeData);
ev.set('active_data');
//log("Playing " + Player.activeData.ytid + Player.activeData.title);
} else {
Timeline.seekTo(offset, {isTrackRelative:true});
}
});
},
playById: function(object, id, offset){
var opts = {
videoId: id,
startSeconds: offset,
suggestedQuality: ev('quality')
};
Player.offset = offset;
let active = Timeline
.backup
.off(object);
let eagerVid = ( Player.eager && Player.eager.getVideoUrl ) ? Player.eager.getVideoUrl() : false;
if(eagerVid && eagerVid.search(id) !== -1) {
log("Eager loading");
Player.active.stopVideo();
active = Player.eager;
Player.eager = Player.active;
Player.active = active;
Player.active.seekTo(offset);
_offsetRequest = { id, offset };
_isPlaying = false;
} else {
active.loadVideoById(opts);
}
Player.Play();
// TODO: This feels like a bad place to do this.
// There should probably be a more abstract and less
// explicit way to handle this.
ev.set('deadair', 0);
Timeline.earlyLoad(
// this search is needed for some reason ... the next
// pointer at this point of the code is incorrect for
// filtered lists.
_db.byId[
_db.current.findFirst({ytid: Player.activeData.ytid}).next
],
4000
);
},
next: function(){
Timeline.seekTo(_db.byId[Player.activeData.next].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
prev: function(){
Timeline.seekTo(_db.byId[Player.activeData.previous].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
seekTo: function(offset, opts) {
opts = opts || {};
if(!offset) {
offset = _offset;
}
if (opts.isTrackRelative) {
offset += Player.activeData.offset;
}
if (opts.isOffsetRelative) {
offset += _offset;
}
Timeline.updateOffset();
// If it's between 0 and 1, we assume it's a relative offset ... otherwise we
// take it as the absolute; and we modulus it by the runtime to permit wrap-around
var absolute = ((offset < 1) ? offset * _totalRuntime : offset) % _totalRuntime;
absolute = Math.max(0, absolute);
absolute = Math.min(_totalRuntime, absolute);
//log("Seeking to " + absolute);
var track = _db.current.findFirst(function(row) {
return (row.offset < absolute && (row.offset + row.length) > absolute)
});
if(!track) {
track = _db.current.findFirst();
}
//log("Seeked to " + track.title);
if(track) {
if(!Player.activeData || (track.ytid != Player.activeData.ytid)) {
Timeline.play(track.ytid, absolute - track.offset);
} else {
Player.offset = absolute - track.offset;
Player.active.seekTo(absolute - track.offset);
// TODO: This feels like a bad place to do this.
ev.set('deadair', 0);
}
}
},
debug: function() {
var stats = {};
_.each([
'getAvailablePlaybackRates',
'getAvailableQualityLevels',
'getCurrentTime',
'getDuration',
'getPlaybackQuality',
'getPlaybackRate',
'getPlayerState',
'getVideoBytesLoaded',
'getVideoBytesTotal',
'getVideoEmbedCode',
'getVideoLoadedFraction',
'getVideoStartBytes',
'getVideoUrl',
'getVolume',
'isMuted'
], function(what){
stats[what] = Player.active[what]();
});
log(stats);
},
init: function() {
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/player_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// This doesn't reflect the filtered view ... it would be nice to know what the
// "previous" and "next" track is effeciently with a filter.
// The controls in the upper left of the timeline
$(".previous-track").click(Timeline.prev);
$(".next-track").click(Timeline.next);
$(".pause-play").click(Timeline.pauseplay);
$("#quality-down").click(Player.Quality.down);
$("#quality-up").click(Player.Quality.up);
}
};
})(); | random_line_split | |
timeline.js | var Timeline = (function(){
var
// The current offset into the total
// playlist, in seconds
_offset = 0,
// The total duration of all the tracks
// to be played.
_totalRuntime,
_isPlaying = true,
_earlyLoad = false,
_offsetRequest = false,
_backup = {},
_template = {},
_rateWindow = [],
// preferred quality
_quality,
Player = {
controls: false,
eventList: [
'StateChange',
'PlaybackQualityChange',
'PlaybackRateChange',
'Error',
'ApiChange'
],
Quality: {
set: function(what) {
if(what < 0 || !_.isNumber(what)) { what = 0; }
_quality = QUALITY_LEVELS[what % QUALITY_LEVELS.length];
$("#quality-down")[ (_quality == _.last(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
$("#quality-up")[ (_quality == _.first(QUALITY_LEVELS) ? 'add' : 'remove') + 'Class']("disabled");
// This function is invoked at startup without arguments.
if(arguments.length) {
Toolbar.status("Set preferred quality to " + _quality);
}
ev('quality', _quality);
return _quality;
},
down: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) + 1),
up: () => Player.Quality.set(_.indexOf(QUALITY_LEVELS, _quality) - 1),
},
Play: function(){
ev.isset('player_load', function(){
if(!_isPlaying) {
_isPlaying = true;
Player.active.playVideo();
$(".pause-play").html('<i class="fa fa-stop"></i>');
}
});
}
};
Player.Quality.set();
// The "current" list of videos is the same as all.
// the current could point to some other database entirely
_db.byId = _db.ALL = _db.view('id');
_db.current = _db;
_backup = {
start: 0,
off: function(){
if(Player.active.off) {
$("#backupPlayer").html('');
Player.active = Player.controls;
}
return Player.active;
},
getCurrentTime: function(){
return Player.offset + ((new Date()) - _backup._start) / 1000;
},
getDuration: () => Player.activeData.length,
pauseVideo: function() {
Player.offset = Player.active.getCurrentTime();
$("#backupPlayer").html('');
},
seekTo: function(what) {
Player.active.pauseVideo();
Player.offset = what;
Player.active.playVideo();
},
playVideo: function(){
// We pad for load time
Player.active._start = (+new Date()) + 2000;
$("#backupPlayer").html(
Template.backup({
offset: Math.floor(Player.offset),
ytid: Player.activeData.ytid
})
);
},
getPlaybackQuality: () => _quality,
on: function() {
Player.offset = Player.offset || 0;
Player.active = _backup;
Player.active.playVideo();
}
};
function updateytplayer() {
// This is the "clock" that everything is rated by.
var
// The tick is the clock-time ... this is different from the
// _offset which is where we should be in the playlist
tick = ev.incr('tick'),
scrubberPosition = 0;
// Make sure we aren't the backup player
if(Player.active && !Player.active.on && Player.active.getVideoBytesLoaded && Player.activeData) {
var rateStart = 1e10,
stats,
dtime,
ctime,
frac;
if(!isMobile) {
dtime = Player.active.getDuration() || 0;
ctime = Player.active.getCurrentTime() || 0;
frac = Player.active.getVideoLoadedFraction() || 0;
stats = [
dtime.toFixed(3),
ctime.toFixed(3),
Player.activeData.length,
Player.active.getPlayerState(),
// How far in
(
Player.active.getCurrentTime() /
Player.active.getDuration()
).toFixed(3),
// How much do we have
frac.toFixed(3)
];
debug(stats);
}
}
// The mechanics for moving the centroid
if(Player.active.getCurrentTime) {
var time = Player.active.getCurrentTime(),
prevOffset = _offset;
if(Player.activeData) {
localStorage[ev.db.id + 'offset'] = [Player.activeData.ytid, time].join(' ');
}
if (time > 0 && Player.activeData) {
// This generates the scrubber in the results tab below.
// We first check to see if the video is in the viewport window
if(Results.viewable[Player.activeData.ytid]) {
// And if so we get its dom and other necessary things.
var entry = Results.viewable[Player.activeData.ytid];
// If we are in the purview of the track, then we can move on.
// Otherwise, place ourselves underneath it so that the percentage
// calculations will work out.
scrubberPosition = time * 100 / Player.active.getDuration();
if(Scrubber.real.attach(entry.jquery.timeline)) {
entry.jquery.timeline.css('display','block');
}
} else {
Scrubber.real.remove();
}
// For some reason it appears that this value can
// toggle back to different quality sometimes. So
// we check to see where it's at.
/*
if( Player.active.getAvailableQualityLevels().indexOf(_quality) !== -1 &&
Player.active.getPlaybackQuality() != _quality) {
log('set-quality');
/Player.active.setPlaybackQuality(_quality);
}
*/
// There's this YouTube bug (2013/05/11) that can sometimes report
// totally incorrect values for the duration. If it's more than
// 20 seconds off and greater than 0, then we try to reload the
// video. This bug seems to have been around for almost a year or
// so? Simply loading the video again appears to fix it.
if(Player.active.getDuration() > 30 && (Player.active.getDuration() + 20 < Player.activeData.length)) {
debug("reload " + new Date());
}
// If the player is active and we are at the end of a song, then move ahead.
if(time > 0 && Player.active.getDuration() > 0 && (Player.active.getDuration() - time <= 0)) {
_offset += 1;
debug("seeking " + new Date());
Timeline.seekTo(_offset);
} else {
_offset = Player.activeData.offset + time;
}
// If we are supposed to be playing
if (_isPlaying) {
// And we haven't moved forward
if (_offset - prevOffset == 0) {
// This means there's been dead-air for a few seconds.
if ( ev.incr('deadair', CLOCK_FREQ) > RELOAD_THRESHOLD ) {
//UserHistory.reload();
}
} else {
// this means we are playing so we should increment the total
// time we are listening
Player.listen_total += CLOCK_FREQ / 1000;
}
}
}
}
Scrubber.real.dom.css({ left: scrubberPosition + "%"});
}
_.each(Player.eventList, function(what) {
self['ytDebug_' + what] = function(that) {
ev.set("yt-" + what, that);
log(what, that);
}
});
function ytDebugHook() |
self.onPlayerStateChange = function(e) {
if(e.target === Player.eager) {
return;
}
if(e.data === 1) {
if(_offsetRequest) {
if( e.target.getVideoUrl &&
e.target.getVideoUrl().search(_offsetRequest.id) !== -1 &&
Math.abs(_offsetRequest.offset - e.target.getCurrentTime()) > 10) {
e.target.seekTo(_offsetRequest.offset);
}
_offsetRequest = false;
}
if(!Player.activeData.length) {
Player.activeData.length = Timeline.player.controls.getDuration();
Timeline.updateOffset();
}
}
}
self.onYouTubePlayerAPIReady = function() {
Player.controls = new YT.Player('player-iframe-0', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
Player.eager = new YT.Player('player-iframe-1', {
height: 120, width: 160,
events: {
onStateChange: onPlayerStateChange
}
});
when(() => Player.controls.loadVideoById).run(function(){
//ytDebugHook();
Player.active = Player.controls;
setInterval(updateytplayer, CLOCK_FREQ);
ev.set('player_load');
ev.fire('volume');
});
}
// When the player is loaded all the way
// then we can set the first player to be called
// the active player, which is the one that we will
// use for the most part.
//
// This is also when we start our polling function
// that updates the scrubber and the status of
// where we currently are.
ev('app_state', function(value) {
if (value == 'main') {
_totalRuntime = Utils.runtime(_db.byId);
var parts = (localStorage[ev.db.id + 'offset'] || "").split(' '), _off
if(parts.length == 2) {
Timeline.updateOffset();
_off = _db.current.findFirst('ytid', parts[0]).offset;
Timeline.seekTo(_off + parseFloat(parts[1]));
} else {
Timeline.seekTo((0.001 * (-_epoch + (+new Date()))) % _totalRuntime);
}
ev.isset("player_load", Results.scrollTo);
}
});
// If there's an error loading the video (usually due to
// embed restrictions), we have a backup player that can
// be used. There's significantly less control over this
// player so it's the backup plan.
ev('yt-Error', function(what) {
log("yt-error", what);
if(what == 100) {
Toolbar.status("Video not working; skipping");
replace(Timeline.current().id, true);
Timeline.next();
} else if(what != 150) {
//_backup.on();
} else {
Toolbar.status("Copyright issue; skipping");
// set the current track as unplayable
remote('updateTrack', Timeline.current().ytid, 'playable', false);
Timeline.next();
}
});
ev('volume', function(volume){
Toolbar.status("Set volume to " + volume.toFixed());
if(volume > 0 && Player.active.isMuted()) {
Player.active.unMute();
}
Player.active.setVolume(volume);
});
self.Player = Player;
return {
player: Player,
backup: _backup,
load: function(id) {
ev('app_state', 'main');
Store.get(id).then(getMissingDurations);
},
// the current track
current: () => Player.activeData,
earlyLoad: (obj, delay) => {
let localId = _earlyLoad = setTimeout(() => {
if(_earlyLoad == localId) {
let off = Scrubber.phantom.container ?
.95 * Scrubber.phantom.offset * obj.length : 0;
if(!obj) { console.log("no object", obj); return }
log(`Eager Loading ${obj.ytid} (${off})`);
Player.eager.loadVideoById({
videoId: obj.ytid,
startSeconds: off
});
Player.eager.pauseVideo();
}
}, delay || 500);
return localId;
},
remove: function(ytid){
var obj = _db.findFirst('ytid', ytid);
Toolbar.status("Removed " + obj.title);
Scrubber.real.remove();
// we should store that it was removed
ev.setadd('blacklist', obj.ytid);
// we need to be aware of our current.
_db.find('ytid', obj.ytid).remove();
_db.current.find('ytid', obj.ytid).remove();
Timeline.updateOffset();
Store.saveTracks();
ev.set('request_gen', {force: true});
},
pause: function(){
ev.isset('player_load', function(){
_isPlaying = false;
Player.active.pauseVideo();
$(".pause-play").html('<i class="fa fa-play"></i>');
});
},
pauseplay: function(){
if(_isPlaying) {
Timeline.pause();
} else {
Player.Play();
}
return _isPlaying;
},
updateOffset: function(){
var
index,
aggregate = 0,
order = 0,
prevIndex = false;
_totalRuntime = Utils.runtime(_db.byId);
for(index in _db.byId) {
if(prevIndex !== false) {
_db.byId[prevIndex].next = index;
_db.byId[index].previous = prevIndex;
}
prevIndex = index;
_db.byId[index].offset = aggregate;
aggregate += (parseInt(_db.byId[index].length) || 0);
}
// This final next pointer will enable wraparound
if(index) {
_db.byId[index].next = 0;
// TODO: Sometimes _db.byId[0] is undefined. I have to figure out
// how this offset problem occurs.
for(var ix = 0; !_db.byId[ix]; ix++);
_db.byId[ix].previous = index;
}
// we need to repoint this.
if(Player.activeData) {
var rec = _db.current.first({ytid: Player.activeData.ytid});
// if this track is in our active track then we assign it
if(rec) {
Player.activeData = rec;
} else {
// otherwise we make its next track the first track
Player.activeData.next = Player.activeData.previous = 0;
}
}
},
play: function(ytid, offset) {
if(!arguments.length) {
return Player.Play();
}
offset = offset || 0;
// Only run when the controller has been loaded
ev.isset('player_load', function(){
if(!Player.activeData || Player.activeData.ytid != ytid) {
// NOTE:
//
// This is the only entry point for loading and playing a video
// There are other references to playing and pausing, but this
// is the only line that activley loads the id and offset into
// the player. This is because there has to be an activeData in
// order to go forward.
if(Player.activeData) {
// Increment this count by 1 -- we only want to do it on the case of moving to a new track.
// This is so there's no misreporting of average listen time of view count based on the reloads
// that happen during development
var duration_listened = parseInt(Player.listen_total, 10);
// if it's zero, we listened to none of it, so we should ignore it.
if(duration_listened > 0) {
remote({
func: 'addListen',
id: Player.activeData.ytid,
title: Player.activeData.title,
length: Player.activeData.length
});
remote('updateDuration', Player.activeData.ytid, duration_listened);
}
}
Player.activeData = _db.first({ytid: ytid});
Player.listen_total = 0;
// After the assignment, then we add it to the userhistory
Timeline.playById(Player.active, Player.activeData.ytid, offset);
if(isNaN(Player.activeData.length)) {
log("No length for: ", Player.activeData);
// This should be a reference to everything...
Player.activeData.length = Player.controls.getDuration();
Store.saveTracks();
}
// At this point there is now active data, so anything depending
// on that can run.
ev('active_track', Player.activeData);
ev.set('active_data');
//log("Playing " + Player.activeData.ytid + Player.activeData.title);
} else {
Timeline.seekTo(offset, {isTrackRelative:true});
}
});
},
playById: function(object, id, offset){
var opts = {
videoId: id,
startSeconds: offset,
suggestedQuality: ev('quality')
};
Player.offset = offset;
let active = Timeline
.backup
.off(object);
let eagerVid = ( Player.eager && Player.eager.getVideoUrl ) ? Player.eager.getVideoUrl() : false;
if(eagerVid && eagerVid.search(id) !== -1) {
log("Eager loading");
Player.active.stopVideo();
active = Player.eager;
Player.eager = Player.active;
Player.active = active;
Player.active.seekTo(offset);
_offsetRequest = { id, offset };
_isPlaying = false;
} else {
active.loadVideoById(opts);
}
Player.Play();
// TODO: This feels like a bad place to do this.
// There should probably be a more abstract and less
// explicit way to handle this.
ev.set('deadair', 0);
Timeline.earlyLoad(
// this search is needed for some reason ... the next
// pointer at this point of the code is incorrect for
// filtered lists.
_db.byId[
_db.current.findFirst({ytid: Player.activeData.ytid}).next
],
4000
);
},
next: function(){
Timeline.seekTo(_db.byId[Player.activeData.next].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
prev: function(){
Timeline.seekTo(_db.byId[Player.activeData.previous].offset + 1);
Scrubber.real.dom.css({ left: 0 });
},
seekTo: function(offset, opts) {
opts = opts || {};
if(!offset) {
offset = _offset;
}
if (opts.isTrackRelative) {
offset += Player.activeData.offset;
}
if (opts.isOffsetRelative) {
offset += _offset;
}
Timeline.updateOffset();
// If it's between 0 and 1, we assume it's a relative offset ... otherwise we
// take it as the absolute; and we modulus it by the runtime to permit wrap-around
var absolute = ((offset < 1) ? offset * _totalRuntime : offset) % _totalRuntime;
absolute = Math.max(0, absolute);
absolute = Math.min(_totalRuntime, absolute);
//log("Seeking to " + absolute);
var track = _db.current.findFirst(function(row) {
return (row.offset < absolute && (row.offset + row.length) > absolute)
});
if(!track) {
track = _db.current.findFirst();
}
//log("Seeked to " + track.title);
if(track) {
if(!Player.activeData || (track.ytid != Player.activeData.ytid)) {
Timeline.play(track.ytid, absolute - track.offset);
} else {
Player.offset = absolute - track.offset;
Player.active.seekTo(absolute - track.offset);
// TODO: This feels like a bad place to do this.
ev.set('deadair', 0);
}
}
},
debug: function() {
var stats = {};
_.each([
'getAvailablePlaybackRates',
'getAvailableQualityLevels',
'getCurrentTime',
'getDuration',
'getPlaybackQuality',
'getPlaybackRate',
'getPlayerState',
'getVideoBytesLoaded',
'getVideoBytesTotal',
'getVideoEmbedCode',
'getVideoLoadedFraction',
'getVideoStartBytes',
'getVideoUrl',
'getVolume',
'isMuted'
], function(what){
stats[what] = Player.active[what]();
});
log(stats);
},
init: function() {
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/player_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// This doesn't reflect the filtered view ... it would be nice to know what the
// "previous" and "next" track is effeciently with a filter.
// The controls in the upper left of the timeline
$(".previous-track").click(Timeline.prev);
$(".next-track").click(Timeline.next);
$(".pause-play").click(Timeline.pauseplay);
$("#quality-down").click(Player.Quality.down);
$("#quality-up").click(Player.Quality.up);
}
};
})();
| {
_.each(Player.eventList, function(what) {
Player.controls.addEventListener("on" + what, 'ytDebug_' + what);
});
} | identifier_body |
geneOverlap.v01.py | #!/usr/local/bin/python3
## Written by Atul Kakrana - Kakrana@udel.edu
## Script takes a (transcript) summary file with start,end,chr and strand info and finds overlap with GTF file
## Returns file with overlapping genes, type of overlap and orientation of overlap
## Type of overlap - 5' Annotated genes overlaps at 5' of transcript 3' overlaps at 3' of transcript 8' completly enclaves transcript
## 0' transcript overlaps annotated gene
## Orientation - F: Overlapping gene is at same strand as transcript R: both are on different strands
## USER SETTINGS #######################################
gtf = "Zea_mays.AGPv3.27.gtf"
summary = "Summary.txt" ## Summery file with chr,start,stop and strand
delim = "\t" ## Delimiter for summary file
head = 'Y' ## Header is summary file: 'Y' else: 'N'
name = 2
chromo = 6
start = 3
end = 4
strand = 5
geneType = 20 ## Column for coding or non-coding, if info not available then mention 99
makeDB = 0 ## If DB for GTF file is not present in present directory then make : 1 else: 0
## IMPORTS ############################################
import os,sys
import sqlite3
## FUNCTIONS ##########################################
def overlapCheck(summary,conn,annotable): | ## Test DB
cur = conn.cursor()
## Test
# cur.execute("PRAGMA table_info(%s)" % (annotable))
# desc = cur.fetchall()
# print(desc)
# cur.execute("SELECT geneName FROM %s LIMIT 10" % (annotable))
# test = cur.fetchall()
# print(test)
outFile = "%s.overlap.txt" % summary.rpartition(".")[0]
fh_out = open(outFile,'w')
fh_out.write("Trans\toverlapGenes\toverlapFLags\toverlapConf\n")
fh_in = open(summary,'r')
if head == "Y":
fh_in.readline()
sumRead = fh_in.readlines()
for i in sumRead:
geneList = [] ## Store overlap genes
flagList = [] ## Store diffrent overlap flags - 5',3', 0' [if gene is enclaved within our transcript] and 8' [if gene extends our transcript at both ends]
confList = [] ## Store overlap configuration
resList = [] ## Store a single merged list fo results
ent = i.split(delim)
trans = ent[name-1]
achr = ent[chromo-1]
astart = ent[start-1]
aend = ent[end-1]
astrand = ent[strand-1]
# print("\n***Entry:",trans,achr,astart,aend,astrand)
## Gene end overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND end between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime5 = cur.fetchall()
# print("5Prime\n%s" % prime5)
tempP5 = [] ## Temp store gene names for checking later
if prime5:
for i in prime5:
tempP5.append(i[4])
## Gene start is overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime3 = cur.fetchall()
# print("3Prime\n%s" % (prime3))
tempP3 = []
if prime3:
for i in prime3:
tempP3.append(i[4])
## Gene that completly enclaves our transcript <------ ----trans---- -------->
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start < %s AND end > %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime8 = cur.fetchall()
# print("8Prime\n%s\n" % prime8)
if prime5:
for i in prime5:
if i[4] in tempP3:
# print("Transcript enclaves the annotated gene'")
flag = 0
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
# sys.exit()
else:
# print("Gene overlaps only at one end")
if astrand == "+":
flag = 5
elif astrand == "-":
flag = 3
# print("Appending prime5:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime3:
for i in prime3:
if i[4] not in tempP5:
# print("Gene Overlaps only at one end")
if astrand == "+":
flag = 3
elif astrand == "-":
flag = 5
# print("Appending prime3:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime8:
for i in prime8:
# print("Annotated gene enclaves our transcript")
# print(i)
flag = 8
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print("geneList",geneList,"flagList",flagList,"confList",confList)
resList = list(zip(geneList,flagList,confList))
# print("Final Results",resList)
# print("FinalRes:%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
if geneList:
fh_out.write("%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
else:
## There are no overlaps
fh_out.write("%s\tNA\tNA\tNA\n" % (trans))
fh_out.close()
fh_in.close()
print("Exiting function - overlapCheck\n")
return outFile
def gtfParser(gtf):
print("Function: gtfParser")
## file I/O
fh_in = open(gtf,'r')
gtfRead = fh_in.readlines()
parsedGTF = [] ## List to hold parsed GTF entries
for i in gtfRead:
if i[0].isdigit():
ent = i.split("\t")
if ent[2] == "gene":
# print(ent)
achr = ent[0]
gStart = ent[3]
gEnd = ent[4]
gStrand = ent[6]
info = ent[8].strip("\n").split(";")
# print(info,len(info))
if len(info) == 5:
## Protein coding gene with a version number
gid = info[0].split()[1].replace('"','')
gVer = info[1].split()[1].replace('"','')
gSource = info[2].split()[1].replace('"','')
gType = info[3].split()[1].replace('"','')
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
elif len(info) == 4:
## miRNA with no version number
gid = info[0].split()[1].replace('"','')
gVer = "1"
gSource = info[1].split()[1].replace('"','')
gType = info[2].split()[1].replace('"','')
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
else:
pass
print("First 10 entries of parsedGTF list: %s" % (parsedGTF[:10]))
print ("Exiting function\n")
return parsedGTF
def summParser(summary):
'''Create a a list of summary file'''
print("\nFunction: summParser")
fh_in = open(summary,'r')
if head == 'Y':
fh_in.readline()
summRead = fh_in.readlines()
geneSet = set()
# for i in summRead:
# ent = i.split("\t")
# agene = ent[gene-1]
# geneSet.add(agene)
parsedSumm = []
acount = 0 ## To count the entries
for i in summRead:
# print(i)
ent = i.split("\t") ## It has to be tab seprated file always
agene = ent[gene-1]
if agene not in geneSet:
## New entry add
print("Total entries scanned: %s | Length of GTF Dictionary %s" % (acount,len(summDict)))
print("Exiting function - summaryDict\n")
return parsedSumm
def tableMaker(parsedGTF,parsedSumm):
'''
Make a track specific sqlite DB for probe ID and coords that will be used
to query probes on 20MB interval. Each probe entry has following info:
probe_id,FC,pval,chr,start,end
'''
mergedInfo = parsedGTF + parsedSumm
print("Function: tableMaker")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
cur = conn.cursor()
cur.execute('''DROP TABLE IF EXISTS %s''' % (annotable)) ### Drop Old table - while testing
conn.commit()
try:
cur.execute('''CREATE TABLE %s (chr integer, start integer, end integer, strand varchar(10), geneName varchar(255), geneVersion varchar(255), geneSource varchar(255), geneType varchar(255) )''' % (annotable))
conn.commit()
### Fill the table
acount = 0 ## COunt of number of gene entries
for ent in mergedInfo:
gChr,gStart,gEnd,gStrand,gid,gVer,gSource,gType = ent
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
cur.execute("INSERT INTO %s VALUES (%d,%d,%d,'%s','%s',%d,'%s','%s')" % (annotable, int(gChr), int(gStart), int(gEnd), str(gStrand), str(gid), int(gVer), str(gSource), str(gType) ))
acount +=1
except sqlite3.Error:
print('ERROR:',Error)
sys.exit()
cur.execute("SELECT * FROM %s LIMIT 10" % (annotable))
test = cur.fetchall()
print("First 10 entries:\n%s" % (test))
cur.execute("SELECT COUNT(*) FROM %s" % (annotable))
totalEnt = cur.fetchall()
print("\nTotal entries in table:%s | Total entries in file: %s" % (totalEnt[0][0],acount) )
conn.commit() ## Imporatnt to save table
print("Exiting function\n")
return annoDB,annotable,conn
def main():
if makeDB == 1:
parsedGTF = gtfParser(gtf)
annoDB,annotable,conn= tableMaker(parsedGTF)
elif makeDB == 0:
print("Existing annotation DB will be used, to make a new DB please turn on makeDB from settings")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
else:
print("'makeDB' variable takes boolean values")
resFile = overlapCheck(summary,conn,annotable)
print("Overlap Check complete - see '%s' for results" % (resFile))
if __name__ == '__main__':
main()
## v01 <-13th July -15 |
print("Function: overlapCheck")
| random_line_split |
geneOverlap.v01.py | #!/usr/local/bin/python3
## Written by Atul Kakrana - Kakrana@udel.edu
## Script takes a (transcript) summary file with start,end,chr and strand info and finds overlap with GTF file
## Returns file with overlapping genes, type of overlap and orientation of overlap
## Type of overlap - 5' Annotated genes overlaps at 5' of transcript 3' overlaps at 3' of transcript 8' completly enclaves transcript
## 0' transcript overlaps annotated gene
## Orientation - F: Overlapping gene is at same strand as transcript R: both are on different strands
## USER SETTINGS #######################################
gtf = "Zea_mays.AGPv3.27.gtf"
summary = "Summary.txt" ## Summery file with chr,start,stop and strand
delim = "\t" ## Delimiter for summary file
head = 'Y' ## Header is summary file: 'Y' else: 'N'
name = 2
chromo = 6
start = 3
end = 4
strand = 5
geneType = 20 ## Column for coding or non-coding, if info not available then mention 99
makeDB = 0 ## If DB for GTF file is not present in present directory then make : 1 else: 0
## IMPORTS ############################################
import os,sys
import sqlite3
## FUNCTIONS ##########################################
def overlapCheck(summary,conn,annotable):
print("Function: overlapCheck")
## Test DB
cur = conn.cursor()
## Test
# cur.execute("PRAGMA table_info(%s)" % (annotable))
# desc = cur.fetchall()
# print(desc)
# cur.execute("SELECT geneName FROM %s LIMIT 10" % (annotable))
# test = cur.fetchall()
# print(test)
outFile = "%s.overlap.txt" % summary.rpartition(".")[0]
fh_out = open(outFile,'w')
fh_out.write("Trans\toverlapGenes\toverlapFLags\toverlapConf\n")
fh_in = open(summary,'r')
if head == "Y":
fh_in.readline()
sumRead = fh_in.readlines()
for i in sumRead:
geneList = [] ## Store overlap genes
flagList = [] ## Store diffrent overlap flags - 5',3', 0' [if gene is enclaved within our transcript] and 8' [if gene extends our transcript at both ends]
confList = [] ## Store overlap configuration
resList = [] ## Store a single merged list fo results
ent = i.split(delim)
trans = ent[name-1]
achr = ent[chromo-1]
astart = ent[start-1]
aend = ent[end-1]
astrand = ent[strand-1]
# print("\n***Entry:",trans,achr,astart,aend,astrand)
## Gene end overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND end between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime5 = cur.fetchall()
# print("5Prime\n%s" % prime5)
tempP5 = [] ## Temp store gene names for checking later
if prime5:
for i in prime5:
tempP5.append(i[4])
## Gene start is overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime3 = cur.fetchall()
# print("3Prime\n%s" % (prime3))
tempP3 = []
if prime3:
for i in prime3:
tempP3.append(i[4])
## Gene that completly enclaves our transcript <------ ----trans---- -------->
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start < %s AND end > %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime8 = cur.fetchall()
# print("8Prime\n%s\n" % prime8)
if prime5:
for i in prime5:
if i[4] in tempP3:
# print("Transcript enclaves the annotated gene'")
flag = 0
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
# sys.exit()
else:
# print("Gene overlaps only at one end")
if astrand == "+":
flag = 5
elif astrand == "-":
flag = 3
# print("Appending prime5:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime3:
for i in prime3:
if i[4] not in tempP5:
# print("Gene Overlaps only at one end")
if astrand == "+":
flag = 3
elif astrand == "-":
flag = 5
# print("Appending prime3:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime8:
for i in prime8:
# print("Annotated gene enclaves our transcript")
# print(i)
flag = 8
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print("geneList",geneList,"flagList",flagList,"confList",confList)
resList = list(zip(geneList,flagList,confList))
# print("Final Results",resList)
# print("FinalRes:%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
if geneList:
fh_out.write("%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
else:
## There are no overlaps
fh_out.write("%s\tNA\tNA\tNA\n" % (trans))
fh_out.close()
fh_in.close()
print("Exiting function - overlapCheck\n")
return outFile
def gtfParser(gtf):
print("Function: gtfParser")
## file I/O
fh_in = open(gtf,'r')
gtfRead = fh_in.readlines()
parsedGTF = [] ## List to hold parsed GTF entries
for i in gtfRead:
if i[0].isdigit():
ent = i.split("\t")
if ent[2] == "gene":
# print(ent)
achr = ent[0]
gStart = ent[3]
gEnd = ent[4]
gStrand = ent[6]
info = ent[8].strip("\n").split(";")
# print(info,len(info))
if len(info) == 5:
## Protein coding gene with a version number
gid = info[0].split()[1].replace('"','')
gVer = info[1].split()[1].replace('"','')
gSource = info[2].split()[1].replace('"','')
gType = info[3].split()[1].replace('"','')
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
elif len(info) == 4:
## miRNA with no version number
gid = info[0].split()[1].replace('"','')
gVer = "1"
gSource = info[1].split()[1].replace('"','')
gType = info[2].split()[1].replace('"','')
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
else:
pass
print("First 10 entries of parsedGTF list: %s" % (parsedGTF[:10]))
print ("Exiting function\n")
return parsedGTF
def | (summary):
'''Create a a list of summary file'''
print("\nFunction: summParser")
fh_in = open(summary,'r')
if head == 'Y':
fh_in.readline()
summRead = fh_in.readlines()
geneSet = set()
# for i in summRead:
# ent = i.split("\t")
# agene = ent[gene-1]
# geneSet.add(agene)
parsedSumm = []
acount = 0 ## To count the entries
for i in summRead:
# print(i)
ent = i.split("\t") ## It has to be tab seprated file always
agene = ent[gene-1]
if agene not in geneSet:
## New entry add
print("Total entries scanned: %s | Length of GTF Dictionary %s" % (acount,len(summDict)))
print("Exiting function - summaryDict\n")
return parsedSumm
def tableMaker(parsedGTF,parsedSumm):
'''
Make a track specific sqlite DB for probe ID and coords that will be used
to query probes on 20MB interval. Each probe entry has following info:
probe_id,FC,pval,chr,start,end
'''
mergedInfo = parsedGTF + parsedSumm
print("Function: tableMaker")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
cur = conn.cursor()
cur.execute('''DROP TABLE IF EXISTS %s''' % (annotable)) ### Drop Old table - while testing
conn.commit()
try:
cur.execute('''CREATE TABLE %s (chr integer, start integer, end integer, strand varchar(10), geneName varchar(255), geneVersion varchar(255), geneSource varchar(255), geneType varchar(255) )''' % (annotable))
conn.commit()
### Fill the table
acount = 0 ## COunt of number of gene entries
for ent in mergedInfo:
gChr,gStart,gEnd,gStrand,gid,gVer,gSource,gType = ent
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
cur.execute("INSERT INTO %s VALUES (%d,%d,%d,'%s','%s',%d,'%s','%s')" % (annotable, int(gChr), int(gStart), int(gEnd), str(gStrand), str(gid), int(gVer), str(gSource), str(gType) ))
acount +=1
except sqlite3.Error:
print('ERROR:',Error)
sys.exit()
cur.execute("SELECT * FROM %s LIMIT 10" % (annotable))
test = cur.fetchall()
print("First 10 entries:\n%s" % (test))
cur.execute("SELECT COUNT(*) FROM %s" % (annotable))
totalEnt = cur.fetchall()
print("\nTotal entries in table:%s | Total entries in file: %s" % (totalEnt[0][0],acount) )
conn.commit() ## Imporatnt to save table
print("Exiting function\n")
return annoDB,annotable,conn
def main():
if makeDB == 1:
parsedGTF = gtfParser(gtf)
annoDB,annotable,conn= tableMaker(parsedGTF)
elif makeDB == 0:
print("Existing annotation DB will be used, to make a new DB please turn on makeDB from settings")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
else:
print("'makeDB' variable takes boolean values")
resFile = overlapCheck(summary,conn,annotable)
print("Overlap Check complete - see '%s' for results" % (resFile))
if __name__ == '__main__':
main()
## v01 <-13th July -15
| summParser | identifier_name |
geneOverlap.v01.py | #!/usr/local/bin/python3
## Written by Atul Kakrana - Kakrana@udel.edu
## Script takes a (transcript) summary file with start,end,chr and strand info and finds overlap with GTF file
## Returns file with overlapping genes, type of overlap and orientation of overlap
## Type of overlap - 5' Annotated genes overlaps at 5' of transcript 3' overlaps at 3' of transcript 8' completly enclaves transcript
## 0' transcript overlaps annotated gene
## Orientation - F: Overlapping gene is at same strand as transcript R: both are on different strands
## USER SETTINGS #######################################
gtf = "Zea_mays.AGPv3.27.gtf"
summary = "Summary.txt" ## Summery file with chr,start,stop and strand
delim = "\t" ## Delimiter for summary file
head = 'Y' ## Header is summary file: 'Y' else: 'N'
name = 2
chromo = 6
start = 3
end = 4
strand = 5
geneType = 20 ## Column for coding or non-coding, if info not available then mention 99
makeDB = 0 ## If DB for GTF file is not present in present directory then make : 1 else: 0
## IMPORTS ############################################
import os,sys
import sqlite3
## FUNCTIONS ##########################################
def overlapCheck(summary,conn,annotable):
print("Function: overlapCheck")
## Test DB
cur = conn.cursor()
## Test
# cur.execute("PRAGMA table_info(%s)" % (annotable))
# desc = cur.fetchall()
# print(desc)
# cur.execute("SELECT geneName FROM %s LIMIT 10" % (annotable))
# test = cur.fetchall()
# print(test)
outFile = "%s.overlap.txt" % summary.rpartition(".")[0]
fh_out = open(outFile,'w')
fh_out.write("Trans\toverlapGenes\toverlapFLags\toverlapConf\n")
fh_in = open(summary,'r')
if head == "Y":
fh_in.readline()
sumRead = fh_in.readlines()
for i in sumRead:
geneList = [] ## Store overlap genes
flagList = [] ## Store diffrent overlap flags - 5',3', 0' [if gene is enclaved within our transcript] and 8' [if gene extends our transcript at both ends]
confList = [] ## Store overlap configuration
resList = [] ## Store a single merged list fo results
ent = i.split(delim)
trans = ent[name-1]
achr = ent[chromo-1]
astart = ent[start-1]
aend = ent[end-1]
astrand = ent[strand-1]
# print("\n***Entry:",trans,achr,astart,aend,astrand)
## Gene end overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND end between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime5 = cur.fetchall()
# print("5Prime\n%s" % prime5)
tempP5 = [] ## Temp store gene names for checking later
if prime5:
for i in prime5:
tempP5.append(i[4])
## Gene start is overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime3 = cur.fetchall()
# print("3Prime\n%s" % (prime3))
tempP3 = []
if prime3:
for i in prime3:
tempP3.append(i[4])
## Gene that completly enclaves our transcript <------ ----trans---- -------->
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start < %s AND end > %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime8 = cur.fetchall()
# print("8Prime\n%s\n" % prime8)
if prime5:
for i in prime5:
if i[4] in tempP3:
# print("Transcript enclaves the annotated gene'")
flag = 0
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
# sys.exit()
else:
# print("Gene overlaps only at one end")
if astrand == "+":
flag = 5
elif astrand == "-":
flag = 3
# print("Appending prime5:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime3:
for i in prime3:
if i[4] not in tempP5:
# print("Gene Overlaps only at one end")
if astrand == "+":
flag = 3
elif astrand == "-":
flag = 5
# print("Appending prime3:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime8:
for i in prime8:
# print("Annotated gene enclaves our transcript")
# print(i)
flag = 8
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print("geneList",geneList,"flagList",flagList,"confList",confList)
resList = list(zip(geneList,flagList,confList))
# print("Final Results",resList)
# print("FinalRes:%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
if geneList:
fh_out.write("%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
else:
## There are no overlaps
fh_out.write("%s\tNA\tNA\tNA\n" % (trans))
fh_out.close()
fh_in.close()
print("Exiting function - overlapCheck\n")
return outFile
def gtfParser(gtf):
|
def summParser(summary):
'''Create a a list of summary file'''
print("\nFunction: summParser")
fh_in = open(summary,'r')
if head == 'Y':
fh_in.readline()
summRead = fh_in.readlines()
geneSet = set()
# for i in summRead:
# ent = i.split("\t")
# agene = ent[gene-1]
# geneSet.add(agene)
parsedSumm = []
acount = 0 ## To count the entries
for i in summRead:
# print(i)
ent = i.split("\t") ## It has to be tab seprated file always
agene = ent[gene-1]
if agene not in geneSet:
## New entry add
print("Total entries scanned: %s | Length of GTF Dictionary %s" % (acount,len(summDict)))
print("Exiting function - summaryDict\n")
return parsedSumm
def tableMaker(parsedGTF,parsedSumm):
'''
Make a track specific sqlite DB for probe ID and coords that will be used
to query probes on 20MB interval. Each probe entry has following info:
probe_id,FC,pval,chr,start,end
'''
mergedInfo = parsedGTF + parsedSumm
print("Function: tableMaker")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
cur = conn.cursor()
cur.execute('''DROP TABLE IF EXISTS %s''' % (annotable)) ### Drop Old table - while testing
conn.commit()
try:
cur.execute('''CREATE TABLE %s (chr integer, start integer, end integer, strand varchar(10), geneName varchar(255), geneVersion varchar(255), geneSource varchar(255), geneType varchar(255) )''' % (annotable))
conn.commit()
### Fill the table
acount = 0 ## COunt of number of gene entries
for ent in mergedInfo:
gChr,gStart,gEnd,gStrand,gid,gVer,gSource,gType = ent
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
cur.execute("INSERT INTO %s VALUES (%d,%d,%d,'%s','%s',%d,'%s','%s')" % (annotable, int(gChr), int(gStart), int(gEnd), str(gStrand), str(gid), int(gVer), str(gSource), str(gType) ))
acount +=1
except sqlite3.Error:
print('ERROR:',Error)
sys.exit()
cur.execute("SELECT * FROM %s LIMIT 10" % (annotable))
test = cur.fetchall()
print("First 10 entries:\n%s" % (test))
cur.execute("SELECT COUNT(*) FROM %s" % (annotable))
totalEnt = cur.fetchall()
print("\nTotal entries in table:%s | Total entries in file: %s" % (totalEnt[0][0],acount) )
conn.commit() ## Imporatnt to save table
print("Exiting function\n")
return annoDB,annotable,conn
def main():
if makeDB == 1:
parsedGTF = gtfParser(gtf)
annoDB,annotable,conn= tableMaker(parsedGTF)
elif makeDB == 0:
print("Existing annotation DB will be used, to make a new DB please turn on makeDB from settings")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
else:
print("'makeDB' variable takes boolean values")
resFile = overlapCheck(summary,conn,annotable)
print("Overlap Check complete - see '%s' for results" % (resFile))
if __name__ == '__main__':
main()
## v01 <-13th July -15
| print("Function: gtfParser")
## file I/O
fh_in = open(gtf,'r')
gtfRead = fh_in.readlines()
parsedGTF = [] ## List to hold parsed GTF entries
for i in gtfRead:
if i[0].isdigit():
ent = i.split("\t")
if ent[2] == "gene":
# print(ent)
achr = ent[0]
gStart = ent[3]
gEnd = ent[4]
gStrand = ent[6]
info = ent[8].strip("\n").split(";")
# print(info,len(info))
if len(info) == 5:
## Protein coding gene with a version number
gid = info[0].split()[1].replace('"','')
gVer = info[1].split()[1].replace('"','')
gSource = info[2].split()[1].replace('"','')
gType = info[3].split()[1].replace('"','')
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
elif len(info) == 4:
## miRNA with no version number
gid = info[0].split()[1].replace('"','')
gVer = "1"
gSource = info[1].split()[1].replace('"','')
gType = info[2].split()[1].replace('"','')
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
else:
pass
print("First 10 entries of parsedGTF list: %s" % (parsedGTF[:10]))
print ("Exiting function\n")
return parsedGTF | identifier_body |
geneOverlap.v01.py | #!/usr/local/bin/python3
## Written by Atul Kakrana - Kakrana@udel.edu
## Script takes a (transcript) summary file with start,end,chr and strand info and finds overlap with GTF file
## Returns file with overlapping genes, type of overlap and orientation of overlap
## Type of overlap - 5' Annotated genes overlaps at 5' of transcript 3' overlaps at 3' of transcript 8' completly enclaves transcript
## 0' transcript overlaps annotated gene
## Orientation - F: Overlapping gene is at same strand as transcript R: both are on different strands
## USER SETTINGS #######################################
gtf = "Zea_mays.AGPv3.27.gtf"
summary = "Summary.txt" ## Summery file with chr,start,stop and strand
delim = "\t" ## Delimiter for summary file
head = 'Y' ## Header is summary file: 'Y' else: 'N'
name = 2
chromo = 6
start = 3
end = 4
strand = 5
geneType = 20 ## Column for coding or non-coding, if info not available then mention 99
makeDB = 0 ## If DB for GTF file is not present in present directory then make : 1 else: 0
## IMPORTS ############################################
import os,sys
import sqlite3
## FUNCTIONS ##########################################
def overlapCheck(summary,conn,annotable):
print("Function: overlapCheck")
## Test DB
cur = conn.cursor()
## Test
# cur.execute("PRAGMA table_info(%s)" % (annotable))
# desc = cur.fetchall()
# print(desc)
# cur.execute("SELECT geneName FROM %s LIMIT 10" % (annotable))
# test = cur.fetchall()
# print(test)
outFile = "%s.overlap.txt" % summary.rpartition(".")[0]
fh_out = open(outFile,'w')
fh_out.write("Trans\toverlapGenes\toverlapFLags\toverlapConf\n")
fh_in = open(summary,'r')
if head == "Y":
fh_in.readline()
sumRead = fh_in.readlines()
for i in sumRead:
geneList = [] ## Store overlap genes
flagList = [] ## Store diffrent overlap flags - 5',3', 0' [if gene is enclaved within our transcript] and 8' [if gene extends our transcript at both ends]
confList = [] ## Store overlap configuration
resList = [] ## Store a single merged list fo results
ent = i.split(delim)
trans = ent[name-1]
achr = ent[chromo-1]
astart = ent[start-1]
aend = ent[end-1]
astrand = ent[strand-1]
# print("\n***Entry:",trans,achr,astart,aend,astrand)
## Gene end overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND end between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime5 = cur.fetchall()
# print("5Prime\n%s" % prime5)
tempP5 = [] ## Temp store gene names for checking later
if prime5:
for i in prime5:
tempP5.append(i[4])
## Gene start is overlaps
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start between %s and %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime3 = cur.fetchall()
# print("3Prime\n%s" % (prime3))
tempP3 = []
if prime3:
for i in prime3:
tempP3.append(i[4])
## Gene that completly enclaves our transcript <------ ----trans---- -------->
cur.execute("SELECT * FROM %s WHERE chr = '%s' AND start < %s AND end > %s ORDER BY start asc" % (annotable,achr,astart,aend))
prime8 = cur.fetchall()
# print("8Prime\n%s\n" % prime8)
if prime5:
for i in prime5:
if i[4] in tempP3:
# print("Transcript enclaves the annotated gene'")
flag = 0
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
# sys.exit()
else:
# print("Gene overlaps only at one end")
if astrand == "+":
flag = 5
elif astrand == "-":
flag = 3
# print("Appending prime5:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime3:
for i in prime3:
if i[4] not in tempP5:
# print("Gene Overlaps only at one end")
if astrand == "+":
flag = 3
elif astrand == "-":
flag = 5
# print("Appending prime3:%s" % (i[4]))
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print(i)
if prime8:
for i in prime8:
# print("Annotated gene enclaves our transcript")
# print(i)
flag = 8
geneList.append(i[4])
flagList.append(flag)
if i[3] == astrand:
confList.append("F")
else:
confList.append("R")
# print("geneList",geneList,"flagList",flagList,"confList",confList)
resList = list(zip(geneList,flagList,confList))
# print("Final Results",resList)
# print("FinalRes:%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
if geneList:
fh_out.write("%s\t%s\t%s\t%s\n" % (trans,','.join( str(x) for x in geneList), ','.join(str(x) for x in flagList), ','.join(str(x) for x in confList) ))
else:
## There are no overlaps
|
fh_out.close()
fh_in.close()
print("Exiting function - overlapCheck\n")
return outFile
def gtfParser(gtf):
print("Function: gtfParser")
## file I/O
fh_in = open(gtf,'r')
gtfRead = fh_in.readlines()
parsedGTF = [] ## List to hold parsed GTF entries
for i in gtfRead:
if i[0].isdigit():
ent = i.split("\t")
if ent[2] == "gene":
# print(ent)
achr = ent[0]
gStart = ent[3]
gEnd = ent[4]
gStrand = ent[6]
info = ent[8].strip("\n").split(";")
# print(info,len(info))
if len(info) == 5:
## Protein coding gene with a version number
gid = info[0].split()[1].replace('"','')
gVer = info[1].split()[1].replace('"','')
gSource = info[2].split()[1].replace('"','')
gType = info[3].split()[1].replace('"','')
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
elif len(info) == 4:
## miRNA with no version number
gid = info[0].split()[1].replace('"','')
gVer = "1"
gSource = info[1].split()[1].replace('"','')
gType = info[2].split()[1].replace('"','')
parsedGTF.append((achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType))
else:
pass
print("First 10 entries of parsedGTF list: %s" % (parsedGTF[:10]))
print ("Exiting function\n")
return parsedGTF
def summParser(summary):
'''Create a a list of summary file'''
print("\nFunction: summParser")
fh_in = open(summary,'r')
if head == 'Y':
fh_in.readline()
summRead = fh_in.readlines()
geneSet = set()
# for i in summRead:
# ent = i.split("\t")
# agene = ent[gene-1]
# geneSet.add(agene)
parsedSumm = []
acount = 0 ## To count the entries
for i in summRead:
# print(i)
ent = i.split("\t") ## It has to be tab seprated file always
agene = ent[gene-1]
if agene not in geneSet:
## New entry add
print("Total entries scanned: %s | Length of GTF Dictionary %s" % (acount,len(summDict)))
print("Exiting function - summaryDict\n")
return parsedSumm
def tableMaker(parsedGTF,parsedSumm):
'''
Make a track specific sqlite DB for probe ID and coords that will be used
to query probes on 20MB interval. Each probe entry has following info:
probe_id,FC,pval,chr,start,end
'''
mergedInfo = parsedGTF + parsedSumm
print("Function: tableMaker")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
cur = conn.cursor()
cur.execute('''DROP TABLE IF EXISTS %s''' % (annotable)) ### Drop Old table - while testing
conn.commit()
try:
cur.execute('''CREATE TABLE %s (chr integer, start integer, end integer, strand varchar(10), geneName varchar(255), geneVersion varchar(255), geneSource varchar(255), geneType varchar(255) )''' % (annotable))
conn.commit()
### Fill the table
acount = 0 ## COunt of number of gene entries
for ent in mergedInfo:
gChr,gStart,gEnd,gStrand,gid,gVer,gSource,gType = ent
# print(achr,gStart,gEnd,gStrand,gid,gVer,gSource,gType)
cur.execute("INSERT INTO %s VALUES (%d,%d,%d,'%s','%s',%d,'%s','%s')" % (annotable, int(gChr), int(gStart), int(gEnd), str(gStrand), str(gid), int(gVer), str(gSource), str(gType) ))
acount +=1
except sqlite3.Error:
print('ERROR:',Error)
sys.exit()
cur.execute("SELECT * FROM %s LIMIT 10" % (annotable))
test = cur.fetchall()
print("First 10 entries:\n%s" % (test))
cur.execute("SELECT COUNT(*) FROM %s" % (annotable))
totalEnt = cur.fetchall()
print("\nTotal entries in table:%s | Total entries in file: %s" % (totalEnt[0][0],acount) )
conn.commit() ## Imporatnt to save table
print("Exiting function\n")
return annoDB,annotable,conn
def main():
if makeDB == 1:
parsedGTF = gtfParser(gtf)
annoDB,annotable,conn= tableMaker(parsedGTF)
elif makeDB == 0:
print("Existing annotation DB will be used, to make a new DB please turn on makeDB from settings")
annoDB = '%s.db' % (gtf.rpartition('.')[0])
annotable = "geneMaster"
conn = sqlite3.connect(annoDB)
else:
print("'makeDB' variable takes boolean values")
resFile = overlapCheck(summary,conn,annotable)
print("Overlap Check complete - see '%s' for results" % (resFile))
if __name__ == '__main__':
main()
## v01 <-13th July -15
| fh_out.write("%s\tNA\tNA\tNA\n" % (trans)) | conditional_block |
AntUtils.go | package utils
import (
"bytes"
"golang.org/x/net/html/atom"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
"io/ioutil"
)
func ConvertGBK(text1 string) string {
data, _ := ioutil.ReadAll(transform.NewReader(bytes.NewReader([]byte(text1)), simplifiedchinese.GBK.NewDecoder()))
text := string(data)
return text
}
func ConvertUTF8(text1 string) string {
data, _ := ioutil.ReadAll(transform.NewReader(bytes.NewReader([]byte(text1)), simplifiedchinese.GBK.NewEncoder()))
text := string(data)
return text
}
func GetAtomByString(a string) atom.Atom {
switch a {
case "A":
return atom.A
case "Abbr":
return atom.Abbr
case "Accept":
return atom.Accept
case "AcceptCharset":
return atom.AcceptCharset
case "Accesskey":
return atom.Accesskey
case "Action":
return atom.Action
case "Address":
return atom.Address
case "Align":
return atom.Align
case "Alt":
return atom.Alt
case "Annotation":
return atom.Annotation
case "AnnotationXml":
return atom.AnnotationXml
case "Applet":
return atom.Applet
case "Area":
return atom.Area
case "Article":
return atom.Article
case "Aside":
return atom.Aside
case "Async":
return atom.Async
case "Audio":
return atom.Audio
case "Autocomplete":
return atom.Autocomplete
}
return 0
/*
Autofocus Atom = 0xb309
Autoplay Atom = 0xce08
B Atom = 0x101
Base Atom = 0xd604
Basefont Atom = 0xd608
Bdi Atom = 0x1a03
Bdo Atom = 0xe703
Bgsound Atom = 0x11807
Big Atom = 0x12403
Blink Atom = 0x12705
Blockquote Atom = 0x12c0a
Body Atom = 0x2f04
Br Atom = 0x202
Button Atom = 0x13606
Canvas Atom = 0x7f06
Caption Atom = 0x1bb07
Center Atom = 0x5b506
Challenge Atom = 0x21f09
Charset Atom = 0x2807
Checked Atom = 0x32807
Cite Atom = 0x3c804 | Col Atom = 0x15003
Colgroup Atom = 0x15008
Color Atom = 0x15d05
Cols Atom = 0x16204
Colspan Atom = 0x16207
Command Atom = 0x17507
Content Atom = 0x42307
Contenteditable Atom = 0x4230f
Contextmenu Atom = 0x3310b
Controls Atom = 0x18808
Coords Atom = 0x19406
Crossorigin Atom = 0x19f0b
Data Atom = 0x44a04
Datalist Atom = 0x44a08
Datetime Atom = 0x23c08
Dd Atom = 0x26702
Default Atom = 0x8607
Defer Atom = 0x14b05
Del Atom = 0x3ef03
Desc Atom = 0x4db04
Details Atom = 0x4807
Dfn Atom = 0x6103
Dialog Atom = 0x1b06
Dir Atom = 0x6903
Dirname Atom = 0x6907
Disabled Atom = 0x10c08
Div Atom = 0x11303
Dl Atom = 0x11e02
Download Atom = 0x40008
Draggable Atom = 0x17b09
Dropzone Atom = 0x39108
Dt Atom = 0x50902
Em Atom = 0x6502
Embed Atom = 0x6505
Enctype Atom = 0x21107
Face Atom = 0x5b304
Fieldset Atom = 0x1b008
Figcaption Atom = 0x1b80a
Figure Atom = 0x1cc06
Font Atom = 0xda04
Footer Atom = 0x8d06
For Atom = 0x1d803
ForeignObject Atom = 0x1d80d
Foreignobject Atom = 0x1e50d
Form Atom = 0x1f204
Formaction Atom = 0x1f20a
Formenctype Atom = 0x20d0b
Formmethod Atom = 0x2280a
Formnovalidate Atom = 0x2320e
Formtarget Atom = 0x2470a
Frame Atom = 0x9a05
Frameset Atom = 0x9a08
H1 Atom = 0x26e02
H2 Atom = 0x29402
H3 Atom = 0x2a702
H4 Atom = 0x2e902
H5 Atom = 0x2f302
H6 Atom = 0x50b02
Head Atom = 0x2d504
Header Atom = 0x2d506
Headers Atom = 0x2d507
Height Atom = 0x25106
Hgroup Atom = 0x25906
Hidden Atom = 0x26506
High Atom = 0x26b04
Hr Atom = 0x27002
Href Atom = 0x27004
Hreflang Atom = 0x27008
Html Atom = 0x25504
HttpEquiv Atom = 0x2780a
I Atom = 0x601
Icon Atom = 0x42204
Id Atom = 0x8502
Iframe Atom = 0x29606
Image Atom = 0x29c05
Img Atom = 0x2a103
Input Atom = 0x3e805
Inputmode Atom = 0x3e809
Ins Atom = 0x1a803
Isindex Atom = 0x2a907
Ismap Atom = 0x2b005
Itemid Atom = 0x33c06
Itemprop Atom = 0x3c908
Itemref Atom = 0x5ad07
Itemscope Atom = 0x2b909
Itemtype Atom = 0x2c308
Kbd Atom = 0x1903
Keygen Atom = 0x3906
Keytype Atom = 0x53707
Kind Atom = 0x10904
Label Atom = 0xf005
Lang Atom = 0x27404
Legend Atom = 0x18206
Li Atom = 0x1202
Link Atom = 0x12804
List Atom = 0x44e04
Listing Atom = 0x44e07
Loop Atom = 0xf404
Low Atom = 0x11f03
Malignmark Atom = 0x100a
Manifest Atom = 0x5f108
Map Atom = 0x2b203
Mark Atom = 0x1604
Marquee Atom = 0x2cb07
Math Atom = 0x2d204
Max Atom = 0x2e103
Maxlength Atom = 0x2e109
Media Atom = 0x6e05
Mediagroup Atom = 0x6e0a
Menu Atom = 0x33804
Menuitem Atom = 0x33808
Meta Atom = 0x45d04
Meter Atom = 0x24205
Method Atom = 0x22c06
Mglyph Atom = 0x2a206
Mi Atom = 0x2eb02
Min Atom = 0x2eb03
Minlength Atom = 0x2eb09
Mn Atom = 0x23502
Mo Atom = 0x3ed02
Ms Atom = 0x2bc02
Mtext Atom = 0x2f505
Multiple Atom = 0x30308
Muted Atom = 0x30b05
Name Atom = 0x6c04
Nav Atom = 0x3e03
Nobr Atom = 0x5704
Noembed Atom = 0x6307
Noframes Atom = 0x9808
Noscript Atom = 0x3d208
Novalidate Atom = 0x2360a
Object Atom = 0x1ec06
Ol Atom = 0xc902
Onabort Atom = 0x13a07
Onafterprint Atom = 0x1c00c
Onautocomplete Atom = 0x1fa0e
Onautocompleteerror Atom = 0x1fa13
Onbeforeprint Atom = 0x6040d
Onbeforeunload Atom = 0x4e70e
Onblur Atom = 0xaa06
Oncancel Atom = 0xe908
Oncanplay Atom = 0x28509
Oncanplaythrough Atom = 0x28510
Onchange Atom = 0x3a708
Onclick Atom = 0x31007
Onclose Atom = 0x31707
Oncontextmenu Atom = 0x32f0d
Oncuechange Atom = 0x3420b
Ondblclick Atom = 0x34d0a
Ondrag Atom = 0x35706
Ondragend Atom = 0x35709
Ondragenter Atom = 0x3600b
Ondragleave Atom = 0x36b0b
Ondragover Atom = 0x3760a
Ondragstart Atom = 0x3800b
Ondrop Atom = 0x38f06
Ondurationchange Atom = 0x39f10
Onemptied Atom = 0x39609
Onended Atom = 0x3af07
Onerror Atom = 0x3b607
Onfocus Atom = 0x3bd07
Onhashchange Atom = 0x3da0c
Oninput Atom = 0x3e607
Oninvalid Atom = 0x3f209
Onkeydown Atom = 0x3fb09
Onkeypress Atom = 0x4080a
Onkeyup Atom = 0x41807
Onlanguagechange Atom = 0x43210
Onload Atom = 0x44206
Onloadeddata Atom = 0x4420c
Onloadedmetadata Atom = 0x45510
Onloadstart Atom = 0x46b0b
Onmessage Atom = 0x47609
Onmousedown Atom = 0x47f0b
Onmousemove Atom = 0x48a0b
Onmouseout Atom = 0x4950a
Onmouseover Atom = 0x4a20b
Onmouseup Atom = 0x4ad09
Onmousewheel Atom = 0x4b60c
Onoffline Atom = 0x4c209
Ononline Atom = 0x4cb08
Onpagehide Atom = 0x4d30a
Onpageshow Atom = 0x4fe0a
Onpause Atom = 0x50d07
Onplay Atom = 0x51706
Onplaying Atom = 0x51709
Onpopstate Atom = 0x5200a
Onprogress Atom = 0x52a0a
Onratechange Atom = 0x53e0c
Onreset Atom = 0x54a07
Onresize Atom = 0x55108
Onscroll Atom = 0x55f08
Onseeked Atom = 0x56708
Onseeking Atom = 0x56f09
Onselect Atom = 0x57808
Onshow Atom = 0x58206
Onsort Atom = 0x58b06
Onstalled Atom = 0x59509
Onstorage Atom = 0x59e09
Onsubmit Atom = 0x5a708
Onsuspend Atom = 0x5bb09
Ontimeupdate Atom = 0xdb0c
Ontoggle Atom = 0x5c408
Onunload Atom = 0x5cc08
Onvolumechange Atom = 0x5d40e
Onwaiting Atom = 0x5e209
Open Atom = 0x3cf04
Optgroup Atom = 0xf608
Optimum Atom = 0x5eb07
Option Atom = 0x60006
Output Atom = 0x49c06
P Atom = 0xc01
Param Atom = 0xc05
Pattern Atom = 0x5107
Ping Atom = 0x7704
Placeholder Atom = 0xc30b
Plaintext Atom = 0xfd09
Poster Atom = 0x15706
Pre Atom = 0x25e03
Preload Atom = 0x25e07
Progress Atom = 0x52c08
Prompt Atom = 0x5fa06
Public Atom = 0x41e06
Q Atom = 0x13101
Radiogroup Atom = 0x30a
Readonly Atom = 0x2fb08
Rel Atom = 0x25f03
Required Atom = 0x1d008
Reversed Atom = 0x5a08
Rows Atom = 0x9204
Rowspan Atom = 0x9207
Rp Atom = 0x1c602
Rt Atom = 0x13f02
Ruby Atom = 0xaf04
S Atom = 0x2c01
Samp Atom = 0x4e04
Sandbox Atom = 0xbb07
Scope Atom = 0x2bd05
Scoped Atom = 0x2bd06
Script Atom = 0x3d406
Seamless Atom = 0x31c08
Section Atom = 0x4e207
Select Atom = 0x57a06
Selected Atom = 0x57a08
Shape Atom = 0x4f905
Size Atom = 0x55504
Sizes Atom = 0x55505
Small Atom = 0x18f05
Sortable Atom = 0x58d08
Sorted Atom = 0x19906
Source Atom = 0x1aa06
Spacer Atom = 0x2db06
Span Atom = 0x9504
Spellcheck Atom = 0x3230a
Src Atom = 0x3c303
Srcdoc Atom = 0x3c306
Srclang Atom = 0x41107
Start Atom = 0x38605
Step Atom = 0x5f704
Strike Atom = 0x53306
Strong Atom = 0x55906
Style Atom = 0x61105
Sub Atom = 0x5a903
Summary Atom = 0x61607
Sup Atom = 0x61d03
Svg Atom = 0x62003
System Atom = 0x62306
Tabindex Atom = 0x46308
Table Atom = 0x42d05
Target Atom = 0x24b06
Tbody Atom = 0x2e05
Td Atom = 0x4702
Template Atom = 0x62608
Textarea Atom = 0x2f608
Tfoot Atom = 0x8c05
Th Atom = 0x22e02
Thead Atom = 0x2d405
Time Atom = 0xdd04
Title Atom = 0xa105
Tr Atom = 0x10502
Track Atom = 0x10505
Translate Atom = 0x14009
Tt Atom = 0x5302
Type Atom = 0x21404
Typemustmatch Atom = 0x2140d
U Atom = 0xb01
Ul Atom = 0x8a02
Usemap Atom = 0x51106
Value Atom = 0x4005
Var Atom = 0x11503
Video Atom = 0x28105
Wbr Atom = 0x12103
Width Atom = 0x50705
Wrap Atom = 0x58704
Xmp Atom = 0xc103*/
} | Class Atom = 0x4de05
Code Atom = 0x14904 | random_line_split |
AntUtils.go | package utils
import (
"bytes"
"golang.org/x/net/html/atom"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
"io/ioutil"
)
func ConvertGBK(text1 string) string |
func ConvertUTF8(text1 string) string {
data, _ := ioutil.ReadAll(transform.NewReader(bytes.NewReader([]byte(text1)), simplifiedchinese.GBK.NewEncoder()))
text := string(data)
return text
}
func GetAtomByString(a string) atom.Atom {
switch a {
case "A":
return atom.A
case "Abbr":
return atom.Abbr
case "Accept":
return atom.Accept
case "AcceptCharset":
return atom.AcceptCharset
case "Accesskey":
return atom.Accesskey
case "Action":
return atom.Action
case "Address":
return atom.Address
case "Align":
return atom.Align
case "Alt":
return atom.Alt
case "Annotation":
return atom.Annotation
case "AnnotationXml":
return atom.AnnotationXml
case "Applet":
return atom.Applet
case "Area":
return atom.Area
case "Article":
return atom.Article
case "Aside":
return atom.Aside
case "Async":
return atom.Async
case "Audio":
return atom.Audio
case "Autocomplete":
return atom.Autocomplete
}
return 0
/*
Autofocus Atom = 0xb309
Autoplay Atom = 0xce08
B Atom = 0x101
Base Atom = 0xd604
Basefont Atom = 0xd608
Bdi Atom = 0x1a03
Bdo Atom = 0xe703
Bgsound Atom = 0x11807
Big Atom = 0x12403
Blink Atom = 0x12705
Blockquote Atom = 0x12c0a
Body Atom = 0x2f04
Br Atom = 0x202
Button Atom = 0x13606
Canvas Atom = 0x7f06
Caption Atom = 0x1bb07
Center Atom = 0x5b506
Challenge Atom = 0x21f09
Charset Atom = 0x2807
Checked Atom = 0x32807
Cite Atom = 0x3c804
Class Atom = 0x4de05
Code Atom = 0x14904
Col Atom = 0x15003
Colgroup Atom = 0x15008
Color Atom = 0x15d05
Cols Atom = 0x16204
Colspan Atom = 0x16207
Command Atom = 0x17507
Content Atom = 0x42307
Contenteditable Atom = 0x4230f
Contextmenu Atom = 0x3310b
Controls Atom = 0x18808
Coords Atom = 0x19406
Crossorigin Atom = 0x19f0b
Data Atom = 0x44a04
Datalist Atom = 0x44a08
Datetime Atom = 0x23c08
Dd Atom = 0x26702
Default Atom = 0x8607
Defer Atom = 0x14b05
Del Atom = 0x3ef03
Desc Atom = 0x4db04
Details Atom = 0x4807
Dfn Atom = 0x6103
Dialog Atom = 0x1b06
Dir Atom = 0x6903
Dirname Atom = 0x6907
Disabled Atom = 0x10c08
Div Atom = 0x11303
Dl Atom = 0x11e02
Download Atom = 0x40008
Draggable Atom = 0x17b09
Dropzone Atom = 0x39108
Dt Atom = 0x50902
Em Atom = 0x6502
Embed Atom = 0x6505
Enctype Atom = 0x21107
Face Atom = 0x5b304
Fieldset Atom = 0x1b008
Figcaption Atom = 0x1b80a
Figure Atom = 0x1cc06
Font Atom = 0xda04
Footer Atom = 0x8d06
For Atom = 0x1d803
ForeignObject Atom = 0x1d80d
Foreignobject Atom = 0x1e50d
Form Atom = 0x1f204
Formaction Atom = 0x1f20a
Formenctype Atom = 0x20d0b
Formmethod Atom = 0x2280a
Formnovalidate Atom = 0x2320e
Formtarget Atom = 0x2470a
Frame Atom = 0x9a05
Frameset Atom = 0x9a08
H1 Atom = 0x26e02
H2 Atom = 0x29402
H3 Atom = 0x2a702
H4 Atom = 0x2e902
H5 Atom = 0x2f302
H6 Atom = 0x50b02
Head Atom = 0x2d504
Header Atom = 0x2d506
Headers Atom = 0x2d507
Height Atom = 0x25106
Hgroup Atom = 0x25906
Hidden Atom = 0x26506
High Atom = 0x26b04
Hr Atom = 0x27002
Href Atom = 0x27004
Hreflang Atom = 0x27008
Html Atom = 0x25504
HttpEquiv Atom = 0x2780a
I Atom = 0x601
Icon Atom = 0x42204
Id Atom = 0x8502
Iframe Atom = 0x29606
Image Atom = 0x29c05
Img Atom = 0x2a103
Input Atom = 0x3e805
Inputmode Atom = 0x3e809
Ins Atom = 0x1a803
Isindex Atom = 0x2a907
Ismap Atom = 0x2b005
Itemid Atom = 0x33c06
Itemprop Atom = 0x3c908
Itemref Atom = 0x5ad07
Itemscope Atom = 0x2b909
Itemtype Atom = 0x2c308
Kbd Atom = 0x1903
Keygen Atom = 0x3906
Keytype Atom = 0x53707
Kind Atom = 0x10904
Label Atom = 0xf005
Lang Atom = 0x27404
Legend Atom = 0x18206
Li Atom = 0x1202
Link Atom = 0x12804
List Atom = 0x44e04
Listing Atom = 0x44e07
Loop Atom = 0xf404
Low Atom = 0x11f03
Malignmark Atom = 0x100a
Manifest Atom = 0x5f108
Map Atom = 0x2b203
Mark Atom = 0x1604
Marquee Atom = 0x2cb07
Math Atom = 0x2d204
Max Atom = 0x2e103
Maxlength Atom = 0x2e109
Media Atom = 0x6e05
Mediagroup Atom = 0x6e0a
Menu Atom = 0x33804
Menuitem Atom = 0x33808
Meta Atom = 0x45d04
Meter Atom = 0x24205
Method Atom = 0x22c06
Mglyph Atom = 0x2a206
Mi Atom = 0x2eb02
Min Atom = 0x2eb03
Minlength Atom = 0x2eb09
Mn Atom = 0x23502
Mo Atom = 0x3ed02
Ms Atom = 0x2bc02
Mtext Atom = 0x2f505
Multiple Atom = 0x30308
Muted Atom = 0x30b05
Name Atom = 0x6c04
Nav Atom = 0x3e03
Nobr Atom = 0x5704
Noembed Atom = 0x6307
Noframes Atom = 0x9808
Noscript Atom = 0x3d208
Novalidate Atom = 0x2360a
Object Atom = 0x1ec06
Ol Atom = 0xc902
Onabort Atom = 0x13a07
Onafterprint Atom = 0x1c00c
Onautocomplete Atom = 0x1fa0e
Onautocompleteerror Atom = 0x1fa13
Onbeforeprint Atom = 0x6040d
Onbeforeunload Atom = 0x4e70e
Onblur Atom = 0xaa06
Oncancel Atom = 0xe908
Oncanplay Atom = 0x28509
Oncanplaythrough Atom = 0x28510
Onchange Atom = 0x3a708
Onclick Atom = 0x31007
Onclose Atom = 0x31707
Oncontextmenu Atom = 0x32f0d
Oncuechange Atom = 0x3420b
Ondblclick Atom = 0x34d0a
Ondrag Atom = 0x35706
Ondragend Atom = 0x35709
Ondragenter Atom = 0x3600b
Ondragleave Atom = 0x36b0b
Ondragover Atom = 0x3760a
Ondragstart Atom = 0x3800b
Ondrop Atom = 0x38f06
Ondurationchange Atom = 0x39f10
Onemptied Atom = 0x39609
Onended Atom = 0x3af07
Onerror Atom = 0x3b607
Onfocus Atom = 0x3bd07
Onhashchange Atom = 0x3da0c
Oninput Atom = 0x3e607
Oninvalid Atom = 0x3f209
Onkeydown Atom = 0x3fb09
Onkeypress Atom = 0x4080a
Onkeyup Atom = 0x41807
Onlanguagechange Atom = 0x43210
Onload Atom = 0x44206
Onloadeddata Atom = 0x4420c
Onloadedmetadata Atom = 0x45510
Onloadstart Atom = 0x46b0b
Onmessage Atom = 0x47609
Onmousedown Atom = 0x47f0b
Onmousemove Atom = 0x48a0b
Onmouseout Atom = 0x4950a
Onmouseover Atom = 0x4a20b
Onmouseup Atom = 0x4ad09
Onmousewheel Atom = 0x4b60c
Onoffline Atom = 0x4c209
Ononline Atom = 0x4cb08
Onpagehide Atom = 0x4d30a
Onpageshow Atom = 0x4fe0a
Onpause Atom = 0x50d07
Onplay Atom = 0x51706
Onplaying Atom = 0x51709
Onpopstate Atom = 0x5200a
Onprogress Atom = 0x52a0a
Onratechange Atom = 0x53e0c
Onreset Atom = 0x54a07
Onresize Atom = 0x55108
Onscroll Atom = 0x55f08
Onseeked Atom = 0x56708
Onseeking Atom = 0x56f09
Onselect Atom = 0x57808
Onshow Atom = 0x58206
Onsort Atom = 0x58b06
Onstalled Atom = 0x59509
Onstorage Atom = 0x59e09
Onsubmit Atom = 0x5a708
Onsuspend Atom = 0x5bb09
Ontimeupdate Atom = 0xdb0c
Ontoggle Atom = 0x5c408
Onunload Atom = 0x5cc08
Onvolumechange Atom = 0x5d40e
Onwaiting Atom = 0x5e209
Open Atom = 0x3cf04
Optgroup Atom = 0xf608
Optimum Atom = 0x5eb07
Option Atom = 0x60006
Output Atom = 0x49c06
P Atom = 0xc01
Param Atom = 0xc05
Pattern Atom = 0x5107
Ping Atom = 0x7704
Placeholder Atom = 0xc30b
Plaintext Atom = 0xfd09
Poster Atom = 0x15706
Pre Atom = 0x25e03
Preload Atom = 0x25e07
Progress Atom = 0x52c08
Prompt Atom = 0x5fa06
Public Atom = 0x41e06
Q Atom = 0x13101
Radiogroup Atom = 0x30a
Readonly Atom = 0x2fb08
Rel Atom = 0x25f03
Required Atom = 0x1d008
Reversed Atom = 0x5a08
Rows Atom = 0x9204
Rowspan Atom = 0x9207
Rp Atom = 0x1c602
Rt Atom = 0x13f02
Ruby Atom = 0xaf04
S Atom = 0x2c01
Samp Atom = 0x4e04
Sandbox Atom = 0xbb07
Scope Atom = 0x2bd05
Scoped Atom = 0x2bd06
Script Atom = 0x3d406
Seamless Atom = 0x31c08
Section Atom = 0x4e207
Select Atom = 0x57a06
Selected Atom = 0x57a08
Shape Atom = 0x4f905
Size Atom = 0x55504
Sizes Atom = 0x55505
Small Atom = 0x18f05
Sortable Atom = 0x58d08
Sorted Atom = 0x19906
Source Atom = 0x1aa06
Spacer Atom = 0x2db06
Span Atom = 0x9504
Spellcheck Atom = 0x3230a
Src Atom = 0x3c303
Srcdoc Atom = 0x3c306
Srclang Atom = 0x41107
Start Atom = 0x38605
Step Atom = 0x5f704
Strike Atom = 0x53306
Strong Atom = 0x55906
Style Atom = 0x61105
Sub Atom = 0x5a903
Summary Atom = 0x61607
Sup Atom = 0x61d03
Svg Atom = 0x62003
System Atom = 0x62306
Tabindex Atom = 0x46308
Table Atom = 0x42d05
Target Atom = 0x24b06
Tbody Atom = 0x2e05
Td Atom = 0x4702
Template Atom = 0x62608
Textarea Atom = 0x2f608
Tfoot Atom = 0x8c05
Th Atom = 0x22e02
Thead Atom = 0x2d405
Time Atom = 0xdd04
Title Atom = 0xa105
Tr Atom = 0x10502
Track Atom = 0x10505
Translate Atom = 0x14009
Tt Atom = 0x5302
Type Atom = 0x21404
Typemustmatch Atom = 0x2140d
U Atom = 0xb01
Ul Atom = 0x8a02
Usemap Atom = 0x51106
Value Atom = 0x4005
Var Atom = 0x11503
Video Atom = 0x28105
Wbr Atom = 0x12103
Width Atom = 0x50705
Wrap Atom = 0x58704
Xmp Atom = 0xc103*/
}
| {
data, _ := ioutil.ReadAll(transform.NewReader(bytes.NewReader([]byte(text1)), simplifiedchinese.GBK.NewDecoder()))
text := string(data)
return text
} | identifier_body |
AntUtils.go | package utils
import (
"bytes"
"golang.org/x/net/html/atom"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
"io/ioutil"
)
func ConvertGBK(text1 string) string {
data, _ := ioutil.ReadAll(transform.NewReader(bytes.NewReader([]byte(text1)), simplifiedchinese.GBK.NewDecoder()))
text := string(data)
return text
}
func | (text1 string) string {
data, _ := ioutil.ReadAll(transform.NewReader(bytes.NewReader([]byte(text1)), simplifiedchinese.GBK.NewEncoder()))
text := string(data)
return text
}
func GetAtomByString(a string) atom.Atom {
switch a {
case "A":
return atom.A
case "Abbr":
return atom.Abbr
case "Accept":
return atom.Accept
case "AcceptCharset":
return atom.AcceptCharset
case "Accesskey":
return atom.Accesskey
case "Action":
return atom.Action
case "Address":
return atom.Address
case "Align":
return atom.Align
case "Alt":
return atom.Alt
case "Annotation":
return atom.Annotation
case "AnnotationXml":
return atom.AnnotationXml
case "Applet":
return atom.Applet
case "Area":
return atom.Area
case "Article":
return atom.Article
case "Aside":
return atom.Aside
case "Async":
return atom.Async
case "Audio":
return atom.Audio
case "Autocomplete":
return atom.Autocomplete
}
return 0
/*
Autofocus Atom = 0xb309
Autoplay Atom = 0xce08
B Atom = 0x101
Base Atom = 0xd604
Basefont Atom = 0xd608
Bdi Atom = 0x1a03
Bdo Atom = 0xe703
Bgsound Atom = 0x11807
Big Atom = 0x12403
Blink Atom = 0x12705
Blockquote Atom = 0x12c0a
Body Atom = 0x2f04
Br Atom = 0x202
Button Atom = 0x13606
Canvas Atom = 0x7f06
Caption Atom = 0x1bb07
Center Atom = 0x5b506
Challenge Atom = 0x21f09
Charset Atom = 0x2807
Checked Atom = 0x32807
Cite Atom = 0x3c804
Class Atom = 0x4de05
Code Atom = 0x14904
Col Atom = 0x15003
Colgroup Atom = 0x15008
Color Atom = 0x15d05
Cols Atom = 0x16204
Colspan Atom = 0x16207
Command Atom = 0x17507
Content Atom = 0x42307
Contenteditable Atom = 0x4230f
Contextmenu Atom = 0x3310b
Controls Atom = 0x18808
Coords Atom = 0x19406
Crossorigin Atom = 0x19f0b
Data Atom = 0x44a04
Datalist Atom = 0x44a08
Datetime Atom = 0x23c08
Dd Atom = 0x26702
Default Atom = 0x8607
Defer Atom = 0x14b05
Del Atom = 0x3ef03
Desc Atom = 0x4db04
Details Atom = 0x4807
Dfn Atom = 0x6103
Dialog Atom = 0x1b06
Dir Atom = 0x6903
Dirname Atom = 0x6907
Disabled Atom = 0x10c08
Div Atom = 0x11303
Dl Atom = 0x11e02
Download Atom = 0x40008
Draggable Atom = 0x17b09
Dropzone Atom = 0x39108
Dt Atom = 0x50902
Em Atom = 0x6502
Embed Atom = 0x6505
Enctype Atom = 0x21107
Face Atom = 0x5b304
Fieldset Atom = 0x1b008
Figcaption Atom = 0x1b80a
Figure Atom = 0x1cc06
Font Atom = 0xda04
Footer Atom = 0x8d06
For Atom = 0x1d803
ForeignObject Atom = 0x1d80d
Foreignobject Atom = 0x1e50d
Form Atom = 0x1f204
Formaction Atom = 0x1f20a
Formenctype Atom = 0x20d0b
Formmethod Atom = 0x2280a
Formnovalidate Atom = 0x2320e
Formtarget Atom = 0x2470a
Frame Atom = 0x9a05
Frameset Atom = 0x9a08
H1 Atom = 0x26e02
H2 Atom = 0x29402
H3 Atom = 0x2a702
H4 Atom = 0x2e902
H5 Atom = 0x2f302
H6 Atom = 0x50b02
Head Atom = 0x2d504
Header Atom = 0x2d506
Headers Atom = 0x2d507
Height Atom = 0x25106
Hgroup Atom = 0x25906
Hidden Atom = 0x26506
High Atom = 0x26b04
Hr Atom = 0x27002
Href Atom = 0x27004
Hreflang Atom = 0x27008
Html Atom = 0x25504
HttpEquiv Atom = 0x2780a
I Atom = 0x601
Icon Atom = 0x42204
Id Atom = 0x8502
Iframe Atom = 0x29606
Image Atom = 0x29c05
Img Atom = 0x2a103
Input Atom = 0x3e805
Inputmode Atom = 0x3e809
Ins Atom = 0x1a803
Isindex Atom = 0x2a907
Ismap Atom = 0x2b005
Itemid Atom = 0x33c06
Itemprop Atom = 0x3c908
Itemref Atom = 0x5ad07
Itemscope Atom = 0x2b909
Itemtype Atom = 0x2c308
Kbd Atom = 0x1903
Keygen Atom = 0x3906
Keytype Atom = 0x53707
Kind Atom = 0x10904
Label Atom = 0xf005
Lang Atom = 0x27404
Legend Atom = 0x18206
Li Atom = 0x1202
Link Atom = 0x12804
List Atom = 0x44e04
Listing Atom = 0x44e07
Loop Atom = 0xf404
Low Atom = 0x11f03
Malignmark Atom = 0x100a
Manifest Atom = 0x5f108
Map Atom = 0x2b203
Mark Atom = 0x1604
Marquee Atom = 0x2cb07
Math Atom = 0x2d204
Max Atom = 0x2e103
Maxlength Atom = 0x2e109
Media Atom = 0x6e05
Mediagroup Atom = 0x6e0a
Menu Atom = 0x33804
Menuitem Atom = 0x33808
Meta Atom = 0x45d04
Meter Atom = 0x24205
Method Atom = 0x22c06
Mglyph Atom = 0x2a206
Mi Atom = 0x2eb02
Min Atom = 0x2eb03
Minlength Atom = 0x2eb09
Mn Atom = 0x23502
Mo Atom = 0x3ed02
Ms Atom = 0x2bc02
Mtext Atom = 0x2f505
Multiple Atom = 0x30308
Muted Atom = 0x30b05
Name Atom = 0x6c04
Nav Atom = 0x3e03
Nobr Atom = 0x5704
Noembed Atom = 0x6307
Noframes Atom = 0x9808
Noscript Atom = 0x3d208
Novalidate Atom = 0x2360a
Object Atom = 0x1ec06
Ol Atom = 0xc902
Onabort Atom = 0x13a07
Onafterprint Atom = 0x1c00c
Onautocomplete Atom = 0x1fa0e
Onautocompleteerror Atom = 0x1fa13
Onbeforeprint Atom = 0x6040d
Onbeforeunload Atom = 0x4e70e
Onblur Atom = 0xaa06
Oncancel Atom = 0xe908
Oncanplay Atom = 0x28509
Oncanplaythrough Atom = 0x28510
Onchange Atom = 0x3a708
Onclick Atom = 0x31007
Onclose Atom = 0x31707
Oncontextmenu Atom = 0x32f0d
Oncuechange Atom = 0x3420b
Ondblclick Atom = 0x34d0a
Ondrag Atom = 0x35706
Ondragend Atom = 0x35709
Ondragenter Atom = 0x3600b
Ondragleave Atom = 0x36b0b
Ondragover Atom = 0x3760a
Ondragstart Atom = 0x3800b
Ondrop Atom = 0x38f06
Ondurationchange Atom = 0x39f10
Onemptied Atom = 0x39609
Onended Atom = 0x3af07
Onerror Atom = 0x3b607
Onfocus Atom = 0x3bd07
Onhashchange Atom = 0x3da0c
Oninput Atom = 0x3e607
Oninvalid Atom = 0x3f209
Onkeydown Atom = 0x3fb09
Onkeypress Atom = 0x4080a
Onkeyup Atom = 0x41807
Onlanguagechange Atom = 0x43210
Onload Atom = 0x44206
Onloadeddata Atom = 0x4420c
Onloadedmetadata Atom = 0x45510
Onloadstart Atom = 0x46b0b
Onmessage Atom = 0x47609
Onmousedown Atom = 0x47f0b
Onmousemove Atom = 0x48a0b
Onmouseout Atom = 0x4950a
Onmouseover Atom = 0x4a20b
Onmouseup Atom = 0x4ad09
Onmousewheel Atom = 0x4b60c
Onoffline Atom = 0x4c209
Ononline Atom = 0x4cb08
Onpagehide Atom = 0x4d30a
Onpageshow Atom = 0x4fe0a
Onpause Atom = 0x50d07
Onplay Atom = 0x51706
Onplaying Atom = 0x51709
Onpopstate Atom = 0x5200a
Onprogress Atom = 0x52a0a
Onratechange Atom = 0x53e0c
Onreset Atom = 0x54a07
Onresize Atom = 0x55108
Onscroll Atom = 0x55f08
Onseeked Atom = 0x56708
Onseeking Atom = 0x56f09
Onselect Atom = 0x57808
Onshow Atom = 0x58206
Onsort Atom = 0x58b06
Onstalled Atom = 0x59509
Onstorage Atom = 0x59e09
Onsubmit Atom = 0x5a708
Onsuspend Atom = 0x5bb09
Ontimeupdate Atom = 0xdb0c
Ontoggle Atom = 0x5c408
Onunload Atom = 0x5cc08
Onvolumechange Atom = 0x5d40e
Onwaiting Atom = 0x5e209
Open Atom = 0x3cf04
Optgroup Atom = 0xf608
Optimum Atom = 0x5eb07
Option Atom = 0x60006
Output Atom = 0x49c06
P Atom = 0xc01
Param Atom = 0xc05
Pattern Atom = 0x5107
Ping Atom = 0x7704
Placeholder Atom = 0xc30b
Plaintext Atom = 0xfd09
Poster Atom = 0x15706
Pre Atom = 0x25e03
Preload Atom = 0x25e07
Progress Atom = 0x52c08
Prompt Atom = 0x5fa06
Public Atom = 0x41e06
Q Atom = 0x13101
Radiogroup Atom = 0x30a
Readonly Atom = 0x2fb08
Rel Atom = 0x25f03
Required Atom = 0x1d008
Reversed Atom = 0x5a08
Rows Atom = 0x9204
Rowspan Atom = 0x9207
Rp Atom = 0x1c602
Rt Atom = 0x13f02
Ruby Atom = 0xaf04
S Atom = 0x2c01
Samp Atom = 0x4e04
Sandbox Atom = 0xbb07
Scope Atom = 0x2bd05
Scoped Atom = 0x2bd06
Script Atom = 0x3d406
Seamless Atom = 0x31c08
Section Atom = 0x4e207
Select Atom = 0x57a06
Selected Atom = 0x57a08
Shape Atom = 0x4f905
Size Atom = 0x55504
Sizes Atom = 0x55505
Small Atom = 0x18f05
Sortable Atom = 0x58d08
Sorted Atom = 0x19906
Source Atom = 0x1aa06
Spacer Atom = 0x2db06
Span Atom = 0x9504
Spellcheck Atom = 0x3230a
Src Atom = 0x3c303
Srcdoc Atom = 0x3c306
Srclang Atom = 0x41107
Start Atom = 0x38605
Step Atom = 0x5f704
Strike Atom = 0x53306
Strong Atom = 0x55906
Style Atom = 0x61105
Sub Atom = 0x5a903
Summary Atom = 0x61607
Sup Atom = 0x61d03
Svg Atom = 0x62003
System Atom = 0x62306
Tabindex Atom = 0x46308
Table Atom = 0x42d05
Target Atom = 0x24b06
Tbody Atom = 0x2e05
Td Atom = 0x4702
Template Atom = 0x62608
Textarea Atom = 0x2f608
Tfoot Atom = 0x8c05
Th Atom = 0x22e02
Thead Atom = 0x2d405
Time Atom = 0xdd04
Title Atom = 0xa105
Tr Atom = 0x10502
Track Atom = 0x10505
Translate Atom = 0x14009
Tt Atom = 0x5302
Type Atom = 0x21404
Typemustmatch Atom = 0x2140d
U Atom = 0xb01
Ul Atom = 0x8a02
Usemap Atom = 0x51106
Value Atom = 0x4005
Var Atom = 0x11503
Video Atom = 0x28105
Wbr Atom = 0x12103
Width Atom = 0x50705
Wrap Atom = 0x58704
Xmp Atom = 0xc103*/
}
| ConvertUTF8 | identifier_name |
info_frames.py | """
AlbumInfo-related frames for the Album view.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Iterator, Collection, Any
from ds_tools.caching.decorators import cached_property
from tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer
from tk_gui.elements.buttons import Button, EventButton as EButton
from tk_gui.elements.choices import ListBox, CheckBox, Combo
from tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame
from tk_gui.elements.menu import Menu, MenuItem
from tk_gui.elements.rating import Rating
from tk_gui.popups import pick_file_popup
from music.common.disco_entry import DiscoEntryType
from music.files import SongFile
from music.manager.update import TrackInfo, AlbumInfo
from ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file
from .helpers import IText
from .images import AlbumCoverImageBuilder
from .list_box import EditableListBox
if TYPE_CHECKING:
from tk_gui.typing import Layout, Bool, XY
__all__ = ['AlbumInfoFrame', 'TrackInfoFrame']
log = logging.getLogger(__name__)
ValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input
LRG_FONT = ('Helvetica', 20)
class TagModMixin:
_tag_vals_and_eles: dict[str, tuple[Any, ValueEle]]
def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]:
for key, (original_val, val_ele) in self._tag_vals_and_eles.items():
if (value := val_ele.value) != original_val:
yield key, val_ele, original_val, value
def reset_tag_values(self):
for key, val_ele, original_val, value in self._iter_changes():
match val_ele:
case ListBox() | EditableListBox():
val_ele.update(choices=original_val, replace=True, select=True)
case _: # Input() | Text() | CheckBox() | Combo() | Rating()
val_ele.update(original_val)
def get_modified(self) -> dict[str, tuple[Any, Any]]:
return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()}
class AlbumInfoFrame(TagModMixin, InteractiveFrame):
album_info: AlbumInfo
def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs):
super().__init__(**kwargs)
self.album_info = get_album_info(album)
self.album_dir = get_album_dir(album)
self.cover_size = cover_size
self._tag_vals_and_eles = {}
# region Layout Generation
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)]
yield [HorizontalSeparator()]
yield from self.build_buttons()
def build_meta_rows(self):
data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()}
for track in self.album_dir:
info = track.info
for key, values in data.items():
if value := info[key]:
values.add(str(value))
data = {key: ' / '.join(sorted(values)) for key, values in data.items()}
yield [
Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)),
Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)),
Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)),
]
yield [HorizontalSeparator()]
def build_tag_rows(self):
tooltips = {
'name': 'The name that was / should be used for the album directory',
'parent': 'The name that was / should be used for the artist directory',
'singer': 'Solo singer of a group, when the album should be sorted under their group',
'solo_of_group': 'Whether the singer is a soloist',
}
disabled = self.disabled
for key, value in self.album_info.to_dict(skip={'tracks'}, genres_as_set=True).items():
if tooltip := tooltips.get(key):
kwargs = {'tooltip': tooltip}
else:
kwargs = {}
key_ele = label_ele(key, **kwargs)
if key == 'type':
types = [de.real_name for de in DiscoEntryType]
if value:
if isinstance(value, DiscoEntryType):
value = value.real_name
elif value not in types:
types.append(value)
val_ele = Combo(
types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type
)
elif key == 'genre':
val_ele = _genre_list_box(value, self.album_info, disabled, key=key)
elif key in {'mp4', 'solo_of_group'}:
kwargs['disabled'] = True if key == 'mp4' else disabled
val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs)
else:
if key.startswith('wiki_'):
kwargs['link'] = True
elif key == 'number':
kwargs['change_cb'] = self._update_numbered_type
value = _normalize_input_value(value)
val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [key_ele, val_ele]
@cached_property
def cover_image_frame(self) -> Frame:
class ImageMenu(Menu):
MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled)
# TODO: Include get_wiki_cover_choice?
cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size)
return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu())
# endregion
# region Layout Generation - Buttons
def build_buttons(self) -> Layout:
# These frames need to be in the same row for them to occupy the same space when visible
yield [self.view_buttons_frame, self.edit_buttons_frame]
@cached_property
def view_buttons_frame(self) -> Frame:
rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()]
return Frame(rows, visible=self.disabled, side='t')
def _build_view_buttons(self) -> Iterator[list[Button]]: # noqa
kwargs = {'size': (18, 1), 'borderwidth': 3}
yield [
EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs),
EButton('View All Tags', key='view_all_tags', **kwargs),
EButton('Edit', key='edit_album', **kwargs),
EButton('Wiki Update', key='wiki_update', **kwargs),
]
kwargs['size'] = (25, 1)
# TODO: Handle replacing inferior versions in real destination directory
yield [
# EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs),
EButton('Sort Into Library', key='sort_into_library', **kwargs),
# EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs),
]
yield [
EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs),
EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs),
]
# TODO: Unify the above/below rows / shorten text / merge functionality with the sort view
yield [
EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs),
EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs),
]
open_btn = EButton('\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3)
album_dir = self.album_dir
# TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories
if len(album_dir.parent) > 1:
kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3)
yield [
EButton('\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)),
open_btn,
EButton('\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)),
]
else:
yield [open_btn]
@cached_property
def edit_buttons_frame(self) -> BasicRowFrame:
kwargs = {'size': (18, 1), 'borderwidth': 3}
row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)]
return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled)
# endregion
# region Event Handling
def enable(self):
|
def disable(self):
if self.disabled:
return
super().disable()
self.edit_buttons_frame.hide()
self.view_buttons_frame.show()
def _update_numbered_type(self, var_name, unknown, action):
# Registered as a change_cb for `type` and `number`
num_ele: Input = self._tag_vals_and_eles['number'][1]
value = ''
try:
value = num_ele.value.strip()
num_val = int(value)
except (TypeError, ValueError, AttributeError):
num_ele.validated(not value)
return
else:
num_ele.validated(True)
type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value)
if type_val == DiscoEntryType.UNKNOWN:
return
num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1]
num_type_ele.update(type_val.format(num_val))
def _replace_cover_image(self, event=None):
if self.disabled:
return
if path := pick_file_popup(title='Pick new album cover'):
cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1]
cover_path_ele.update(path.as_posix())
image_ele: Image = self.cover_image_frame.rows[0].elements[0]
image_ele.image = path
# endregion
class TrackInfoFrame(TagModMixin, InteractiveFrame):
track_info: TrackInfo
song_file: SongFile
show_cover: Bool = False
def __init__(self, track: TrackIdentifier, **kwargs):
super().__init__(**kwargs)
self.track_info = get_track_info(track)
self.song_file = get_track_file(track)
self._tag_vals_and_eles = {}
@cached_property
def path_str(self) -> str:
return self.track_info.path.as_posix()
@cached_property
def file_name(self) -> str:
return self.track_info.path.name
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield from self.build_info_rows()
def build_meta_rows(self) -> Iterator[list[Element]]:
yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))]
sf = self.song_file
yield [
Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)),
Text('Type:'), IText(sf.tag_version, size=(20, 1)),
]
def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]:
fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating']
if keys:
fields = [f for f in fields if f not in keys]
track_info, disabled = self.track_info, self.disabled
for key in fields:
if key == 'genre':
value = track_info.genre_set.difference(track_info.album.genre_set)
val_ele = _genre_list_box(value, track_info, disabled)
elif key == 'rating':
if (value := track_info[key]) is None:
value = 0
val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled)
else:
value = _normalize_input_value(track_info[key])
val_ele = Input(value, size=(50, 1), disabled=disabled)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [label_ele(key, size=(6, 1)), val_ele]
def _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox:
kwargs.setdefault('add_title', 'Add genre')
kwargs.setdefault('add_prompt', f'Enter a new genre value to add to {info.title!r}')
kwargs.setdefault('list_width', 40)
return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs)
def _normalize_input_value(value) -> str:
if value is None:
value = ''
elif not isinstance(value, str):
value = str(value)
return value
def label_ele(text: str, size: XY = (15, 1), **kwargs) -> Text:
return Text(text.replace('_', ' ').title(), size=size, **kwargs)
class TagFrame(InteractiveFrame):
def enable(self):
if not self.disabled:
return
for row in self.rows:
for ele in row.elements:
try:
if ele.key == 'mp4': # Read-only
continue
except AttributeError:
pass
try:
ele.enable() # noqa
except AttributeError:
pass
self.disabled = False
| if not self.disabled:
return
super().enable()
self.view_buttons_frame.hide()
self.edit_buttons_frame.show() | identifier_body |
info_frames.py | """
AlbumInfo-related frames for the Album view.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Iterator, Collection, Any
from ds_tools.caching.decorators import cached_property
from tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer
from tk_gui.elements.buttons import Button, EventButton as EButton
from tk_gui.elements.choices import ListBox, CheckBox, Combo
from tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame
from tk_gui.elements.menu import Menu, MenuItem
from tk_gui.elements.rating import Rating
from tk_gui.popups import pick_file_popup
from music.common.disco_entry import DiscoEntryType
from music.files import SongFile
from music.manager.update import TrackInfo, AlbumInfo
from ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file
from .helpers import IText
from .images import AlbumCoverImageBuilder
from .list_box import EditableListBox
if TYPE_CHECKING:
from tk_gui.typing import Layout, Bool, XY
__all__ = ['AlbumInfoFrame', 'TrackInfoFrame']
log = logging.getLogger(__name__)
ValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input
LRG_FONT = ('Helvetica', 20)
class TagModMixin:
_tag_vals_and_eles: dict[str, tuple[Any, ValueEle]]
def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]:
for key, (original_val, val_ele) in self._tag_vals_and_eles.items():
if (value := val_ele.value) != original_val:
yield key, val_ele, original_val, value
def reset_tag_values(self):
for key, val_ele, original_val, value in self._iter_changes():
match val_ele:
case ListBox() | EditableListBox():
val_ele.update(choices=original_val, replace=True, select=True)
case _: # Input() | Text() | CheckBox() | Combo() | Rating()
val_ele.update(original_val)
def get_modified(self) -> dict[str, tuple[Any, Any]]:
return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()}
class AlbumInfoFrame(TagModMixin, InteractiveFrame):
album_info: AlbumInfo
def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs):
super().__init__(**kwargs)
self.album_info = get_album_info(album)
self.album_dir = get_album_dir(album)
self.cover_size = cover_size
self._tag_vals_and_eles = {}
# region Layout Generation
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)]
yield [HorizontalSeparator()]
yield from self.build_buttons()
def build_meta_rows(self):
data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()}
for track in self.album_dir:
info = track.info
for key, values in data.items():
if value := info[key]:
values.add(str(value))
data = {key: ' / '.join(sorted(values)) for key, values in data.items()}
yield [
Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)),
Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)),
Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)),
]
yield [HorizontalSeparator()]
def build_tag_rows(self):
tooltips = {
'name': 'The name that was / should be used for the album directory',
'parent': 'The name that was / should be used for the artist directory',
'singer': 'Solo singer of a group, when the album should be sorted under their group',
'solo_of_group': 'Whether the singer is a soloist',
}
disabled = self.disabled
for key, value in self.album_info.to_dict(skip={'tracks'}, genres_as_set=True).items():
if tooltip := tooltips.get(key):
kwargs = {'tooltip': tooltip}
else:
kwargs = {}
key_ele = label_ele(key, **kwargs)
if key == 'type':
types = [de.real_name for de in DiscoEntryType]
if value:
if isinstance(value, DiscoEntryType):
value = value.real_name
elif value not in types:
types.append(value)
val_ele = Combo(
types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type
)
elif key == 'genre':
val_ele = _genre_list_box(value, self.album_info, disabled, key=key)
elif key in {'mp4', 'solo_of_group'}:
kwargs['disabled'] = True if key == 'mp4' else disabled
val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs)
else:
if key.startswith('wiki_'):
kwargs['link'] = True
elif key == 'number':
kwargs['change_cb'] = self._update_numbered_type
value = _normalize_input_value(value)
val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [key_ele, val_ele]
@cached_property
def cover_image_frame(self) -> Frame:
class ImageMenu(Menu):
MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled)
# TODO: Include get_wiki_cover_choice?
cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size)
return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu())
# endregion
# region Layout Generation - Buttons
def build_buttons(self) -> Layout:
# These frames need to be in the same row for them to occupy the same space when visible
yield [self.view_buttons_frame, self.edit_buttons_frame]
@cached_property
def view_buttons_frame(self) -> Frame:
rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()]
return Frame(rows, visible=self.disabled, side='t')
def _build_view_buttons(self) -> Iterator[list[Button]]: # noqa
kwargs = {'size': (18, 1), 'borderwidth': 3}
yield [
EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs),
EButton('View All Tags', key='view_all_tags', **kwargs),
EButton('Edit', key='edit_album', **kwargs),
EButton('Wiki Update', key='wiki_update', **kwargs),
]
kwargs['size'] = (25, 1)
# TODO: Handle replacing inferior versions in real destination directory
yield [
# EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs),
EButton('Sort Into Library', key='sort_into_library', **kwargs),
# EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs),
]
yield [
EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs),
EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs),
]
# TODO: Unify the above/below rows / shorten text / merge functionality with the sort view
yield [
EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs),
EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs),
]
open_btn = EButton('\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3)
album_dir = self.album_dir
# TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories
if len(album_dir.parent) > 1:
kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3)
yield [
EButton('\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)),
open_btn,
EButton('\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)),
]
else:
yield [open_btn]
@cached_property
def edit_buttons_frame(self) -> BasicRowFrame:
kwargs = {'size': (18, 1), 'borderwidth': 3}
row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)]
return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled)
# endregion
# region Event Handling
def | (self):
if not self.disabled:
return
super().enable()
self.view_buttons_frame.hide()
self.edit_buttons_frame.show()
def disable(self):
if self.disabled:
return
super().disable()
self.edit_buttons_frame.hide()
self.view_buttons_frame.show()
def _update_numbered_type(self, var_name, unknown, action):
# Registered as a change_cb for `type` and `number`
num_ele: Input = self._tag_vals_and_eles['number'][1]
value = ''
try:
value = num_ele.value.strip()
num_val = int(value)
except (TypeError, ValueError, AttributeError):
num_ele.validated(not value)
return
else:
num_ele.validated(True)
type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value)
if type_val == DiscoEntryType.UNKNOWN:
return
num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1]
num_type_ele.update(type_val.format(num_val))
def _replace_cover_image(self, event=None):
if self.disabled:
return
if path := pick_file_popup(title='Pick new album cover'):
cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1]
cover_path_ele.update(path.as_posix())
image_ele: Image = self.cover_image_frame.rows[0].elements[0]
image_ele.image = path
# endregion
class TrackInfoFrame(TagModMixin, InteractiveFrame):
track_info: TrackInfo
song_file: SongFile
show_cover: Bool = False
def __init__(self, track: TrackIdentifier, **kwargs):
super().__init__(**kwargs)
self.track_info = get_track_info(track)
self.song_file = get_track_file(track)
self._tag_vals_and_eles = {}
@cached_property
def path_str(self) -> str:
return self.track_info.path.as_posix()
@cached_property
def file_name(self) -> str:
return self.track_info.path.name
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield from self.build_info_rows()
def build_meta_rows(self) -> Iterator[list[Element]]:
yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))]
sf = self.song_file
yield [
Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)),
Text('Type:'), IText(sf.tag_version, size=(20, 1)),
]
def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]:
fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating']
if keys:
fields = [f for f in fields if f not in keys]
track_info, disabled = self.track_info, self.disabled
for key in fields:
if key == 'genre':
value = track_info.genre_set.difference(track_info.album.genre_set)
val_ele = _genre_list_box(value, track_info, disabled)
elif key == 'rating':
if (value := track_info[key]) is None:
value = 0
val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled)
else:
value = _normalize_input_value(track_info[key])
val_ele = Input(value, size=(50, 1), disabled=disabled)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [label_ele(key, size=(6, 1)), val_ele]
def _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox:
kwargs.setdefault('add_title', 'Add genre')
kwargs.setdefault('add_prompt', f'Enter a new genre value to add to {info.title!r}')
kwargs.setdefault('list_width', 40)
return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs)
def _normalize_input_value(value) -> str:
if value is None:
value = ''
elif not isinstance(value, str):
value = str(value)
return value
def label_ele(text: str, size: XY = (15, 1), **kwargs) -> Text:
return Text(text.replace('_', ' ').title(), size=size, **kwargs)
class TagFrame(InteractiveFrame):
def enable(self):
if not self.disabled:
return
for row in self.rows:
for ele in row.elements:
try:
if ele.key == 'mp4': # Read-only
continue
except AttributeError:
pass
try:
ele.enable() # noqa
except AttributeError:
pass
self.disabled = False
| enable | identifier_name |
info_frames.py | """
AlbumInfo-related frames for the Album view.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Iterator, Collection, Any
from ds_tools.caching.decorators import cached_property
from tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer
from tk_gui.elements.buttons import Button, EventButton as EButton
from tk_gui.elements.choices import ListBox, CheckBox, Combo
from tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame
from tk_gui.elements.menu import Menu, MenuItem
from tk_gui.elements.rating import Rating
from tk_gui.popups import pick_file_popup
from music.common.disco_entry import DiscoEntryType
from music.files import SongFile
from music.manager.update import TrackInfo, AlbumInfo
from ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file
from .helpers import IText
from .images import AlbumCoverImageBuilder
from .list_box import EditableListBox
if TYPE_CHECKING:
from tk_gui.typing import Layout, Bool, XY
__all__ = ['AlbumInfoFrame', 'TrackInfoFrame']
log = logging.getLogger(__name__)
ValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input
LRG_FONT = ('Helvetica', 20)
class TagModMixin:
_tag_vals_and_eles: dict[str, tuple[Any, ValueEle]]
def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]:
for key, (original_val, val_ele) in self._tag_vals_and_eles.items():
if (value := val_ele.value) != original_val:
yield key, val_ele, original_val, value
def reset_tag_values(self):
for key, val_ele, original_val, value in self._iter_changes():
match val_ele:
case ListBox() | EditableListBox():
val_ele.update(choices=original_val, replace=True, select=True)
case _: # Input() | Text() | CheckBox() | Combo() | Rating()
val_ele.update(original_val)
def get_modified(self) -> dict[str, tuple[Any, Any]]:
return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()}
class AlbumInfoFrame(TagModMixin, InteractiveFrame):
album_info: AlbumInfo
def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs):
super().__init__(**kwargs)
self.album_info = get_album_info(album)
self.album_dir = get_album_dir(album)
self.cover_size = cover_size
self._tag_vals_and_eles = {}
# region Layout Generation
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)]
yield [HorizontalSeparator()]
yield from self.build_buttons()
def build_meta_rows(self):
data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()}
for track in self.album_dir:
info = track.info
for key, values in data.items():
if value := info[key]:
values.add(str(value))
data = {key: ' / '.join(sorted(values)) for key, values in data.items()}
yield [
Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)),
Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)),
Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)),
]
yield [HorizontalSeparator()]
def build_tag_rows(self):
tooltips = {
'name': 'The name that was / should be used for the album directory',
'parent': 'The name that was / should be used for the artist directory',
'singer': 'Solo singer of a group, when the album should be sorted under their group',
'solo_of_group': 'Whether the singer is a soloist',
}
disabled = self.disabled
for key, value in self.album_info.to_dict(skip={'tracks'}, genres_as_set=True).items():
if tooltip := tooltips.get(key):
kwargs = {'tooltip': tooltip}
else:
kwargs = {}
key_ele = label_ele(key, **kwargs)
if key == 'type':
types = [de.real_name for de in DiscoEntryType]
if value:
if isinstance(value, DiscoEntryType):
value = value.real_name
elif value not in types: | val_ele = _genre_list_box(value, self.album_info, disabled, key=key)
elif key in {'mp4', 'solo_of_group'}:
kwargs['disabled'] = True if key == 'mp4' else disabled
val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs)
else:
if key.startswith('wiki_'):
kwargs['link'] = True
elif key == 'number':
kwargs['change_cb'] = self._update_numbered_type
value = _normalize_input_value(value)
val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [key_ele, val_ele]
@cached_property
def cover_image_frame(self) -> Frame:
class ImageMenu(Menu):
MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled)
# TODO: Include get_wiki_cover_choice?
cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size)
return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu())
# endregion
# region Layout Generation - Buttons
def build_buttons(self) -> Layout:
# These frames need to be in the same row for them to occupy the same space when visible
yield [self.view_buttons_frame, self.edit_buttons_frame]
@cached_property
def view_buttons_frame(self) -> Frame:
rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()]
return Frame(rows, visible=self.disabled, side='t')
def _build_view_buttons(self) -> Iterator[list[Button]]: # noqa
kwargs = {'size': (18, 1), 'borderwidth': 3}
yield [
EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs),
EButton('View All Tags', key='view_all_tags', **kwargs),
EButton('Edit', key='edit_album', **kwargs),
EButton('Wiki Update', key='wiki_update', **kwargs),
]
kwargs['size'] = (25, 1)
# TODO: Handle replacing inferior versions in real destination directory
yield [
# EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs),
EButton('Sort Into Library', key='sort_into_library', **kwargs),
# EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs),
]
yield [
EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs),
EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs),
]
# TODO: Unify the above/below rows / shorten text / merge functionality with the sort view
yield [
EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs),
EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs),
]
open_btn = EButton('\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3)
album_dir = self.album_dir
# TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories
if len(album_dir.parent) > 1:
kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3)
yield [
EButton('\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)),
open_btn,
EButton('\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)),
]
else:
yield [open_btn]
@cached_property
def edit_buttons_frame(self) -> BasicRowFrame:
kwargs = {'size': (18, 1), 'borderwidth': 3}
row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)]
return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled)
# endregion
# region Event Handling
def enable(self):
if not self.disabled:
return
super().enable()
self.view_buttons_frame.hide()
self.edit_buttons_frame.show()
def disable(self):
if self.disabled:
return
super().disable()
self.edit_buttons_frame.hide()
self.view_buttons_frame.show()
def _update_numbered_type(self, var_name, unknown, action):
# Registered as a change_cb for `type` and `number`
num_ele: Input = self._tag_vals_and_eles['number'][1]
value = ''
try:
value = num_ele.value.strip()
num_val = int(value)
except (TypeError, ValueError, AttributeError):
num_ele.validated(not value)
return
else:
num_ele.validated(True)
type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value)
if type_val == DiscoEntryType.UNKNOWN:
return
num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1]
num_type_ele.update(type_val.format(num_val))
def _replace_cover_image(self, event=None):
if self.disabled:
return
if path := pick_file_popup(title='Pick new album cover'):
cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1]
cover_path_ele.update(path.as_posix())
image_ele: Image = self.cover_image_frame.rows[0].elements[0]
image_ele.image = path
# endregion
class TrackInfoFrame(TagModMixin, InteractiveFrame):
track_info: TrackInfo
song_file: SongFile
show_cover: Bool = False
def __init__(self, track: TrackIdentifier, **kwargs):
super().__init__(**kwargs)
self.track_info = get_track_info(track)
self.song_file = get_track_file(track)
self._tag_vals_and_eles = {}
@cached_property
def path_str(self) -> str:
return self.track_info.path.as_posix()
@cached_property
def file_name(self) -> str:
return self.track_info.path.name
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield from self.build_info_rows()
def build_meta_rows(self) -> Iterator[list[Element]]:
yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))]
sf = self.song_file
yield [
Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)),
Text('Type:'), IText(sf.tag_version, size=(20, 1)),
]
def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]:
fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating']
if keys:
fields = [f for f in fields if f not in keys]
track_info, disabled = self.track_info, self.disabled
for key in fields:
if key == 'genre':
value = track_info.genre_set.difference(track_info.album.genre_set)
val_ele = _genre_list_box(value, track_info, disabled)
elif key == 'rating':
if (value := track_info[key]) is None:
value = 0
val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled)
else:
value = _normalize_input_value(track_info[key])
val_ele = Input(value, size=(50, 1), disabled=disabled)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [label_ele(key, size=(6, 1)), val_ele]
def _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox:
kwargs.setdefault('add_title', 'Add genre')
kwargs.setdefault('add_prompt', f'Enter a new genre value to add to {info.title!r}')
kwargs.setdefault('list_width', 40)
return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs)
def _normalize_input_value(value) -> str:
if value is None:
value = ''
elif not isinstance(value, str):
value = str(value)
return value
def label_ele(text: str, size: XY = (15, 1), **kwargs) -> Text:
return Text(text.replace('_', ' ').title(), size=size, **kwargs)
class TagFrame(InteractiveFrame):
def enable(self):
if not self.disabled:
return
for row in self.rows:
for ele in row.elements:
try:
if ele.key == 'mp4': # Read-only
continue
except AttributeError:
pass
try:
ele.enable() # noqa
except AttributeError:
pass
self.disabled = False | types.append(value)
val_ele = Combo(
types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type
)
elif key == 'genre': | random_line_split |
info_frames.py | """
AlbumInfo-related frames for the Album view.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Iterator, Collection, Any
from ds_tools.caching.decorators import cached_property
from tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer
from tk_gui.elements.buttons import Button, EventButton as EButton
from tk_gui.elements.choices import ListBox, CheckBox, Combo
from tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame
from tk_gui.elements.menu import Menu, MenuItem
from tk_gui.elements.rating import Rating
from tk_gui.popups import pick_file_popup
from music.common.disco_entry import DiscoEntryType
from music.files import SongFile
from music.manager.update import TrackInfo, AlbumInfo
from ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file
from .helpers import IText
from .images import AlbumCoverImageBuilder
from .list_box import EditableListBox
if TYPE_CHECKING:
from tk_gui.typing import Layout, Bool, XY
__all__ = ['AlbumInfoFrame', 'TrackInfoFrame']
log = logging.getLogger(__name__)
ValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input
LRG_FONT = ('Helvetica', 20)
class TagModMixin:
_tag_vals_and_eles: dict[str, tuple[Any, ValueEle]]
def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]:
for key, (original_val, val_ele) in self._tag_vals_and_eles.items():
if (value := val_ele.value) != original_val:
yield key, val_ele, original_val, value
def reset_tag_values(self):
for key, val_ele, original_val, value in self._iter_changes():
match val_ele:
case ListBox() | EditableListBox():
val_ele.update(choices=original_val, replace=True, select=True)
case _: # Input() | Text() | CheckBox() | Combo() | Rating()
val_ele.update(original_val)
def get_modified(self) -> dict[str, tuple[Any, Any]]:
return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()}
class AlbumInfoFrame(TagModMixin, InteractiveFrame):
album_info: AlbumInfo
def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs):
super().__init__(**kwargs)
self.album_info = get_album_info(album)
self.album_dir = get_album_dir(album)
self.cover_size = cover_size
self._tag_vals_and_eles = {}
# region Layout Generation
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)]
yield [HorizontalSeparator()]
yield from self.build_buttons()
def build_meta_rows(self):
data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()}
for track in self.album_dir:
info = track.info
for key, values in data.items():
if value := info[key]:
values.add(str(value))
data = {key: ' / '.join(sorted(values)) for key, values in data.items()}
yield [
Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)),
Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)),
Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)),
]
yield [HorizontalSeparator()]
def build_tag_rows(self):
tooltips = {
'name': 'The name that was / should be used for the album directory',
'parent': 'The name that was / should be used for the artist directory',
'singer': 'Solo singer of a group, when the album should be sorted under their group',
'solo_of_group': 'Whether the singer is a soloist',
}
disabled = self.disabled
for key, value in self.album_info.to_dict(skip={'tracks'}, genres_as_set=True).items():
if tooltip := tooltips.get(key):
kwargs = {'tooltip': tooltip}
else:
kwargs = {}
key_ele = label_ele(key, **kwargs)
if key == 'type':
types = [de.real_name for de in DiscoEntryType]
if value:
if isinstance(value, DiscoEntryType):
value = value.real_name
elif value not in types:
types.append(value)
val_ele = Combo(
types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type
)
elif key == 'genre':
val_ele = _genre_list_box(value, self.album_info, disabled, key=key)
elif key in {'mp4', 'solo_of_group'}:
kwargs['disabled'] = True if key == 'mp4' else disabled
val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs)
else:
if key.startswith('wiki_'):
kwargs['link'] = True
elif key == 'number':
kwargs['change_cb'] = self._update_numbered_type
value = _normalize_input_value(value)
val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [key_ele, val_ele]
@cached_property
def cover_image_frame(self) -> Frame:
class ImageMenu(Menu):
MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled)
# TODO: Include get_wiki_cover_choice?
cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size)
return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu())
# endregion
# region Layout Generation - Buttons
def build_buttons(self) -> Layout:
# These frames need to be in the same row for them to occupy the same space when visible
yield [self.view_buttons_frame, self.edit_buttons_frame]
@cached_property
def view_buttons_frame(self) -> Frame:
rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()]
return Frame(rows, visible=self.disabled, side='t')
def _build_view_buttons(self) -> Iterator[list[Button]]: # noqa
kwargs = {'size': (18, 1), 'borderwidth': 3}
yield [
EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs),
EButton('View All Tags', key='view_all_tags', **kwargs),
EButton('Edit', key='edit_album', **kwargs),
EButton('Wiki Update', key='wiki_update', **kwargs),
]
kwargs['size'] = (25, 1)
# TODO: Handle replacing inferior versions in real destination directory
yield [
# EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs),
EButton('Sort Into Library', key='sort_into_library', **kwargs),
# EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs),
]
yield [
EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs),
EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs),
]
# TODO: Unify the above/below rows / shorten text / merge functionality with the sort view
yield [
EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs),
EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs),
]
open_btn = EButton('\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3)
album_dir = self.album_dir
# TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories
if len(album_dir.parent) > 1:
kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3)
yield [
EButton('\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)),
open_btn,
EButton('\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)),
]
else:
yield [open_btn]
@cached_property
def edit_buttons_frame(self) -> BasicRowFrame:
kwargs = {'size': (18, 1), 'borderwidth': 3}
row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)]
return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled)
# endregion
# region Event Handling
def enable(self):
if not self.disabled:
return
super().enable()
self.view_buttons_frame.hide()
self.edit_buttons_frame.show()
def disable(self):
if self.disabled:
|
super().disable()
self.edit_buttons_frame.hide()
self.view_buttons_frame.show()
def _update_numbered_type(self, var_name, unknown, action):
# Registered as a change_cb for `type` and `number`
num_ele: Input = self._tag_vals_and_eles['number'][1]
value = ''
try:
value = num_ele.value.strip()
num_val = int(value)
except (TypeError, ValueError, AttributeError):
num_ele.validated(not value)
return
else:
num_ele.validated(True)
type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value)
if type_val == DiscoEntryType.UNKNOWN:
return
num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1]
num_type_ele.update(type_val.format(num_val))
def _replace_cover_image(self, event=None):
if self.disabled:
return
if path := pick_file_popup(title='Pick new album cover'):
cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1]
cover_path_ele.update(path.as_posix())
image_ele: Image = self.cover_image_frame.rows[0].elements[0]
image_ele.image = path
# endregion
class TrackInfoFrame(TagModMixin, InteractiveFrame):
track_info: TrackInfo
song_file: SongFile
show_cover: Bool = False
def __init__(self, track: TrackIdentifier, **kwargs):
super().__init__(**kwargs)
self.track_info = get_track_info(track)
self.song_file = get_track_file(track)
self._tag_vals_and_eles = {}
@cached_property
def path_str(self) -> str:
return self.track_info.path.as_posix()
@cached_property
def file_name(self) -> str:
return self.track_info.path.name
def get_custom_layout(self) -> Layout:
yield from self.build_meta_rows()
yield from self.build_info_rows()
def build_meta_rows(self) -> Iterator[list[Element]]:
yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))]
sf = self.song_file
yield [
Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)),
Text('Type:'), IText(sf.tag_version, size=(20, 1)),
]
def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]:
fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating']
if keys:
fields = [f for f in fields if f not in keys]
track_info, disabled = self.track_info, self.disabled
for key in fields:
if key == 'genre':
value = track_info.genre_set.difference(track_info.album.genre_set)
val_ele = _genre_list_box(value, track_info, disabled)
elif key == 'rating':
if (value := track_info[key]) is None:
value = 0
val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled)
else:
value = _normalize_input_value(track_info[key])
val_ele = Input(value, size=(50, 1), disabled=disabled)
self._tag_vals_and_eles[key] = (value, val_ele)
yield [label_ele(key, size=(6, 1)), val_ele]
def _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox:
kwargs.setdefault('add_title', 'Add genre')
kwargs.setdefault('add_prompt', f'Enter a new genre value to add to {info.title!r}')
kwargs.setdefault('list_width', 40)
return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs)
def _normalize_input_value(value) -> str:
if value is None:
value = ''
elif not isinstance(value, str):
value = str(value)
return value
def label_ele(text: str, size: XY = (15, 1), **kwargs) -> Text:
return Text(text.replace('_', ' ').title(), size=size, **kwargs)
class TagFrame(InteractiveFrame):
def enable(self):
if not self.disabled:
return
for row in self.rows:
for ele in row.elements:
try:
if ele.key == 'mp4': # Read-only
continue
except AttributeError:
pass
try:
ele.enable() # noqa
except AttributeError:
pass
self.disabled = False
| return | conditional_block |
input_subset_image_labels.py | """
Input pipeline for Open Images v4 image labels subset. For training and evaluation
a tf.data.Dataset is generated from the {train, val}-imageid2imagelabels.p, using a tf.py_func for generating ground truth.
In this case we let TF parallelize GT generation instead of a creation with a serial generator. Time is reduced by 2.
The Python generator emmits imageid s and a list of classes.
Open Images v4 bounding boxes subset average image size: (?, ?).
"""
import json
import os.path as op
import operator
import pickle
import pprint
import collections
import tensorflow as tf
import sys, glob
from os.path import join, split, realpath
import functools
sys.path.append(split(split(realpath(__file__))[0])[0])
# from preprocessing import augmentation_library as augment
from PIL import Image
import numpy as np
from datetime import datetime
from input_pipelines.utils import from_0_1_to_m1_1, resize_images_and_labels, get_temp_Nb
from utils.utils import _replacevoids
# public functions:
# train_input, evaluate_input, predict_input
SHUFFLE_BUFFER = 2000
NUM_PARALLEL_CALLS = 15
MAX_N_MIDS = 500
PATH_train = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train'
FILEPATH_train_imageid2imagelabels = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train-imageid2positiveimagelabels.p'
# check _generate_rla for verification before changing this
mid2cid = collections.OrderedDict(
{'/m/0199g': 0, # bicycle
'/m/01bjv': 1, # bus
'/m/0k4j': 2, # car
'/m/04_sv': 3, # motorcycle
'/m/07jdr': 4, # train
'/m/07r04': 5, # truck
'/m/01g317': 6, # human (person originally but may include also rider)
'/m/04yx4': 7, # man
'/m/03bt1vf': 8, # woman
'/m/01bl7v': 9, # boy
'/m/05r655': 10, # girl
'/m/015qff': 11, # traffic light
'/m/01mqdt': 12, # traffic sign
'/m/02pv19': 13, # stop sign
'void': 14,
})
def | (filepath):
"""outputs an image (np.float32, 3D) and the
generated weak labels from image-level labels (np.float32, 3D)
"""
with open(filepath, 'rb') as fp:
imageid2mids = pickle.load(fp)
for imageid, mids in imageid2mids.items():
Np = MAX_N_MIDS - len(mids)
if Np < 0:
tf.logging.warn(f'Np = {Np}.')
# pad to MAX_N_MIDS
mids.extend([''.encode('utf-8')]*Np)
yield imageid.encode('utf-8'), mids, Np
def _generate_rla(imageid, mids, rim_size):
# mids: binary np.string, (Nlabels,)
# coords_normalized: np.float32, (Nlabels, 4)
# rim_size: np.int32, (2,)
mids = list(mids)
# zeros_slice = np.zeros(rim_size, dtype=np.float32)
# ones_slice = np.ones(rim_size, dtype=np.float32)
# mid_column = np.zeros((len(mid2cid)), dtype=np.int32)
rla = np.zeros(len(mid2cid), dtype=np.float32)
turn_on_void = True
for mid, cid in mid2cid.items():
if mid.encode('utf-8') in mids:
rla[cid] = 1.
turn_on_void = False
# if empty change the void cid to one
if turn_on_void:
rla[-1] = 1.
# per-pixel normalize rla to a dense multinomial distribution
rla /= np.sum(rla)
# let TF do the tiling as it is faster
# rla = np.tile(rla, (*rim_size, 1))
# assert np.all(np.abs(np.sum(rla, axis=2) - np.ones(rla.shape[:2], dtype=np.float32)) < 0.01), (
# f'some pixels in rla for imageid {imageid} doesn\'t represent a multinomial distribution.')
return rla
def _train_prebatch_processing(imageid, mids, Np, params):
path = tf.strings.join([PATH_train, tf.strings.join([imageid, '.jpg'])], separator='/')
rim = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)
rim = tf.image.convert_image_dtype(rim, tf.float32)
mids = mids[:-Np]
rla = tf.py_func(_generate_rla, [imageid, mids, tf.shape(rim)[:2]], tf.float32, stateful=False)
rla.set_shape((None,))
rla = tf.tile(rla[tf.newaxis, tf.newaxis, ...], tf.concat([tf.shape(rim)[:2], (1,)], 0))
sfe = (params.height_feature_extractor, params.width_feature_extractor)
## prepare
pass
## preprocess
proimage, prolabel = resize_images_and_labels(rim[tf.newaxis, ...],
rla[tf.newaxis, ...],
sfe,
preserve_aspect_ratio=params.preserve_aspect_ratio)
proimage, prolabel = proimage[0], prolabel[0]
# pre-batching augmentations
pass
return proimage, prolabel, imageid
def _train_postbatching_processing(pims, plas, imageids, config, params):
# augmentation
# random_X requires batch dimension (0) to be defined
# pims.set_shape((get_temp_Nb(config, params.Nb), None, None, None))
# plas.set_shape((get_temp_Nb(config, params.Nb), None, None))
#pims = augment.random_color(pims)
#pims = augment.random_blur(pims)
# pims, plas = augment.random_flipping(pims, plas)
# training_lids2cids = _replacevoids(params.training_problem_def['lids2cids'])
# pims, plas = augment.random_scaling(
# pims, plas, [1.0, 2.0], max(training_lids2cids))
# center to [-1, 1)
pims = from_0_1_to_m1_1(pims)
return pims, plas, imageids
def prebatch_dataset(config, params):
dataset = tf.data.Dataset.from_generator(
functools.partial(_imageid_and_mids_generator, FILEPATH_train_imageid2imagelabels),
(tf.string, tf.string, tf.int32),
output_shapes=((), (None,), ()))
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(SHUFFLE_BUFFER))
dataset = dataset.map(
functools.partial(_train_prebatch_processing, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def postbatch_dataset(dataset, config, params):
dataset = dataset.map(
functools.partial(_train_postbatching_processing, config=config, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def train_input(config, params):
"""
Returns a tf.data.Dataset for training from Open Images data
"""
def _grouping(pim, pla, iid):
# group dataset elements as required by estimator
features = {
# 'rawimages': rim,
'proimages': pim,
'imageids': iid,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
}
labels = {
# 'rawlabels': rla,
'prolabels': pla,
}
# next line for distributed debugging
# tf.string tensors is not supported for DMA read/write to GPUs (TF bug)
if params.distribute:
# del features['rawimagespaths']
# del features['rawlabelspaths']
del features['imageids']
return (features, labels)
with tf.name_scope('input_pipeline'):
dataset = prebatch_dataset(config, params)
dataset = dataset.batch(get_temp_Nb(config, params.Nb))
dataset = postbatch_dataset(dataset, config, params)
dataset = dataset.map(_grouping, num_parallel_calls=NUM_PARALLEL_CALLS)
options = tf.data.Options()
options.experimental_autotune = True
# seems than on average gives faster results
dataset = dataset.prefetch(None).with_options(options)
return dataset
# def _evaluate_preprocess(image, label, params):
# _SIZE_FEATURE_EXTRACTOR = (params.height_feature_extractor, params.width_feature_extractor)
# ## prepare
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# evaluation_lids2cids = _replacevoids(params.evaluation_problem_def['lids2cids'])
# label = tf.gather(tf.cast(evaluation_lids2cids, tf.int32), tf.to_int32(label))
# ## preprocess
# proimage = tf.image.resize_images(image, _SIZE_FEATURE_EXTRACTOR)
# prolabel = tf.image.resize_images(label[..., tf.newaxis],
# _SIZE_FEATURE_EXTRACTOR,
# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[..., 0]
# proimage = from_0_1_to_m1_1(proimage)
# print('debug: proimage, prolabel', proimage, prolabel)
# return image, label, proimage, prolabel
# def _evaluate_parse_and_preprocess(im_la_files, data_location, params):
# image, label, im_path, la_path = _load_and_decode(data_location, im_la_files)
# image, label, proimage, prolabel = _evaluate_preprocess(image, label, params)
# return image, label, proimage, prolabel, im_path, la_path
# def evaluate_input(config, params):
# del config
# data_location = params.dataset_directory
# filenames_list = params.filelist_filepath
# filenames_string = tf.cast(filenames_list, tf.string)
# dataset = tf.data.TextLineDataset(filenames=filenames_string)
# dataset = dataset.map(
# functools.partial(_evaluate_parse_and_preprocess, data_location=data_location, params=params),
# num_parallel_calls=30)
# # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same
# dataset = dataset.batch(params.Nb)
# def _grouping(rim, rla, pim, pla, imp, lap):
# # group dataset elements as required by estimator
# features = {
# 'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
# }
# labels = {
# 'rawlabels': rla,
# 'prolabels': pla,
# }
# return (features, labels)
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def _predict_image_generator(params):
# SUPPORTED_EXTENSIONS = ['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'ppm', 'PPM']
# fnames = []
# for se in SUPPORTED_EXTENSIONS:
# fnames.extend(glob.glob(join(params.predict_dir, '*.' + se), recursive=True))
# for im_fname in fnames:
# start = datetime.now()
# im = Image.open(im_fname)
# # next line is time consuming (can take up to 400ms for im of 2 MPixels)
# im_array = np.array(im)
# # print('reading time:', datetime.now() - start)
# yield im_array, im_fname.encode('utf-8'), im_array.shape[0], im_array.shape[1]
# def _predict_preprocess(image, params):
# image.set_shape((None, None, 3))
# image = tf.image.convert_image_dtype(image, tf.float32)
# image = tf.image.resize_images(image, [params.height_feature_extractor, params.width_feature_extractor])
# proimage = from_0_1_to_m1_1(image)
# return proimage
# def predict_input(config, params):
# del config
# dataset = tf.data.Dataset.from_generator(lambda: _predict_image_generator(params),
# output_types=(tf.uint8, tf.string, tf.int32, tf.int32))
# dataset = dataset.map(lambda im, im_path, height, width: (
# im_path, im, _predict_preprocess(im, params)), num_parallel_calls=30)
# dataset = dataset.batch(params.Nb)
# dataset = dataset.prefetch(None)
# def _grouping(imp, rim, pim):
# # group dataset elements as required by estimator
# features = {'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp}
# return features
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def add_train_input_pipeline_arguments(argparser, ctx=None):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# ctx: a context object with a suffix_name attribute
# """
# context_name = ctx.suffix_name if ctx else ''
# argparser.add_argument('dataset_directory' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_train.lst',
# help='List file as provided by the original dataset authors.')
# argparser.add_argument('--preserve_aspect_ratio' + ('_' + context_name if ctx else ''), action='store_true',
# help='Resizes the input images respecting the aspect ratio using cropings.')
# def add_evaluate_input_pipeline_arguments(argparser):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# """
# argparser.add_argument('dataset_directory', type=str, default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath', type=str, default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_val.lst',
# help='List file as provided by the original dataset authors.')
| _imageid_and_mids_generator | identifier_name |
input_subset_image_labels.py | """
Input pipeline for Open Images v4 image labels subset. For training and evaluation
a tf.data.Dataset is generated from the {train, val}-imageid2imagelabels.p, using a tf.py_func for generating ground truth.
In this case we let TF parallelize GT generation instead of a creation with a serial generator. Time is reduced by 2.
The Python generator emmits imageid s and a list of classes.
Open Images v4 bounding boxes subset average image size: (?, ?).
"""
import json
import os.path as op
import operator
import pickle
import pprint
import collections
import tensorflow as tf
import sys, glob
from os.path import join, split, realpath
import functools
sys.path.append(split(split(realpath(__file__))[0])[0])
# from preprocessing import augmentation_library as augment
from PIL import Image
import numpy as np
from datetime import datetime
from input_pipelines.utils import from_0_1_to_m1_1, resize_images_and_labels, get_temp_Nb
from utils.utils import _replacevoids
# public functions:
# train_input, evaluate_input, predict_input
SHUFFLE_BUFFER = 2000
NUM_PARALLEL_CALLS = 15
MAX_N_MIDS = 500
PATH_train = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train'
FILEPATH_train_imageid2imagelabels = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train-imageid2positiveimagelabels.p'
# check _generate_rla for verification before changing this
mid2cid = collections.OrderedDict(
{'/m/0199g': 0, # bicycle
'/m/01bjv': 1, # bus
'/m/0k4j': 2, # car
'/m/04_sv': 3, # motorcycle
'/m/07jdr': 4, # train
'/m/07r04': 5, # truck
'/m/01g317': 6, # human (person originally but may include also rider)
'/m/04yx4': 7, # man
'/m/03bt1vf': 8, # woman
'/m/01bl7v': 9, # boy
'/m/05r655': 10, # girl
'/m/015qff': 11, # traffic light
'/m/01mqdt': 12, # traffic sign
'/m/02pv19': 13, # stop sign
'void': 14,
})
def _imageid_and_mids_generator(filepath):
"""outputs an image (np.float32, 3D) and the
generated weak labels from image-level labels (np.float32, 3D)
"""
with open(filepath, 'rb') as fp:
imageid2mids = pickle.load(fp)
for imageid, mids in imageid2mids.items():
Np = MAX_N_MIDS - len(mids)
if Np < 0:
tf.logging.warn(f'Np = {Np}.')
# pad to MAX_N_MIDS
mids.extend([''.encode('utf-8')]*Np)
yield imageid.encode('utf-8'), mids, Np
def _generate_rla(imageid, mids, rim_size):
# mids: binary np.string, (Nlabels,)
# coords_normalized: np.float32, (Nlabels, 4)
# rim_size: np.int32, (2,)
mids = list(mids)
# zeros_slice = np.zeros(rim_size, dtype=np.float32)
# ones_slice = np.ones(rim_size, dtype=np.float32)
# mid_column = np.zeros((len(mid2cid)), dtype=np.int32)
rla = np.zeros(len(mid2cid), dtype=np.float32)
turn_on_void = True
for mid, cid in mid2cid.items():
if mid.encode('utf-8') in mids:
rla[cid] = 1.
turn_on_void = False
# if empty change the void cid to one
if turn_on_void:
rla[-1] = 1.
# per-pixel normalize rla to a dense multinomial distribution
rla /= np.sum(rla)
# let TF do the tiling as it is faster
# rla = np.tile(rla, (*rim_size, 1))
# assert np.all(np.abs(np.sum(rla, axis=2) - np.ones(rla.shape[:2], dtype=np.float32)) < 0.01), (
# f'some pixels in rla for imageid {imageid} doesn\'t represent a multinomial distribution.')
return rla
def _train_prebatch_processing(imageid, mids, Np, params):
path = tf.strings.join([PATH_train, tf.strings.join([imageid, '.jpg'])], separator='/')
rim = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)
rim = tf.image.convert_image_dtype(rim, tf.float32)
mids = mids[:-Np]
rla = tf.py_func(_generate_rla, [imageid, mids, tf.shape(rim)[:2]], tf.float32, stateful=False)
rla.set_shape((None,))
rla = tf.tile(rla[tf.newaxis, tf.newaxis, ...], tf.concat([tf.shape(rim)[:2], (1,)], 0))
sfe = (params.height_feature_extractor, params.width_feature_extractor)
## prepare
pass
## preprocess
proimage, prolabel = resize_images_and_labels(rim[tf.newaxis, ...],
rla[tf.newaxis, ...],
sfe,
preserve_aspect_ratio=params.preserve_aspect_ratio)
proimage, prolabel = proimage[0], prolabel[0]
# pre-batching augmentations
pass
return proimage, prolabel, imageid
def _train_postbatching_processing(pims, plas, imageids, config, params):
# augmentation
# random_X requires batch dimension (0) to be defined
# pims.set_shape((get_temp_Nb(config, params.Nb), None, None, None))
# plas.set_shape((get_temp_Nb(config, params.Nb), None, None))
#pims = augment.random_color(pims)
#pims = augment.random_blur(pims)
# pims, plas = augment.random_flipping(pims, plas)
# training_lids2cids = _replacevoids(params.training_problem_def['lids2cids'])
# pims, plas = augment.random_scaling(
# pims, plas, [1.0, 2.0], max(training_lids2cids))
# center to [-1, 1)
pims = from_0_1_to_m1_1(pims)
return pims, plas, imageids
def prebatch_dataset(config, params):
dataset = tf.data.Dataset.from_generator(
functools.partial(_imageid_and_mids_generator, FILEPATH_train_imageid2imagelabels),
(tf.string, tf.string, tf.int32),
output_shapes=((), (None,), ()))
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(SHUFFLE_BUFFER))
dataset = dataset.map(
functools.partial(_train_prebatch_processing, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def postbatch_dataset(dataset, config, params):
dataset = dataset.map(
functools.partial(_train_postbatching_processing, config=config, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def train_input(config, params):
"""
Returns a tf.data.Dataset for training from Open Images data
"""
def _grouping(pim, pla, iid):
# group dataset elements as required by estimator
features = {
# 'rawimages': rim,
'proimages': pim,
'imageids': iid,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
}
labels = {
# 'rawlabels': rla,
'prolabels': pla,
}
# next line for distributed debugging
# tf.string tensors is not supported for DMA read/write to GPUs (TF bug)
if params.distribute:
# del features['rawimagespaths']
# del features['rawlabelspaths']
del features['imageids']
return (features, labels)
with tf.name_scope('input_pipeline'):
dataset = prebatch_dataset(config, params)
dataset = dataset.batch(get_temp_Nb(config, params.Nb))
dataset = postbatch_dataset(dataset, config, params)
dataset = dataset.map(_grouping, num_parallel_calls=NUM_PARALLEL_CALLS)
options = tf.data.Options()
options.experimental_autotune = True
# seems than on average gives faster results
dataset = dataset.prefetch(None).with_options(options)
return dataset
# def _evaluate_preprocess(image, label, params):
# _SIZE_FEATURE_EXTRACTOR = (params.height_feature_extractor, params.width_feature_extractor)
# ## prepare
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# evaluation_lids2cids = _replacevoids(params.evaluation_problem_def['lids2cids'])
# label = tf.gather(tf.cast(evaluation_lids2cids, tf.int32), tf.to_int32(label))
# ## preprocess
# proimage = tf.image.resize_images(image, _SIZE_FEATURE_EXTRACTOR)
# prolabel = tf.image.resize_images(label[..., tf.newaxis],
# _SIZE_FEATURE_EXTRACTOR,
# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[..., 0]
# proimage = from_0_1_to_m1_1(proimage)
# print('debug: proimage, prolabel', proimage, prolabel)
# return image, label, proimage, prolabel
# def _evaluate_parse_and_preprocess(im_la_files, data_location, params):
# image, label, im_path, la_path = _load_and_decode(data_location, im_la_files)
# image, label, proimage, prolabel = _evaluate_preprocess(image, label, params)
# return image, label, proimage, prolabel, im_path, la_path
# def evaluate_input(config, params):
# del config
# data_location = params.dataset_directory
# filenames_list = params.filelist_filepath
# filenames_string = tf.cast(filenames_list, tf.string)
# dataset = tf.data.TextLineDataset(filenames=filenames_string)
# dataset = dataset.map(
# functools.partial(_evaluate_parse_and_preprocess, data_location=data_location, params=params),
# num_parallel_calls=30)
# # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same | # features = {
# 'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
# }
# labels = {
# 'rawlabels': rla,
# 'prolabels': pla,
# }
# return (features, labels)
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def _predict_image_generator(params):
# SUPPORTED_EXTENSIONS = ['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'ppm', 'PPM']
# fnames = []
# for se in SUPPORTED_EXTENSIONS:
# fnames.extend(glob.glob(join(params.predict_dir, '*.' + se), recursive=True))
# for im_fname in fnames:
# start = datetime.now()
# im = Image.open(im_fname)
# # next line is time consuming (can take up to 400ms for im of 2 MPixels)
# im_array = np.array(im)
# # print('reading time:', datetime.now() - start)
# yield im_array, im_fname.encode('utf-8'), im_array.shape[0], im_array.shape[1]
# def _predict_preprocess(image, params):
# image.set_shape((None, None, 3))
# image = tf.image.convert_image_dtype(image, tf.float32)
# image = tf.image.resize_images(image, [params.height_feature_extractor, params.width_feature_extractor])
# proimage = from_0_1_to_m1_1(image)
# return proimage
# def predict_input(config, params):
# del config
# dataset = tf.data.Dataset.from_generator(lambda: _predict_image_generator(params),
# output_types=(tf.uint8, tf.string, tf.int32, tf.int32))
# dataset = dataset.map(lambda im, im_path, height, width: (
# im_path, im, _predict_preprocess(im, params)), num_parallel_calls=30)
# dataset = dataset.batch(params.Nb)
# dataset = dataset.prefetch(None)
# def _grouping(imp, rim, pim):
# # group dataset elements as required by estimator
# features = {'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp}
# return features
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def add_train_input_pipeline_arguments(argparser, ctx=None):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# ctx: a context object with a suffix_name attribute
# """
# context_name = ctx.suffix_name if ctx else ''
# argparser.add_argument('dataset_directory' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_train.lst',
# help='List file as provided by the original dataset authors.')
# argparser.add_argument('--preserve_aspect_ratio' + ('_' + context_name if ctx else ''), action='store_true',
# help='Resizes the input images respecting the aspect ratio using cropings.')
# def add_evaluate_input_pipeline_arguments(argparser):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# """
# argparser.add_argument('dataset_directory', type=str, default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath', type=str, default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_val.lst',
# help='List file as provided by the original dataset authors.') | # dataset = dataset.batch(params.Nb)
# def _grouping(rim, rla, pim, pla, imp, lap):
# # group dataset elements as required by estimator | random_line_split |
input_subset_image_labels.py | """
Input pipeline for Open Images v4 image labels subset. For training and evaluation
a tf.data.Dataset is generated from the {train, val}-imageid2imagelabels.p, using a tf.py_func for generating ground truth.
In this case we let TF parallelize GT generation instead of a creation with a serial generator. Time is reduced by 2.
The Python generator emmits imageid s and a list of classes.
Open Images v4 bounding boxes subset average image size: (?, ?).
"""
import json
import os.path as op
import operator
import pickle
import pprint
import collections
import tensorflow as tf
import sys, glob
from os.path import join, split, realpath
import functools
sys.path.append(split(split(realpath(__file__))[0])[0])
# from preprocessing import augmentation_library as augment
from PIL import Image
import numpy as np
from datetime import datetime
from input_pipelines.utils import from_0_1_to_m1_1, resize_images_and_labels, get_temp_Nb
from utils.utils import _replacevoids
# public functions:
# train_input, evaluate_input, predict_input
SHUFFLE_BUFFER = 2000
NUM_PARALLEL_CALLS = 15
MAX_N_MIDS = 500
PATH_train = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train'
FILEPATH_train_imageid2imagelabels = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train-imageid2positiveimagelabels.p'
# check _generate_rla for verification before changing this
mid2cid = collections.OrderedDict(
{'/m/0199g': 0, # bicycle
'/m/01bjv': 1, # bus
'/m/0k4j': 2, # car
'/m/04_sv': 3, # motorcycle
'/m/07jdr': 4, # train
'/m/07r04': 5, # truck
'/m/01g317': 6, # human (person originally but may include also rider)
'/m/04yx4': 7, # man
'/m/03bt1vf': 8, # woman
'/m/01bl7v': 9, # boy
'/m/05r655': 10, # girl
'/m/015qff': 11, # traffic light
'/m/01mqdt': 12, # traffic sign
'/m/02pv19': 13, # stop sign
'void': 14,
})
def _imageid_and_mids_generator(filepath):
|
def _generate_rla(imageid, mids, rim_size):
# mids: binary np.string, (Nlabels,)
# coords_normalized: np.float32, (Nlabels, 4)
# rim_size: np.int32, (2,)
mids = list(mids)
# zeros_slice = np.zeros(rim_size, dtype=np.float32)
# ones_slice = np.ones(rim_size, dtype=np.float32)
# mid_column = np.zeros((len(mid2cid)), dtype=np.int32)
rla = np.zeros(len(mid2cid), dtype=np.float32)
turn_on_void = True
for mid, cid in mid2cid.items():
if mid.encode('utf-8') in mids:
rla[cid] = 1.
turn_on_void = False
# if empty change the void cid to one
if turn_on_void:
rla[-1] = 1.
# per-pixel normalize rla to a dense multinomial distribution
rla /= np.sum(rla)
# let TF do the tiling as it is faster
# rla = np.tile(rla, (*rim_size, 1))
# assert np.all(np.abs(np.sum(rla, axis=2) - np.ones(rla.shape[:2], dtype=np.float32)) < 0.01), (
# f'some pixels in rla for imageid {imageid} doesn\'t represent a multinomial distribution.')
return rla
def _train_prebatch_processing(imageid, mids, Np, params):
path = tf.strings.join([PATH_train, tf.strings.join([imageid, '.jpg'])], separator='/')
rim = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)
rim = tf.image.convert_image_dtype(rim, tf.float32)
mids = mids[:-Np]
rla = tf.py_func(_generate_rla, [imageid, mids, tf.shape(rim)[:2]], tf.float32, stateful=False)
rla.set_shape((None,))
rla = tf.tile(rla[tf.newaxis, tf.newaxis, ...], tf.concat([tf.shape(rim)[:2], (1,)], 0))
sfe = (params.height_feature_extractor, params.width_feature_extractor)
## prepare
pass
## preprocess
proimage, prolabel = resize_images_and_labels(rim[tf.newaxis, ...],
rla[tf.newaxis, ...],
sfe,
preserve_aspect_ratio=params.preserve_aspect_ratio)
proimage, prolabel = proimage[0], prolabel[0]
# pre-batching augmentations
pass
return proimage, prolabel, imageid
def _train_postbatching_processing(pims, plas, imageids, config, params):
# augmentation
# random_X requires batch dimension (0) to be defined
# pims.set_shape((get_temp_Nb(config, params.Nb), None, None, None))
# plas.set_shape((get_temp_Nb(config, params.Nb), None, None))
#pims = augment.random_color(pims)
#pims = augment.random_blur(pims)
# pims, plas = augment.random_flipping(pims, plas)
# training_lids2cids = _replacevoids(params.training_problem_def['lids2cids'])
# pims, plas = augment.random_scaling(
# pims, plas, [1.0, 2.0], max(training_lids2cids))
# center to [-1, 1)
pims = from_0_1_to_m1_1(pims)
return pims, plas, imageids
def prebatch_dataset(config, params):
dataset = tf.data.Dataset.from_generator(
functools.partial(_imageid_and_mids_generator, FILEPATH_train_imageid2imagelabels),
(tf.string, tf.string, tf.int32),
output_shapes=((), (None,), ()))
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(SHUFFLE_BUFFER))
dataset = dataset.map(
functools.partial(_train_prebatch_processing, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def postbatch_dataset(dataset, config, params):
dataset = dataset.map(
functools.partial(_train_postbatching_processing, config=config, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def train_input(config, params):
"""
Returns a tf.data.Dataset for training from Open Images data
"""
def _grouping(pim, pla, iid):
# group dataset elements as required by estimator
features = {
# 'rawimages': rim,
'proimages': pim,
'imageids': iid,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
}
labels = {
# 'rawlabels': rla,
'prolabels': pla,
}
# next line for distributed debugging
# tf.string tensors is not supported for DMA read/write to GPUs (TF bug)
if params.distribute:
# del features['rawimagespaths']
# del features['rawlabelspaths']
del features['imageids']
return (features, labels)
with tf.name_scope('input_pipeline'):
dataset = prebatch_dataset(config, params)
dataset = dataset.batch(get_temp_Nb(config, params.Nb))
dataset = postbatch_dataset(dataset, config, params)
dataset = dataset.map(_grouping, num_parallel_calls=NUM_PARALLEL_CALLS)
options = tf.data.Options()
options.experimental_autotune = True
# seems than on average gives faster results
dataset = dataset.prefetch(None).with_options(options)
return dataset
# def _evaluate_preprocess(image, label, params):
# _SIZE_FEATURE_EXTRACTOR = (params.height_feature_extractor, params.width_feature_extractor)
# ## prepare
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# evaluation_lids2cids = _replacevoids(params.evaluation_problem_def['lids2cids'])
# label = tf.gather(tf.cast(evaluation_lids2cids, tf.int32), tf.to_int32(label))
# ## preprocess
# proimage = tf.image.resize_images(image, _SIZE_FEATURE_EXTRACTOR)
# prolabel = tf.image.resize_images(label[..., tf.newaxis],
# _SIZE_FEATURE_EXTRACTOR,
# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[..., 0]
# proimage = from_0_1_to_m1_1(proimage)
# print('debug: proimage, prolabel', proimage, prolabel)
# return image, label, proimage, prolabel
# def _evaluate_parse_and_preprocess(im_la_files, data_location, params):
# image, label, im_path, la_path = _load_and_decode(data_location, im_la_files)
# image, label, proimage, prolabel = _evaluate_preprocess(image, label, params)
# return image, label, proimage, prolabel, im_path, la_path
# def evaluate_input(config, params):
# del config
# data_location = params.dataset_directory
# filenames_list = params.filelist_filepath
# filenames_string = tf.cast(filenames_list, tf.string)
# dataset = tf.data.TextLineDataset(filenames=filenames_string)
# dataset = dataset.map(
# functools.partial(_evaluate_parse_and_preprocess, data_location=data_location, params=params),
# num_parallel_calls=30)
# # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same
# dataset = dataset.batch(params.Nb)
# def _grouping(rim, rla, pim, pla, imp, lap):
# # group dataset elements as required by estimator
# features = {
# 'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
# }
# labels = {
# 'rawlabels': rla,
# 'prolabels': pla,
# }
# return (features, labels)
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def _predict_image_generator(params):
# SUPPORTED_EXTENSIONS = ['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'ppm', 'PPM']
# fnames = []
# for se in SUPPORTED_EXTENSIONS:
# fnames.extend(glob.glob(join(params.predict_dir, '*.' + se), recursive=True))
# for im_fname in fnames:
# start = datetime.now()
# im = Image.open(im_fname)
# # next line is time consuming (can take up to 400ms for im of 2 MPixels)
# im_array = np.array(im)
# # print('reading time:', datetime.now() - start)
# yield im_array, im_fname.encode('utf-8'), im_array.shape[0], im_array.shape[1]
# def _predict_preprocess(image, params):
# image.set_shape((None, None, 3))
# image = tf.image.convert_image_dtype(image, tf.float32)
# image = tf.image.resize_images(image, [params.height_feature_extractor, params.width_feature_extractor])
# proimage = from_0_1_to_m1_1(image)
# return proimage
# def predict_input(config, params):
# del config
# dataset = tf.data.Dataset.from_generator(lambda: _predict_image_generator(params),
# output_types=(tf.uint8, tf.string, tf.int32, tf.int32))
# dataset = dataset.map(lambda im, im_path, height, width: (
# im_path, im, _predict_preprocess(im, params)), num_parallel_calls=30)
# dataset = dataset.batch(params.Nb)
# dataset = dataset.prefetch(None)
# def _grouping(imp, rim, pim):
# # group dataset elements as required by estimator
# features = {'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp}
# return features
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def add_train_input_pipeline_arguments(argparser, ctx=None):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# ctx: a context object with a suffix_name attribute
# """
# context_name = ctx.suffix_name if ctx else ''
# argparser.add_argument('dataset_directory' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_train.lst',
# help='List file as provided by the original dataset authors.')
# argparser.add_argument('--preserve_aspect_ratio' + ('_' + context_name if ctx else ''), action='store_true',
# help='Resizes the input images respecting the aspect ratio using cropings.')
# def add_evaluate_input_pipeline_arguments(argparser):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# """
# argparser.add_argument('dataset_directory', type=str, default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath', type=str, default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_val.lst',
# help='List file as provided by the original dataset authors.')
| """outputs an image (np.float32, 3D) and the
generated weak labels from image-level labels (np.float32, 3D)
"""
with open(filepath, 'rb') as fp:
imageid2mids = pickle.load(fp)
for imageid, mids in imageid2mids.items():
Np = MAX_N_MIDS - len(mids)
if Np < 0:
tf.logging.warn(f'Np = {Np}.')
# pad to MAX_N_MIDS
mids.extend([''.encode('utf-8')]*Np)
yield imageid.encode('utf-8'), mids, Np | identifier_body |
input_subset_image_labels.py | """
Input pipeline for Open Images v4 image labels subset. For training and evaluation
a tf.data.Dataset is generated from the {train, val}-imageid2imagelabels.p, using a tf.py_func for generating ground truth.
In this case we let TF parallelize GT generation instead of a creation with a serial generator. Time is reduced by 2.
The Python generator emmits imageid s and a list of classes.
Open Images v4 bounding boxes subset average image size: (?, ?).
"""
import json
import os.path as op
import operator
import pickle
import pprint
import collections
import tensorflow as tf
import sys, glob
from os.path import join, split, realpath
import functools
sys.path.append(split(split(realpath(__file__))[0])[0])
# from preprocessing import augmentation_library as augment
from PIL import Image
import numpy as np
from datetime import datetime
from input_pipelines.utils import from_0_1_to_m1_1, resize_images_and_labels, get_temp_Nb
from utils.utils import _replacevoids
# public functions:
# train_input, evaluate_input, predict_input
SHUFFLE_BUFFER = 2000
NUM_PARALLEL_CALLS = 15
MAX_N_MIDS = 500
PATH_train = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train'
FILEPATH_train_imageid2imagelabels = '/media/panos/data/datasets/open_images_v4/subset_street_scenes_image_labels/train-imageid2positiveimagelabels.p'
# check _generate_rla for verification before changing this
mid2cid = collections.OrderedDict(
{'/m/0199g': 0, # bicycle
'/m/01bjv': 1, # bus
'/m/0k4j': 2, # car
'/m/04_sv': 3, # motorcycle
'/m/07jdr': 4, # train
'/m/07r04': 5, # truck
'/m/01g317': 6, # human (person originally but may include also rider)
'/m/04yx4': 7, # man
'/m/03bt1vf': 8, # woman
'/m/01bl7v': 9, # boy
'/m/05r655': 10, # girl
'/m/015qff': 11, # traffic light
'/m/01mqdt': 12, # traffic sign
'/m/02pv19': 13, # stop sign
'void': 14,
})
def _imageid_and_mids_generator(filepath):
"""outputs an image (np.float32, 3D) and the
generated weak labels from image-level labels (np.float32, 3D)
"""
with open(filepath, 'rb') as fp:
imageid2mids = pickle.load(fp)
for imageid, mids in imageid2mids.items():
Np = MAX_N_MIDS - len(mids)
if Np < 0:
tf.logging.warn(f'Np = {Np}.')
# pad to MAX_N_MIDS
mids.extend([''.encode('utf-8')]*Np)
yield imageid.encode('utf-8'), mids, Np
def _generate_rla(imageid, mids, rim_size):
# mids: binary np.string, (Nlabels,)
# coords_normalized: np.float32, (Nlabels, 4)
# rim_size: np.int32, (2,)
mids = list(mids)
# zeros_slice = np.zeros(rim_size, dtype=np.float32)
# ones_slice = np.ones(rim_size, dtype=np.float32)
# mid_column = np.zeros((len(mid2cid)), dtype=np.int32)
rla = np.zeros(len(mid2cid), dtype=np.float32)
turn_on_void = True
for mid, cid in mid2cid.items():
if mid.encode('utf-8') in mids:
|
# if empty change the void cid to one
if turn_on_void:
rla[-1] = 1.
# per-pixel normalize rla to a dense multinomial distribution
rla /= np.sum(rla)
# let TF do the tiling as it is faster
# rla = np.tile(rla, (*rim_size, 1))
# assert np.all(np.abs(np.sum(rla, axis=2) - np.ones(rla.shape[:2], dtype=np.float32)) < 0.01), (
# f'some pixels in rla for imageid {imageid} doesn\'t represent a multinomial distribution.')
return rla
def _train_prebatch_processing(imageid, mids, Np, params):
path = tf.strings.join([PATH_train, tf.strings.join([imageid, '.jpg'])], separator='/')
rim = tf.image.decode_jpeg(tf.io.read_file(path), channels=3)
rim = tf.image.convert_image_dtype(rim, tf.float32)
mids = mids[:-Np]
rla = tf.py_func(_generate_rla, [imageid, mids, tf.shape(rim)[:2]], tf.float32, stateful=False)
rla.set_shape((None,))
rla = tf.tile(rla[tf.newaxis, tf.newaxis, ...], tf.concat([tf.shape(rim)[:2], (1,)], 0))
sfe = (params.height_feature_extractor, params.width_feature_extractor)
## prepare
pass
## preprocess
proimage, prolabel = resize_images_and_labels(rim[tf.newaxis, ...],
rla[tf.newaxis, ...],
sfe,
preserve_aspect_ratio=params.preserve_aspect_ratio)
proimage, prolabel = proimage[0], prolabel[0]
# pre-batching augmentations
pass
return proimage, prolabel, imageid
def _train_postbatching_processing(pims, plas, imageids, config, params):
# augmentation
# random_X requires batch dimension (0) to be defined
# pims.set_shape((get_temp_Nb(config, params.Nb), None, None, None))
# plas.set_shape((get_temp_Nb(config, params.Nb), None, None))
#pims = augment.random_color(pims)
#pims = augment.random_blur(pims)
# pims, plas = augment.random_flipping(pims, plas)
# training_lids2cids = _replacevoids(params.training_problem_def['lids2cids'])
# pims, plas = augment.random_scaling(
# pims, plas, [1.0, 2.0], max(training_lids2cids))
# center to [-1, 1)
pims = from_0_1_to_m1_1(pims)
return pims, plas, imageids
def prebatch_dataset(config, params):
dataset = tf.data.Dataset.from_generator(
functools.partial(_imageid_and_mids_generator, FILEPATH_train_imageid2imagelabels),
(tf.string, tf.string, tf.int32),
output_shapes=((), (None,), ()))
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(SHUFFLE_BUFFER))
dataset = dataset.map(
functools.partial(_train_prebatch_processing, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def postbatch_dataset(dataset, config, params):
dataset = dataset.map(
functools.partial(_train_postbatching_processing, config=config, params=params),
num_parallel_calls=NUM_PARALLEL_CALLS)
return dataset
def train_input(config, params):
"""
Returns a tf.data.Dataset for training from Open Images data
"""
def _grouping(pim, pla, iid):
# group dataset elements as required by estimator
features = {
# 'rawimages': rim,
'proimages': pim,
'imageids': iid,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
}
labels = {
# 'rawlabels': rla,
'prolabels': pla,
}
# next line for distributed debugging
# tf.string tensors is not supported for DMA read/write to GPUs (TF bug)
if params.distribute:
# del features['rawimagespaths']
# del features['rawlabelspaths']
del features['imageids']
return (features, labels)
with tf.name_scope('input_pipeline'):
dataset = prebatch_dataset(config, params)
dataset = dataset.batch(get_temp_Nb(config, params.Nb))
dataset = postbatch_dataset(dataset, config, params)
dataset = dataset.map(_grouping, num_parallel_calls=NUM_PARALLEL_CALLS)
options = tf.data.Options()
options.experimental_autotune = True
# seems than on average gives faster results
dataset = dataset.prefetch(None).with_options(options)
return dataset
# def _evaluate_preprocess(image, label, params):
# _SIZE_FEATURE_EXTRACTOR = (params.height_feature_extractor, params.width_feature_extractor)
# ## prepare
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# evaluation_lids2cids = _replacevoids(params.evaluation_problem_def['lids2cids'])
# label = tf.gather(tf.cast(evaluation_lids2cids, tf.int32), tf.to_int32(label))
# ## preprocess
# proimage = tf.image.resize_images(image, _SIZE_FEATURE_EXTRACTOR)
# prolabel = tf.image.resize_images(label[..., tf.newaxis],
# _SIZE_FEATURE_EXTRACTOR,
# method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)[..., 0]
# proimage = from_0_1_to_m1_1(proimage)
# print('debug: proimage, prolabel', proimage, prolabel)
# return image, label, proimage, prolabel
# def _evaluate_parse_and_preprocess(im_la_files, data_location, params):
# image, label, im_path, la_path = _load_and_decode(data_location, im_la_files)
# image, label, proimage, prolabel = _evaluate_preprocess(image, label, params)
# return image, label, proimage, prolabel, im_path, la_path
# def evaluate_input(config, params):
# del config
# data_location = params.dataset_directory
# filenames_list = params.filelist_filepath
# filenames_string = tf.cast(filenames_list, tf.string)
# dataset = tf.data.TextLineDataset(filenames=filenames_string)
# dataset = dataset.map(
# functools.partial(_evaluate_parse_and_preprocess, data_location=data_location, params=params),
# num_parallel_calls=30)
# # IMPORTANT: if Nb > 1, then shape of dataset elements must be the same
# dataset = dataset.batch(params.Nb)
# def _grouping(rim, rla, pim, pla, imp, lap):
# # group dataset elements as required by estimator
# features = {
# 'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp,
# 'rawlabelspaths': lap,
# }
# labels = {
# 'rawlabels': rla,
# 'prolabels': pla,
# }
# return (features, labels)
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def _predict_image_generator(params):
# SUPPORTED_EXTENSIONS = ['png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'ppm', 'PPM']
# fnames = []
# for se in SUPPORTED_EXTENSIONS:
# fnames.extend(glob.glob(join(params.predict_dir, '*.' + se), recursive=True))
# for im_fname in fnames:
# start = datetime.now()
# im = Image.open(im_fname)
# # next line is time consuming (can take up to 400ms for im of 2 MPixels)
# im_array = np.array(im)
# # print('reading time:', datetime.now() - start)
# yield im_array, im_fname.encode('utf-8'), im_array.shape[0], im_array.shape[1]
# def _predict_preprocess(image, params):
# image.set_shape((None, None, 3))
# image = tf.image.convert_image_dtype(image, tf.float32)
# image = tf.image.resize_images(image, [params.height_feature_extractor, params.width_feature_extractor])
# proimage = from_0_1_to_m1_1(image)
# return proimage
# def predict_input(config, params):
# del config
# dataset = tf.data.Dataset.from_generator(lambda: _predict_image_generator(params),
# output_types=(tf.uint8, tf.string, tf.int32, tf.int32))
# dataset = dataset.map(lambda im, im_path, height, width: (
# im_path, im, _predict_preprocess(im, params)), num_parallel_calls=30)
# dataset = dataset.batch(params.Nb)
# dataset = dataset.prefetch(None)
# def _grouping(imp, rim, pim):
# # group dataset elements as required by estimator
# features = {'rawimages': rim,
# 'proimages': pim,
# 'rawimagespaths': imp}
# return features
# dataset = dataset.map(_grouping, num_parallel_calls=30)
# return dataset
# def add_train_input_pipeline_arguments(argparser, ctx=None):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# ctx: a context object with a suffix_name attribute
# """
# context_name = ctx.suffix_name if ctx else ''
# argparser.add_argument('dataset_directory' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath' + ('_' + context_name if ctx else ''), type=str,
# default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_train.lst',
# help='List file as provided by the original dataset authors.')
# argparser.add_argument('--preserve_aspect_ratio' + ('_' + context_name if ctx else ''), action='store_true',
# help='Resizes the input images respecting the aspect ratio using cropings.')
# def add_evaluate_input_pipeline_arguments(argparser):
# """
# Add arguments required by the input pipeline.
# Arguments:
# argparser: an argparse.ArgumentParser object to add arguments
# """
# argparser.add_argument('dataset_directory', type=str, default='/media/panos/data/datasets/apolloscape/',
# help='Dataset directory including final /.')
# argparser.add_argument('filelist_filepath', type=str, default='/media/panos/data/datasets/apolloscape/public_image_lists/road01_ins_val.lst',
# help='List file as provided by the original dataset authors.')
| rla[cid] = 1.
turn_on_void = False | conditional_block |
models.py | import os
from data import *
from baselines import *
import numpy as np
import torch
import torch.nn as nn
def get_model_type(args):
# Model and data
mto = str(args.mto)
mto += '_' + str(args.K)
mto += '_' + str(args.l2)
mto += '_' + str(args.topk_att)
mto += '_' + str(args.num_att_ctx)
mto += '_' + str(args.seq_len)
mto += "_rep" if args.fr_rep else ""
mto += "_ctx" if args.fr_ctx else ""
mto += '.pt'
if args.model == "POP":
MODEL = POP
elif args.model == "REP":
MODEL = REP
elif args.model == "MF":
MODEL = MF
elif args.model == "FPMC":
MODEL = FPMC
elif args.model == "LiveRec":
MODEL = LiveRec
return os.path.join(args.model_path,mto),MODEL
class PointWiseFeedForward(nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.conv1 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = nn.Dropout(p=dropout_rate)
self.relu = nn.ReLU()
self.conv2 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2) # as Conv1D requires (N, C, Length)
outputs += inputs
return outputs
class Attention(nn.Module):
def __init__(self, args, num_att, num_heads, causality=False):
super(Attention, self).__init__()
self.args = args
self.causality = causality
self.attention_layernorms = nn.ModuleList()
self.attention_layers = nn.ModuleList()
self.forward_layernorms = nn.ModuleList()
self.forward_layers = nn.ModuleList()
self.last_layernorm = nn.LayerNorm(args.K, eps=1e-8)
for _ in range(num_att):
new_attn_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.attention_layernorms.append(new_attn_layernorm)
new_attn_layer = nn.MultiheadAttention(args.K,
num_heads,
0.2)
self.attention_layers.append(new_attn_layer)
new_fwd_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.forward_layernorms.append(new_fwd_layernorm)
new_fwd_layer = PointWiseFeedForward(args.K, 0.2)
self.forward_layers.append(new_fwd_layer)
def forward(self, seqs, timeline_mask=None):
if self.causality:
tl = seqs.shape[1] # time dim len for enforce causality
attention_mask = ~torch.tril(torch.ones((tl, tl),
dtype=torch.bool,
device=self.args.device))
else: attention_mask = None
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
for i in range(len(self.attention_layers)):
seqs = torch.transpose(seqs, 0, 1)
Q = self.attention_layernorms[i](seqs)
mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
attn_mask=attention_mask)
seqs = Q + mha_outputs
seqs = torch.transpose(seqs, 0, 1)
seqs = self.forward_layernorms[i](seqs)
seqs = self.forward_layers[i](seqs)
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
return self.last_layernorm(seqs)
class LiveRec(nn.Module):
def __init__(self, args):
super(LiveRec, self).__init__()
self.args = args
self.item_embedding = nn.Embedding(args.N+1, args.K, padding_idx=0)
self.pos_emb = nn.Embedding(args.seq_len, args.K)
self.emb_dropout = nn.Dropout(p=0.2)
# Sequence encoding attention
self.att = Attention(args,
args.num_att,
args.num_heads,
causality=True)
# Availability attention
self.att_ctx = Attention(args,
args.num_att_ctx,
args.num_heads_ctx,
causality=False)
# Time interval embedding
# 24h cycles, except for the first one set to 12h
self.boundaries = torch.LongTensor([0]+list(range(77,3000+144, 144))).to(args.device)
self.rep_emb = nn.Embedding(len(self.boundaries)+2, args.K, padding_idx=0)
def forward(self, log_seqs):
seqs = self.item_embedding(log_seqs)
seqs *= self.item_embedding.embedding_dim ** 0.5
positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
seqs += self.pos_emb(torch.LongTensor(positions).to(self.args.device))
seqs = self.emb_dropout(seqs)
timeline_mask = (log_seqs == 0).to(self.args.device)
feats = self.att(seqs, timeline_mask)
return feats
def predict(self,feats,inputs,items,ctx,data):
if ctx!=None: i_embs = ctx
else: self.item_embedding(items)
return (feats * i_embs).sum(dim=-1)
def compute_rank(self,data,store,k=10):
inputs = data[:,:,3] # inputs
pos = data[:,:,5] # targets
xtsy = data[:,:,6] # targets ts
feats = self(inputs)
# Add time interval embeddings
if self.args.fr_ctx:
ctx,batch_inds = self.get_ctx_att(data,feats)
# identify repeated interactions in the batch
mask = torch.ones_like(pos[:,-1]).type(torch.bool)
for b in range(pos.shape[0]):
avt = pos[b,:-1]
avt = avt[avt!=0]
mask[b] = pos[b,-1] in avt
store['ratio'] += [float(pos[b,-1] in avt)]
for b in range(inputs.shape[0]):
step = xtsy[b,-1].item()
av = torch.LongTensor(self.args.ts[step]).to(self.args.device)
av_embs = self.item_embedding(av)
if self.args.fr_ctx:
ctx_expand = torch.zeros(self.args.av_tens.shape[1],self.args.K,device=self.args.device)
ctx_expand[batch_inds[b,-1,:],:] = ctx[b,-1,:,:]
scores = (feats[b,-1,:] * ctx_expand).sum(-1)
scores = scores[:len(av)]
else:
scores = (feats[b,-1,:] * av_embs).sum(-1)
iseq = pos[b,-1] == av
idx = torch.where(iseq)[0]
rank = torch.where(torch.argsort(scores, descending=True)==idx)[0].item()
if mask[b]: # rep
store['rrep'] += [rank]
else:
store['rnew'] += [rank]
store['rall'] += [rank]
return store
def get_ctx_att(self,data,feats,neg=None):
if not self.args.fr_ctx: |
inputs,pos,xtsy = data[:,:,3],data[:,:,5],data[:,:,6]
# unbatch indices
ci = torch.nonzero(inputs, as_tuple=False)
flat_xtsy = xtsy[ci[:,0],ci[:,1]]
av = self.args.av_tens[flat_xtsy,:]
av_embs = self.item_embedding(av)
# repeat consumption: time interval embeddings
if self.args.fr_rep:
av_rep_batch = self.get_av_rep(data)
av_rep_flat = av_rep_batch[ci[:,0],ci[:,1]]
rep_enc = self.rep_emb(av_rep_flat)
av_embs += rep_enc
flat_feats = feats[ci[:,0],ci[:,1],:]
flat_feats = flat_feats.unsqueeze(1).expand(flat_feats.shape[0],
self.args.av_tens.shape[-1],
flat_feats.shape[1])
scores = (av_embs * flat_feats).sum(-1)
inds = scores.topk(self.args.topk_att,dim=1).indices
# embed selected items
seqs = torch.gather(av_embs, 1, inds.unsqueeze(2) \
.expand(-1,-1,self.args.K))
seqs = self.att_ctx(seqs)
def expand_att(items):
av_pos = torch.where(av==items[ci[:,0],ci[:,1]].unsqueeze(1))[1]
is_in = torch.any(inds == av_pos.unsqueeze(1),1)
att_feats = torch.zeros(av.shape[0],self.args.K).to(self.args.device)
att_feats[is_in,:] = seqs[is_in,torch.where(av_pos.unsqueeze(1) == inds)[1],:]
out = torch.zeros(inputs.shape[0],inputs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = att_feats
return out
# training
if pos != None and neg != None:
return expand_att(pos),expand_att(neg)
# testing
else:
out = torch.zeros(inputs.shape[0],inputs.shape[1],seqs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = seqs
batch_inds = torch.zeros(inputs.shape[0],inputs.shape[1],inds.shape[1],dtype=torch.long).to(self.args.device)
batch_inds[ci[:,0],ci[:,1],:] = inds
return out,batch_inds
def train_step(self, data, use_ctx=False): # for training
inputs,pos = data[:,:,3],data[:,:,5]
neg = sample_negs(data,self.args).to(self.args.device)
feats = self(inputs)
ctx_pos,ctx_neg = None,None
if self.args.fr_ctx:
ctx_pos,ctx_neg = self.get_ctx_att(data,feats,neg)
pos_logits = self.predict(feats,inputs,pos,ctx_pos,data)
neg_logits = self.predict(feats,inputs,neg,ctx_neg,data)
loss = (-torch.log(pos_logits[inputs!=0].sigmoid()+1e-24)
-torch.log(1-neg_logits[inputs!=0].sigmoid()+1e-24)).sum()
return loss
def get_av_rep(self,data):
bs = data.shape[0]
inputs = data[:,:,3] # inputs
xtsb = data[:,:,2] # inputs ts
xtsy = data[:,:,6] # targets ts
av_batch = self.args.av_tens[xtsy.view(-1),:]
av_batch = av_batch.view(xtsy.shape[0],xtsy.shape[1],-1)
av_batch *= (xtsy!=0).unsqueeze(2) # masking pad inputs
av_batch = av_batch.to(self.args.device)
mask_caus = 1-torch.tril(torch.ones(self.args.seq_len,self.args.seq_len),diagonal=-1)
mask_caus = mask_caus.unsqueeze(0).unsqueeze(3)
mask_caus = mask_caus.expand(bs,-1,-1,self.args.av_tens.shape[-1])
mask_caus = mask_caus.type(torch.bool).to(self.args.device)
tile = torch.arange(self.args.seq_len).unsqueeze(0).repeat(bs,1).to(self.args.device)
bm = (inputs.unsqueeze(2).unsqueeze(3)==av_batch.unsqueeze(1).expand(-1,self.args.seq_len,-1,-1))
bm &= mask_caus
# **WARNING** this is a hacky way to get the last non-zero element in the sequence.
# It works with pytorch 1.8.1 but might break in the future.
sm = bm.type(torch.int).argmax(1)
sm = torch.any(bm,1) * sm
sm = (torch.gather(xtsy, 1, tile).unsqueeze(2) -
torch.gather(xtsb.unsqueeze(2).expand(-1,-1,self.args.av_tens.shape[-1]), 1, sm))
sm = torch.bucketize(sm, self.boundaries)+1
sm = torch.any(bm,1) * sm
sm *= av_batch!=0
sm *= inputs.unsqueeze(2)!=0
return sm
| return None | conditional_block |
models.py | import os
from data import *
from baselines import *
import numpy as np
import torch
import torch.nn as nn
def get_model_type(args):
# Model and data
mto = str(args.mto)
mto += '_' + str(args.K)
mto += '_' + str(args.l2)
mto += '_' + str(args.topk_att)
mto += '_' + str(args.num_att_ctx)
mto += '_' + str(args.seq_len)
mto += "_rep" if args.fr_rep else ""
mto += "_ctx" if args.fr_ctx else ""
mto += '.pt'
if args.model == "POP":
MODEL = POP
elif args.model == "REP":
MODEL = REP
elif args.model == "MF":
MODEL = MF
elif args.model == "FPMC":
MODEL = FPMC
elif args.model == "LiveRec":
MODEL = LiveRec
return os.path.join(args.model_path,mto),MODEL
class PointWiseFeedForward(nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.conv1 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = nn.Dropout(p=dropout_rate)
self.relu = nn.ReLU()
self.conv2 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2) # as Conv1D requires (N, C, Length)
outputs += inputs
return outputs
class Attention(nn.Module):
def __init__(self, args, num_att, num_heads, causality=False):
super(Attention, self).__init__()
self.args = args
self.causality = causality
self.attention_layernorms = nn.ModuleList()
self.attention_layers = nn.ModuleList()
self.forward_layernorms = nn.ModuleList()
self.forward_layers = nn.ModuleList()
self.last_layernorm = nn.LayerNorm(args.K, eps=1e-8)
for _ in range(num_att):
new_attn_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.attention_layernorms.append(new_attn_layernorm)
new_attn_layer = nn.MultiheadAttention(args.K,
num_heads,
0.2)
self.attention_layers.append(new_attn_layer)
new_fwd_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.forward_layernorms.append(new_fwd_layernorm)
new_fwd_layer = PointWiseFeedForward(args.K, 0.2)
self.forward_layers.append(new_fwd_layer)
def forward(self, seqs, timeline_mask=None):
if self.causality:
tl = seqs.shape[1] # time dim len for enforce causality
attention_mask = ~torch.tril(torch.ones((tl, tl),
dtype=torch.bool,
device=self.args.device))
else: attention_mask = None
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
for i in range(len(self.attention_layers)):
seqs = torch.transpose(seqs, 0, 1)
Q = self.attention_layernorms[i](seqs)
mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
attn_mask=attention_mask)
seqs = Q + mha_outputs
seqs = torch.transpose(seqs, 0, 1)
seqs = self.forward_layernorms[i](seqs)
seqs = self.forward_layers[i](seqs)
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
return self.last_layernorm(seqs)
class LiveRec(nn.Module):
def __init__(self, args):
super(LiveRec, self).__init__()
self.args = args
self.item_embedding = nn.Embedding(args.N+1, args.K, padding_idx=0)
self.pos_emb = nn.Embedding(args.seq_len, args.K)
self.emb_dropout = nn.Dropout(p=0.2)
# Sequence encoding attention
self.att = Attention(args,
args.num_att,
args.num_heads,
causality=True)
# Availability attention
self.att_ctx = Attention(args,
args.num_att_ctx,
args.num_heads_ctx,
causality=False)
# Time interval embedding
# 24h cycles, except for the first one set to 12h
self.boundaries = torch.LongTensor([0]+list(range(77,3000+144, 144))).to(args.device)
self.rep_emb = nn.Embedding(len(self.boundaries)+2, args.K, padding_idx=0)
def forward(self, log_seqs):
seqs = self.item_embedding(log_seqs)
seqs *= self.item_embedding.embedding_dim ** 0.5
positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
seqs += self.pos_emb(torch.LongTensor(positions).to(self.args.device))
seqs = self.emb_dropout(seqs)
timeline_mask = (log_seqs == 0).to(self.args.device)
feats = self.att(seqs, timeline_mask)
return feats
def predict(self,feats,inputs,items,ctx,data):
if ctx!=None: i_embs = ctx
else: self.item_embedding(items)
return (feats * i_embs).sum(dim=-1)
def compute_rank(self,data,store,k=10):
|
def get_ctx_att(self,data,feats,neg=None):
if not self.args.fr_ctx: return None
inputs,pos,xtsy = data[:,:,3],data[:,:,5],data[:,:,6]
# unbatch indices
ci = torch.nonzero(inputs, as_tuple=False)
flat_xtsy = xtsy[ci[:,0],ci[:,1]]
av = self.args.av_tens[flat_xtsy,:]
av_embs = self.item_embedding(av)
# repeat consumption: time interval embeddings
if self.args.fr_rep:
av_rep_batch = self.get_av_rep(data)
av_rep_flat = av_rep_batch[ci[:,0],ci[:,1]]
rep_enc = self.rep_emb(av_rep_flat)
av_embs += rep_enc
flat_feats = feats[ci[:,0],ci[:,1],:]
flat_feats = flat_feats.unsqueeze(1).expand(flat_feats.shape[0],
self.args.av_tens.shape[-1],
flat_feats.shape[1])
scores = (av_embs * flat_feats).sum(-1)
inds = scores.topk(self.args.topk_att,dim=1).indices
# embed selected items
seqs = torch.gather(av_embs, 1, inds.unsqueeze(2) \
.expand(-1,-1,self.args.K))
seqs = self.att_ctx(seqs)
def expand_att(items):
av_pos = torch.where(av==items[ci[:,0],ci[:,1]].unsqueeze(1))[1]
is_in = torch.any(inds == av_pos.unsqueeze(1),1)
att_feats = torch.zeros(av.shape[0],self.args.K).to(self.args.device)
att_feats[is_in,:] = seqs[is_in,torch.where(av_pos.unsqueeze(1) == inds)[1],:]
out = torch.zeros(inputs.shape[0],inputs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = att_feats
return out
# training
if pos != None and neg != None:
return expand_att(pos),expand_att(neg)
# testing
else:
out = torch.zeros(inputs.shape[0],inputs.shape[1],seqs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = seqs
batch_inds = torch.zeros(inputs.shape[0],inputs.shape[1],inds.shape[1],dtype=torch.long).to(self.args.device)
batch_inds[ci[:,0],ci[:,1],:] = inds
return out,batch_inds
def train_step(self, data, use_ctx=False): # for training
inputs,pos = data[:,:,3],data[:,:,5]
neg = sample_negs(data,self.args).to(self.args.device)
feats = self(inputs)
ctx_pos,ctx_neg = None,None
if self.args.fr_ctx:
ctx_pos,ctx_neg = self.get_ctx_att(data,feats,neg)
pos_logits = self.predict(feats,inputs,pos,ctx_pos,data)
neg_logits = self.predict(feats,inputs,neg,ctx_neg,data)
loss = (-torch.log(pos_logits[inputs!=0].sigmoid()+1e-24)
-torch.log(1-neg_logits[inputs!=0].sigmoid()+1e-24)).sum()
return loss
def get_av_rep(self,data):
bs = data.shape[0]
inputs = data[:,:,3] # inputs
xtsb = data[:,:,2] # inputs ts
xtsy = data[:,:,6] # targets ts
av_batch = self.args.av_tens[xtsy.view(-1),:]
av_batch = av_batch.view(xtsy.shape[0],xtsy.shape[1],-1)
av_batch *= (xtsy!=0).unsqueeze(2) # masking pad inputs
av_batch = av_batch.to(self.args.device)
mask_caus = 1-torch.tril(torch.ones(self.args.seq_len,self.args.seq_len),diagonal=-1)
mask_caus = mask_caus.unsqueeze(0).unsqueeze(3)
mask_caus = mask_caus.expand(bs,-1,-1,self.args.av_tens.shape[-1])
mask_caus = mask_caus.type(torch.bool).to(self.args.device)
tile = torch.arange(self.args.seq_len).unsqueeze(0).repeat(bs,1).to(self.args.device)
bm = (inputs.unsqueeze(2).unsqueeze(3)==av_batch.unsqueeze(1).expand(-1,self.args.seq_len,-1,-1))
bm &= mask_caus
# **WARNING** this is a hacky way to get the last non-zero element in the sequence.
# It works with pytorch 1.8.1 but might break in the future.
sm = bm.type(torch.int).argmax(1)
sm = torch.any(bm,1) * sm
sm = (torch.gather(xtsy, 1, tile).unsqueeze(2) -
torch.gather(xtsb.unsqueeze(2).expand(-1,-1,self.args.av_tens.shape[-1]), 1, sm))
sm = torch.bucketize(sm, self.boundaries)+1
sm = torch.any(bm,1) * sm
sm *= av_batch!=0
sm *= inputs.unsqueeze(2)!=0
return sm
| inputs = data[:,:,3] # inputs
pos = data[:,:,5] # targets
xtsy = data[:,:,6] # targets ts
feats = self(inputs)
# Add time interval embeddings
if self.args.fr_ctx:
ctx,batch_inds = self.get_ctx_att(data,feats)
# identify repeated interactions in the batch
mask = torch.ones_like(pos[:,-1]).type(torch.bool)
for b in range(pos.shape[0]):
avt = pos[b,:-1]
avt = avt[avt!=0]
mask[b] = pos[b,-1] in avt
store['ratio'] += [float(pos[b,-1] in avt)]
for b in range(inputs.shape[0]):
step = xtsy[b,-1].item()
av = torch.LongTensor(self.args.ts[step]).to(self.args.device)
av_embs = self.item_embedding(av)
if self.args.fr_ctx:
ctx_expand = torch.zeros(self.args.av_tens.shape[1],self.args.K,device=self.args.device)
ctx_expand[batch_inds[b,-1,:],:] = ctx[b,-1,:,:]
scores = (feats[b,-1,:] * ctx_expand).sum(-1)
scores = scores[:len(av)]
else:
scores = (feats[b,-1,:] * av_embs).sum(-1)
iseq = pos[b,-1] == av
idx = torch.where(iseq)[0]
rank = torch.where(torch.argsort(scores, descending=True)==idx)[0].item()
if mask[b]: # rep
store['rrep'] += [rank]
else:
store['rnew'] += [rank]
store['rall'] += [rank]
return store | identifier_body |
models.py | import os
from data import *
from baselines import *
import numpy as np
import torch
import torch.nn as nn
def get_model_type(args):
# Model and data
mto = str(args.mto)
mto += '_' + str(args.K)
mto += '_' + str(args.l2)
mto += '_' + str(args.topk_att)
mto += '_' + str(args.num_att_ctx)
mto += '_' + str(args.seq_len)
mto += "_rep" if args.fr_rep else ""
mto += "_ctx" if args.fr_ctx else ""
mto += '.pt'
if args.model == "POP":
MODEL = POP
elif args.model == "REP":
MODEL = REP
elif args.model == "MF":
MODEL = MF
elif args.model == "FPMC":
MODEL = FPMC
elif args.model == "LiveRec":
MODEL = LiveRec
return os.path.join(args.model_path,mto),MODEL
class PointWiseFeedForward(nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.conv1 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = nn.Dropout(p=dropout_rate)
self.relu = nn.ReLU()
self.conv2 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2) # as Conv1D requires (N, C, Length)
outputs += inputs
return outputs
class Attention(nn.Module):
def __init__(self, args, num_att, num_heads, causality=False):
super(Attention, self).__init__()
self.args = args
self.causality = causality
self.attention_layernorms = nn.ModuleList()
self.attention_layers = nn.ModuleList()
self.forward_layernorms = nn.ModuleList()
self.forward_layers = nn.ModuleList()
self.last_layernorm = nn.LayerNorm(args.K, eps=1e-8)
for _ in range(num_att):
new_attn_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.attention_layernorms.append(new_attn_layernorm)
new_attn_layer = nn.MultiheadAttention(args.K,
num_heads,
0.2)
self.attention_layers.append(new_attn_layer)
new_fwd_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.forward_layernorms.append(new_fwd_layernorm)
new_fwd_layer = PointWiseFeedForward(args.K, 0.2)
self.forward_layers.append(new_fwd_layer)
def forward(self, seqs, timeline_mask=None):
if self.causality:
tl = seqs.shape[1] # time dim len for enforce causality
attention_mask = ~torch.tril(torch.ones((tl, tl),
dtype=torch.bool,
device=self.args.device))
else: attention_mask = None
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
for i in range(len(self.attention_layers)):
seqs = torch.transpose(seqs, 0, 1)
Q = self.attention_layernorms[i](seqs)
mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
attn_mask=attention_mask)
seqs = Q + mha_outputs
seqs = torch.transpose(seqs, 0, 1)
seqs = self.forward_layernorms[i](seqs)
seqs = self.forward_layers[i](seqs)
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
return self.last_layernorm(seqs)
class LiveRec(nn.Module):
def __init__(self, args):
super(LiveRec, self).__init__()
self.args = args
self.item_embedding = nn.Embedding(args.N+1, args.K, padding_idx=0)
self.pos_emb = nn.Embedding(args.seq_len, args.K)
self.emb_dropout = nn.Dropout(p=0.2)
# Sequence encoding attention
self.att = Attention(args,
args.num_att,
args.num_heads,
causality=True)
# Availability attention
self.att_ctx = Attention(args,
args.num_att_ctx,
args.num_heads_ctx,
causality=False)
# Time interval embedding
# 24h cycles, except for the first one set to 12h
self.boundaries = torch.LongTensor([0]+list(range(77,3000+144, 144))).to(args.device)
self.rep_emb = nn.Embedding(len(self.boundaries)+2, args.K, padding_idx=0)
def forward(self, log_seqs):
seqs = self.item_embedding(log_seqs)
seqs *= self.item_embedding.embedding_dim ** 0.5
positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
seqs += self.pos_emb(torch.LongTensor(positions).to(self.args.device))
seqs = self.emb_dropout(seqs)
timeline_mask = (log_seqs == 0).to(self.args.device)
feats = self.att(seqs, timeline_mask)
return feats
def predict(self,feats,inputs,items,ctx,data):
if ctx!=None: i_embs = ctx
else: self.item_embedding(items)
return (feats * i_embs).sum(dim=-1)
def compute_rank(self,data,store,k=10):
inputs = data[:,:,3] # inputs
pos = data[:,:,5] # targets
xtsy = data[:,:,6] # targets ts
feats = self(inputs)
# Add time interval embeddings
if self.args.fr_ctx:
ctx,batch_inds = self.get_ctx_att(data,feats)
# identify repeated interactions in the batch
mask = torch.ones_like(pos[:,-1]).type(torch.bool)
for b in range(pos.shape[0]):
avt = pos[b,:-1]
avt = avt[avt!=0]
mask[b] = pos[b,-1] in avt
store['ratio'] += [float(pos[b,-1] in avt)]
for b in range(inputs.shape[0]):
step = xtsy[b,-1].item()
av = torch.LongTensor(self.args.ts[step]).to(self.args.device)
av_embs = self.item_embedding(av)
if self.args.fr_ctx:
ctx_expand = torch.zeros(self.args.av_tens.shape[1],self.args.K,device=self.args.device)
ctx_expand[batch_inds[b,-1,:],:] = ctx[b,-1,:,:]
scores = (feats[b,-1,:] * ctx_expand).sum(-1)
scores = scores[:len(av)]
else:
scores = (feats[b,-1,:] * av_embs).sum(-1)
iseq = pos[b,-1] == av
idx = torch.where(iseq)[0]
rank = torch.where(torch.argsort(scores, descending=True)==idx)[0].item()
if mask[b]: # rep
store['rrep'] += [rank]
else:
store['rnew'] += [rank]
store['rall'] += [rank]
return store
def get_ctx_att(self,data,feats,neg=None):
if not self.args.fr_ctx: return None
inputs,pos,xtsy = data[:,:,3],data[:,:,5],data[:,:,6]
# unbatch indices
ci = torch.nonzero(inputs, as_tuple=False)
flat_xtsy = xtsy[ci[:,0],ci[:,1]]
av = self.args.av_tens[flat_xtsy,:]
av_embs = self.item_embedding(av)
# repeat consumption: time interval embeddings
if self.args.fr_rep:
av_rep_batch = self.get_av_rep(data)
av_rep_flat = av_rep_batch[ci[:,0],ci[:,1]]
rep_enc = self.rep_emb(av_rep_flat)
av_embs += rep_enc
flat_feats = feats[ci[:,0],ci[:,1],:]
flat_feats = flat_feats.unsqueeze(1).expand(flat_feats.shape[0],
self.args.av_tens.shape[-1],
flat_feats.shape[1])
scores = (av_embs * flat_feats).sum(-1)
inds = scores.topk(self.args.topk_att,dim=1).indices
# embed selected items
seqs = torch.gather(av_embs, 1, inds.unsqueeze(2) \
.expand(-1,-1,self.args.K))
seqs = self.att_ctx(seqs)
def expand_att(items):
av_pos = torch.where(av==items[ci[:,0],ci[:,1]].unsqueeze(1))[1]
is_in = torch.any(inds == av_pos.unsqueeze(1),1)
att_feats = torch.zeros(av.shape[0],self.args.K).to(self.args.device)
att_feats[is_in,:] = seqs[is_in,torch.where(av_pos.unsqueeze(1) == inds)[1],:]
out = torch.zeros(inputs.shape[0],inputs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = att_feats
return out
# training
if pos != None and neg != None:
return expand_att(pos),expand_att(neg)
# testing
else:
out = torch.zeros(inputs.shape[0],inputs.shape[1],seqs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = seqs
batch_inds = torch.zeros(inputs.shape[0],inputs.shape[1],inds.shape[1],dtype=torch.long).to(self.args.device)
batch_inds[ci[:,0],ci[:,1],:] = inds
return out,batch_inds
def | (self, data, use_ctx=False): # for training
inputs,pos = data[:,:,3],data[:,:,5]
neg = sample_negs(data,self.args).to(self.args.device)
feats = self(inputs)
ctx_pos,ctx_neg = None,None
if self.args.fr_ctx:
ctx_pos,ctx_neg = self.get_ctx_att(data,feats,neg)
pos_logits = self.predict(feats,inputs,pos,ctx_pos,data)
neg_logits = self.predict(feats,inputs,neg,ctx_neg,data)
loss = (-torch.log(pos_logits[inputs!=0].sigmoid()+1e-24)
-torch.log(1-neg_logits[inputs!=0].sigmoid()+1e-24)).sum()
return loss
def get_av_rep(self,data):
bs = data.shape[0]
inputs = data[:,:,3] # inputs
xtsb = data[:,:,2] # inputs ts
xtsy = data[:,:,6] # targets ts
av_batch = self.args.av_tens[xtsy.view(-1),:]
av_batch = av_batch.view(xtsy.shape[0],xtsy.shape[1],-1)
av_batch *= (xtsy!=0).unsqueeze(2) # masking pad inputs
av_batch = av_batch.to(self.args.device)
mask_caus = 1-torch.tril(torch.ones(self.args.seq_len,self.args.seq_len),diagonal=-1)
mask_caus = mask_caus.unsqueeze(0).unsqueeze(3)
mask_caus = mask_caus.expand(bs,-1,-1,self.args.av_tens.shape[-1])
mask_caus = mask_caus.type(torch.bool).to(self.args.device)
tile = torch.arange(self.args.seq_len).unsqueeze(0).repeat(bs,1).to(self.args.device)
bm = (inputs.unsqueeze(2).unsqueeze(3)==av_batch.unsqueeze(1).expand(-1,self.args.seq_len,-1,-1))
bm &= mask_caus
# **WARNING** this is a hacky way to get the last non-zero element in the sequence.
# It works with pytorch 1.8.1 but might break in the future.
sm = bm.type(torch.int).argmax(1)
sm = torch.any(bm,1) * sm
sm = (torch.gather(xtsy, 1, tile).unsqueeze(2) -
torch.gather(xtsb.unsqueeze(2).expand(-1,-1,self.args.av_tens.shape[-1]), 1, sm))
sm = torch.bucketize(sm, self.boundaries)+1
sm = torch.any(bm,1) * sm
sm *= av_batch!=0
sm *= inputs.unsqueeze(2)!=0
return sm
| train_step | identifier_name |
models.py | import os
from data import *
from baselines import *
import numpy as np
import torch
import torch.nn as nn
def get_model_type(args):
# Model and data
mto = str(args.mto)
mto += '_' + str(args.K)
mto += '_' + str(args.l2)
mto += '_' + str(args.topk_att)
mto += '_' + str(args.num_att_ctx)
mto += '_' + str(args.seq_len)
mto += "_rep" if args.fr_rep else ""
mto += "_ctx" if args.fr_ctx else ""
mto += '.pt'
if args.model == "POP":
MODEL = POP
elif args.model == "REP":
MODEL = REP
elif args.model == "MF":
MODEL = MF
elif args.model == "FPMC":
MODEL = FPMC
elif args.model == "LiveRec":
MODEL = LiveRec
return os.path.join(args.model_path,mto),MODEL
class PointWiseFeedForward(nn.Module):
def __init__(self, hidden_units, dropout_rate):
super(PointWiseFeedForward, self).__init__()
self.conv1 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout1 = nn.Dropout(p=dropout_rate)
self.relu = nn.ReLU()
self.conv2 = nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
self.dropout2 = nn.Dropout(p=dropout_rate)
def forward(self, inputs):
outputs = self.dropout2(self.conv2(self.relu(self.dropout1(self.conv1(inputs.transpose(-1, -2))))))
outputs = outputs.transpose(-1, -2) # as Conv1D requires (N, C, Length)
outputs += inputs
return outputs
class Attention(nn.Module):
def __init__(self, args, num_att, num_heads, causality=False):
super(Attention, self).__init__()
self.args = args
self.causality = causality
self.attention_layernorms = nn.ModuleList()
self.attention_layers = nn.ModuleList()
self.forward_layernorms = nn.ModuleList()
self.forward_layers = nn.ModuleList()
self.last_layernorm = nn.LayerNorm(args.K, eps=1e-8)
for _ in range(num_att):
new_attn_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.attention_layernorms.append(new_attn_layernorm)
new_attn_layer = nn.MultiheadAttention(args.K,
num_heads,
0.2)
self.attention_layers.append(new_attn_layer)
new_fwd_layernorm = nn.LayerNorm(args.K, eps=1e-8)
self.forward_layernorms.append(new_fwd_layernorm)
new_fwd_layer = PointWiseFeedForward(args.K, 0.2)
self.forward_layers.append(new_fwd_layer)
def forward(self, seqs, timeline_mask=None):
if self.causality:
tl = seqs.shape[1] # time dim len for enforce causality
attention_mask = ~torch.tril(torch.ones((tl, tl),
dtype=torch.bool,
device=self.args.device))
else: attention_mask = None
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
for i in range(len(self.attention_layers)):
seqs = torch.transpose(seqs, 0, 1)
Q = self.attention_layernorms[i](seqs)
mha_outputs, _ = self.attention_layers[i](Q, seqs, seqs,
attn_mask=attention_mask)
seqs = Q + mha_outputs
seqs = torch.transpose(seqs, 0, 1)
seqs = self.forward_layernorms[i](seqs)
seqs = self.forward_layers[i](seqs)
if timeline_mask != None:
seqs *= ~timeline_mask.unsqueeze(-1)
return self.last_layernorm(seqs)
class LiveRec(nn.Module):
def __init__(self, args):
super(LiveRec, self).__init__()
self.args = args
self.item_embedding = nn.Embedding(args.N+1, args.K, padding_idx=0)
self.pos_emb = nn.Embedding(args.seq_len, args.K)
self.emb_dropout = nn.Dropout(p=0.2)
# Sequence encoding attention
self.att = Attention(args,
args.num_att,
args.num_heads,
causality=True)
# Availability attention
self.att_ctx = Attention(args,
args.num_att_ctx,
args.num_heads_ctx,
causality=False)
# Time interval embedding
# 24h cycles, except for the first one set to 12h
self.boundaries = torch.LongTensor([0]+list(range(77,3000+144, 144))).to(args.device)
self.rep_emb = nn.Embedding(len(self.boundaries)+2, args.K, padding_idx=0)
def forward(self, log_seqs):
seqs = self.item_embedding(log_seqs)
seqs *= self.item_embedding.embedding_dim ** 0.5
positions = np.tile(np.array(range(log_seqs.shape[1])), [log_seqs.shape[0], 1])
seqs += self.pos_emb(torch.LongTensor(positions).to(self.args.device))
seqs = self.emb_dropout(seqs)
timeline_mask = (log_seqs == 0).to(self.args.device)
feats = self.att(seqs, timeline_mask)
return feats
def predict(self,feats,inputs,items,ctx,data):
if ctx!=None: i_embs = ctx
else: self.item_embedding(items)
return (feats * i_embs).sum(dim=-1)
def compute_rank(self,data,store,k=10):
inputs = data[:,:,3] # inputs
pos = data[:,:,5] # targets
xtsy = data[:,:,6] # targets ts
feats = self(inputs)
# Add time interval embeddings
if self.args.fr_ctx:
ctx,batch_inds = self.get_ctx_att(data,feats)
# identify repeated interactions in the batch
mask = torch.ones_like(pos[:,-1]).type(torch.bool)
for b in range(pos.shape[0]):
avt = pos[b,:-1]
avt = avt[avt!=0]
mask[b] = pos[b,-1] in avt
store['ratio'] += [float(pos[b,-1] in avt)]
for b in range(inputs.shape[0]):
step = xtsy[b,-1].item()
av = torch.LongTensor(self.args.ts[step]).to(self.args.device)
av_embs = self.item_embedding(av)
if self.args.fr_ctx:
ctx_expand = torch.zeros(self.args.av_tens.shape[1],self.args.K,device=self.args.device)
ctx_expand[batch_inds[b,-1,:],:] = ctx[b,-1,:,:]
scores = (feats[b,-1,:] * ctx_expand).sum(-1)
scores = scores[:len(av)]
else:
scores = (feats[b,-1,:] * av_embs).sum(-1)
iseq = pos[b,-1] == av
idx = torch.where(iseq)[0]
rank = torch.where(torch.argsort(scores, descending=True)==idx)[0].item()
if mask[b]: # rep
store['rrep'] += [rank]
else:
store['rnew'] += [rank]
store['rall'] += [rank]
return store
def get_ctx_att(self,data,feats,neg=None):
if not self.args.fr_ctx: return None
inputs,pos,xtsy = data[:,:,3],data[:,:,5],data[:,:,6]
# unbatch indices
ci = torch.nonzero(inputs, as_tuple=False)
flat_xtsy = xtsy[ci[:,0],ci[:,1]]
av = self.args.av_tens[flat_xtsy,:]
av_embs = self.item_embedding(av)
# repeat consumption: time interval embeddings
if self.args.fr_rep:
av_rep_batch = self.get_av_rep(data)
av_rep_flat = av_rep_batch[ci[:,0],ci[:,1]]
rep_enc = self.rep_emb(av_rep_flat)
av_embs += rep_enc
flat_feats = feats[ci[:,0],ci[:,1],:]
flat_feats = flat_feats.unsqueeze(1).expand(flat_feats.shape[0],
self.args.av_tens.shape[-1],
flat_feats.shape[1])
scores = (av_embs * flat_feats).sum(-1)
inds = scores.topk(self.args.topk_att,dim=1).indices
# embed selected items
seqs = torch.gather(av_embs, 1, inds.unsqueeze(2) \
.expand(-1,-1,self.args.K))
seqs = self.att_ctx(seqs)
def expand_att(items):
av_pos = torch.where(av==items[ci[:,0],ci[:,1]].unsqueeze(1))[1]
is_in = torch.any(inds == av_pos.unsqueeze(1),1)
att_feats = torch.zeros(av.shape[0],self.args.K).to(self.args.device)
att_feats[is_in,:] = seqs[is_in,torch.where(av_pos.unsqueeze(1) == inds)[1],:]
out = torch.zeros(inputs.shape[0],inputs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = att_feats
return out
# training
if pos != None and neg != None:
return expand_att(pos),expand_att(neg)
# testing
else:
out = torch.zeros(inputs.shape[0],inputs.shape[1],seqs.shape[1],self.args.K).to(self.args.device)
out[ci[:,0],ci[:,1],:] = seqs
batch_inds = torch.zeros(inputs.shape[0],inputs.shape[1],inds.shape[1],dtype=torch.long).to(self.args.device)
batch_inds[ci[:,0],ci[:,1],:] = inds
return out,batch_inds
def train_step(self, data, use_ctx=False): # for training
inputs,pos = data[:,:,3],data[:,:,5]
neg = sample_negs(data,self.args).to(self.args.device)
feats = self(inputs)
ctx_pos,ctx_neg = None,None
if self.args.fr_ctx:
ctx_pos,ctx_neg = self.get_ctx_att(data,feats,neg)
pos_logits = self.predict(feats,inputs,pos,ctx_pos,data)
neg_logits = self.predict(feats,inputs,neg,ctx_neg,data)
loss = (-torch.log(pos_logits[inputs!=0].sigmoid()+1e-24)
-torch.log(1-neg_logits[inputs!=0].sigmoid()+1e-24)).sum()
return loss
|
av_batch = self.args.av_tens[xtsy.view(-1),:]
av_batch = av_batch.view(xtsy.shape[0],xtsy.shape[1],-1)
av_batch *= (xtsy!=0).unsqueeze(2) # masking pad inputs
av_batch = av_batch.to(self.args.device)
mask_caus = 1-torch.tril(torch.ones(self.args.seq_len,self.args.seq_len),diagonal=-1)
mask_caus = mask_caus.unsqueeze(0).unsqueeze(3)
mask_caus = mask_caus.expand(bs,-1,-1,self.args.av_tens.shape[-1])
mask_caus = mask_caus.type(torch.bool).to(self.args.device)
tile = torch.arange(self.args.seq_len).unsqueeze(0).repeat(bs,1).to(self.args.device)
bm = (inputs.unsqueeze(2).unsqueeze(3)==av_batch.unsqueeze(1).expand(-1,self.args.seq_len,-1,-1))
bm &= mask_caus
# **WARNING** this is a hacky way to get the last non-zero element in the sequence.
# It works with pytorch 1.8.1 but might break in the future.
sm = bm.type(torch.int).argmax(1)
sm = torch.any(bm,1) * sm
sm = (torch.gather(xtsy, 1, tile).unsqueeze(2) -
torch.gather(xtsb.unsqueeze(2).expand(-1,-1,self.args.av_tens.shape[-1]), 1, sm))
sm = torch.bucketize(sm, self.boundaries)+1
sm = torch.any(bm,1) * sm
sm *= av_batch!=0
sm *= inputs.unsqueeze(2)!=0
return sm | def get_av_rep(self,data):
bs = data.shape[0]
inputs = data[:,:,3] # inputs
xtsb = data[:,:,2] # inputs ts
xtsy = data[:,:,6] # targets ts | random_line_split |
bindings.rs | //! Setting up and responding to user defined key/mouse bindings
use crate::{
core::{State, Xid},
pure::geometry::Point,
x::XConn,
Error, Result,
};
#[cfg(feature = "keysyms")]
use penrose_keysyms::XKeySym;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, convert::TryFrom, fmt, process::Command};
use strum::{EnumIter, IntoEnumIterator};
use tracing::trace;
/// Run the xmodmap command to dump the system keymap table.
///
/// This is done in a form that we can load in and convert back to key
/// codes. This lets the user define key bindings in the way that they
/// would expect while also ensuring that it is east to debug any odd
/// issues with bindings by referring the user to the xmodmap output.
///
/// # Panics
/// This function will panic if it is unable to fetch keycodes using the xmodmap
/// binary on your system or if the output of `xmodmap -pke` is not valid
pub fn keycodes_from_xmodmap() -> Result<HashMap<String, u8>> {
let output = Command::new("xmodmap").arg("-pke").output()?;
let m = String::from_utf8(output.stdout)?
.lines()
.flat_map(|l| {
let mut words = l.split_whitespace(); // keycode <code> = <names ...>
let key_code: u8 = match words.nth(1) {
Some(word) => match word.parse() {
Ok(val) => val,
Err(e) => panic!("{}", e),
},
None => panic!("unexpected output format from xmodmap -pke"),
};
words.skip(1).map(move |name| (name.into(), key_code))
})
.collect();
Ok(m)
}
fn parse_binding(pattern: &str, known_codes: &HashMap<String, u8>) -> Result<KeyCode> {
let mut parts: Vec<&str> = pattern.split('-').collect();
let name = parts.remove(parts.len() - 1);
match known_codes.get(name) {
Some(code) => {
let mask = parts
.iter()
.map(|&s| ModifierKey::try_from(s))
.try_fold(0, |acc, v| v.map(|inner| acc | u16::from(inner)))?;
trace!(?pattern, mask, code, "parsed keybinding");
Ok(KeyCode { mask, code: *code })
}
None => Err(Error::UnknownKeyName {
name: name.to_owned(),
}),
}
}
/// Parse string format key bindings into [KeyCode] based [KeyBindings] using
/// the command line `xmodmap` utility.
///
/// See [keycodes_from_xmodmap] for details of how `xmodmap` is used.
pub fn parse_keybindings_with_xmodmap<S, X>(
str_bindings: HashMap<S, Box<dyn KeyEventHandler<X>>>,
) -> Result<KeyBindings<X>>
where
S: AsRef<str>,
X: XConn,
{
let m = keycodes_from_xmodmap()?;
str_bindings
.into_iter()
.map(|(s, v)| parse_binding(s.as_ref(), &m).map(|k| (k, v)))
.collect()
}
/// Some action to be run by a user key binding
pub trait KeyEventHandler<X>
where
X: XConn,
{
/// Call this handler with the current window manager state
fn call(&mut self, state: &mut State<X>, x: &X) -> Result<()>;
}
impl<X: XConn> fmt::Debug for Box<dyn KeyEventHandler<X>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyEventHandler").finish()
}
}
impl<F, X> KeyEventHandler<X> for F
where
F: FnMut(&mut State<X>, &X) -> Result<()>,
X: XConn,
{
fn call(&mut self, state: &mut State<X>, x: &X) -> Result<()> {
(self)(state, x)
}
}
/// User defined key bindings
pub type KeyBindings<X> = HashMap<KeyCode, Box<dyn KeyEventHandler<X>>>;
/// An action to be run in response to a mouse event
pub trait MouseEventHandler<X>
where
X: XConn,
{
/// Call this handler with the current window manager state and mouse state
fn call(&mut self, evt: &MouseEvent, state: &mut State<X>, x: &X) -> Result<()>;
}
impl<X: XConn> fmt::Debug for Box<dyn MouseEventHandler<X>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyEventHandler").finish()
}
}
impl<F, X> MouseEventHandler<X> for F
where
F: FnMut(&MouseEvent, &mut State<X>, &X) -> Result<()>,
X: XConn,
{
fn call(&mut self, evt: &MouseEvent, state: &mut State<X>, x: &X) -> Result<()> {
(self)(evt, state, x)
}
}
/// User defined mouse bindings
pub type MouseBindings<X> = HashMap<(MouseEventKind, MouseState), Box<dyn MouseEventHandler<X>>>;
/// Abstraction layer for working with key presses
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum KeyPress {
/// A raw character key
Utf8(String),
/// Return / enter key
Return,
/// Escape
Escape,
/// Tab
Tab,
/// Backspace
Backspace,
/// Delete
Delete,
/// PageUp
PageUp,
/// PageDown
PageDown,
/// Up
Up,
/// Down
Down,
/// Left
Left,
/// Right
Right,
}
#[cfg(feature = "keysyms")]
impl TryFrom<XKeySym> for KeyPress {
type Error = std::string::FromUtf8Error;
fn try_from(s: XKeySym) -> std::result::Result<KeyPress, Self::Error> {
Ok(match s {
XKeySym::XK_Return | XKeySym::XK_KP_Enter | XKeySym::XK_ISO_Enter => KeyPress::Return,
XKeySym::XK_Escape => KeyPress::Escape,
XKeySym::XK_Tab | XKeySym::XK_ISO_Left_Tab | XKeySym::XK_KP_Tab => KeyPress::Tab,
XKeySym::XK_BackSpace => KeyPress::Backspace,
XKeySym::XK_Delete | XKeySym::XK_KP_Delete => KeyPress::Delete,
XKeySym::XK_Page_Up | XKeySym::XK_KP_Page_Up => KeyPress::PageUp,
XKeySym::XK_Page_Down | XKeySym::XK_KP_Page_Down => KeyPress::PageDown,
XKeySym::XK_Up | XKeySym::XK_KP_Up => KeyPress::Up,
XKeySym::XK_Down | XKeySym::XK_KP_Down => KeyPress::Down,
XKeySym::XK_Left | XKeySym::XK_KP_Left => KeyPress::Left,
XKeySym::XK_Right | XKeySym::XK_KP_Right => KeyPress::Right,
s => KeyPress::Utf8(s.as_utf8_string()?),
})
}
}
/// A u16 X key-code bitmask
pub type KeyCodeMask = u16;
/// A u8 X key-code enum value
pub type KeyCodeValue = u8;
/// A key press and held modifiers
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct KeyCode {
/// The held modifier mask
pub mask: KeyCodeMask,
/// The key code that was held
pub code: KeyCodeValue,
}
impl KeyCode {
/// Create a new [KeyCode] from this one that removes the given mask
pub fn ignoring_modifier(&self, mask: KeyCodeMask) -> KeyCode {
KeyCode {
mask: self.mask & !mask,
code: self.code,
}
}
}
/// Known mouse buttons for binding actions
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum MouseButton {
/// 1
Left,
/// 2
Middle,
/// 3
Right,
/// 4
ScrollUp,
/// 5
ScrollDown,
}
impl From<MouseButton> for u8 {
fn from(b: MouseButton) -> u8 {
match b {
MouseButton::Left => 1,
MouseButton::Middle => 2,
MouseButton::Right => 3,
MouseButton::ScrollUp => 4,
MouseButton::ScrollDown => 5,
}
}
}
impl TryFrom<u8> for MouseButton {
type Error = Error;
fn try_from(n: u8) -> Result<Self> {
match n {
1 => Ok(Self::Left),
2 => Ok(Self::Middle),
3 => Ok(Self::Right),
4 => Ok(Self::ScrollUp),
5 => Ok(Self::ScrollDown),
_ => Err(Error::UnknownMouseButton { button: n }),
}
}
}
/// Known modifier keys for bindings
#[derive(Debug, EnumIter, PartialEq, Eq, Hash, Clone, Copy, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModifierKey {
/// Control
Ctrl,
/// Alt
Alt,
/// Shift
Shift,
/// Meta / super / windows
Meta,
}
impl ModifierKey {
fn was_held(&self, mask: u16) -> bool {
mask & u16::from(*self) > 0
}
}
impl From<ModifierKey> for u16 {
fn from(m: ModifierKey) -> u16 {
(match m {
ModifierKey::Shift => 1 << 0,
ModifierKey::Ctrl => 1 << 2,
ModifierKey::Alt => 1 << 3,
ModifierKey::Meta => 1 << 6,
}) as u16
}
}
impl TryFrom<&str> for ModifierKey {
type Error = Error;
fn try_from(s: &str) -> std::result::Result<Self, Self::Error> {
match s {
"C" => Ok(Self::Ctrl),
"A" => Ok(Self::Alt),
"S" => Ok(Self::Shift),
"M" => Ok(Self::Meta),
_ => Err(Error::UnknownModifier { name: s.to_owned() }),
}
}
}
/// A mouse state specification indicating the button and modifiers held
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MouseState {
/// The [MouseButton] being held
pub button: MouseButton,
/// All [ModifierKey]s being held
pub modifiers: Vec<ModifierKey>,
}
impl MouseState {
/// Construct a new MouseState
pub fn new(button: MouseButton, mut modifiers: Vec<ModifierKey>) -> Self {
modifiers.sort();
Self { button, modifiers }
}
/// Parse raw mouse state values into a [MouseState]
pub fn from_detail_and_state(detail: u8, state: u16) -> Result<Self> {
Ok(Self {
button: MouseButton::try_from(detail)?,
modifiers: ModifierKey::iter().filter(|m| m.was_held(state)).collect(),
})
}
/// The xcb bitmask for this [MouseState]
pub fn mask(&self) -> u16 {
self.modifiers
.iter()
.fold(0, |acc, &val| acc | u16::from(val))
}
/// The xcb button ID for this [MouseState]
pub fn button(&self) -> u8 |
}
/// The types of mouse events represented by a MouseEvent
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum MouseEventKind {
/// A button was pressed
Press,
/// A button was released
Release,
/// The mouse was moved while a button was held
Motion,
}
/// A mouse movement or button event
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MouseEvent {
/// The ID of the window that was contained the click
pub id: Xid,
/// Absolute coordinate of the event
pub rpt: Point,
/// Coordinate of the event relative to top-left of the window itself
pub wpt: Point,
/// The modifier and button code that was received
pub state: MouseState,
/// Was this press, release or motion?
pub kind: MouseEventKind,
}
impl MouseEvent {
/// Construct a new [MouseEvent] from raw data
pub fn new(
id: Xid,
rx: i16,
ry: i16,
ex: i16,
ey: i16,
state: MouseState,
kind: MouseEventKind,
) -> Self {
MouseEvent {
id,
rpt: Point::new(rx as u32, ry as u32),
wpt: Point::new(ex as u32, ey as u32),
state,
kind,
}
}
}
| {
self.button.into()
} | identifier_body |
bindings.rs | //! Setting up and responding to user defined key/mouse bindings
use crate::{
core::{State, Xid},
pure::geometry::Point,
x::XConn,
Error, Result,
};
#[cfg(feature = "keysyms")]
use penrose_keysyms::XKeySym;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, convert::TryFrom, fmt, process::Command};
use strum::{EnumIter, IntoEnumIterator};
use tracing::trace;
/// Run the xmodmap command to dump the system keymap table.
///
/// This is done in a form that we can load in and convert back to key
/// codes. This lets the user define key bindings in the way that they
/// would expect while also ensuring that it is east to debug any odd
/// issues with bindings by referring the user to the xmodmap output.
///
/// # Panics
/// This function will panic if it is unable to fetch keycodes using the xmodmap
/// binary on your system or if the output of `xmodmap -pke` is not valid
pub fn keycodes_from_xmodmap() -> Result<HashMap<String, u8>> {
let output = Command::new("xmodmap").arg("-pke").output()?;
let m = String::from_utf8(output.stdout)?
.lines()
.flat_map(|l| {
let mut words = l.split_whitespace(); // keycode <code> = <names ...>
let key_code: u8 = match words.nth(1) {
Some(word) => match word.parse() {
Ok(val) => val,
Err(e) => panic!("{}", e),
},
None => panic!("unexpected output format from xmodmap -pke"),
};
words.skip(1).map(move |name| (name.into(), key_code))
})
.collect();
Ok(m)
}
fn parse_binding(pattern: &str, known_codes: &HashMap<String, u8>) -> Result<KeyCode> {
let mut parts: Vec<&str> = pattern.split('-').collect();
let name = parts.remove(parts.len() - 1);
match known_codes.get(name) {
Some(code) => {
let mask = parts
.iter()
.map(|&s| ModifierKey::try_from(s))
.try_fold(0, |acc, v| v.map(|inner| acc | u16::from(inner)))?;
trace!(?pattern, mask, code, "parsed keybinding");
Ok(KeyCode { mask, code: *code })
}
None => Err(Error::UnknownKeyName {
name: name.to_owned(),
}),
}
}
/// Parse string format key bindings into [KeyCode] based [KeyBindings] using
/// the command line `xmodmap` utility.
///
/// See [keycodes_from_xmodmap] for details of how `xmodmap` is used.
pub fn parse_keybindings_with_xmodmap<S, X>(
str_bindings: HashMap<S, Box<dyn KeyEventHandler<X>>>,
) -> Result<KeyBindings<X>>
where
S: AsRef<str>,
X: XConn,
{
let m = keycodes_from_xmodmap()?;
str_bindings
.into_iter()
.map(|(s, v)| parse_binding(s.as_ref(), &m).map(|k| (k, v)))
.collect()
}
/// Some action to be run by a user key binding
pub trait KeyEventHandler<X>
where
X: XConn,
{
/// Call this handler with the current window manager state
fn call(&mut self, state: &mut State<X>, x: &X) -> Result<()>;
}
impl<X: XConn> fmt::Debug for Box<dyn KeyEventHandler<X>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyEventHandler").finish()
}
}
impl<F, X> KeyEventHandler<X> for F
where
F: FnMut(&mut State<X>, &X) -> Result<()>,
X: XConn,
{
fn call(&mut self, state: &mut State<X>, x: &X) -> Result<()> {
(self)(state, x)
}
}
/// User defined key bindings
pub type KeyBindings<X> = HashMap<KeyCode, Box<dyn KeyEventHandler<X>>>;
/// An action to be run in response to a mouse event
pub trait MouseEventHandler<X>
where
X: XConn,
{
/// Call this handler with the current window manager state and mouse state
fn call(&mut self, evt: &MouseEvent, state: &mut State<X>, x: &X) -> Result<()>;
}
impl<X: XConn> fmt::Debug for Box<dyn MouseEventHandler<X>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyEventHandler").finish()
}
}
impl<F, X> MouseEventHandler<X> for F
where
F: FnMut(&MouseEvent, &mut State<X>, &X) -> Result<()>,
X: XConn,
{
fn call(&mut self, evt: &MouseEvent, state: &mut State<X>, x: &X) -> Result<()> {
(self)(evt, state, x)
}
}
/// User defined mouse bindings
pub type MouseBindings<X> = HashMap<(MouseEventKind, MouseState), Box<dyn MouseEventHandler<X>>>;
/// Abstraction layer for working with key presses
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum KeyPress {
/// A raw character key
Utf8(String),
/// Return / enter key
Return,
/// Escape
Escape,
/// Tab
Tab,
/// Backspace
Backspace,
/// Delete
Delete,
/// PageUp
PageUp,
/// PageDown
PageDown,
/// Up
Up,
/// Down
Down,
/// Left
Left,
/// Right
Right,
}
#[cfg(feature = "keysyms")]
impl TryFrom<XKeySym> for KeyPress {
type Error = std::string::FromUtf8Error;
fn try_from(s: XKeySym) -> std::result::Result<KeyPress, Self::Error> {
Ok(match s {
XKeySym::XK_Return | XKeySym::XK_KP_Enter | XKeySym::XK_ISO_Enter => KeyPress::Return,
XKeySym::XK_Escape => KeyPress::Escape,
XKeySym::XK_Tab | XKeySym::XK_ISO_Left_Tab | XKeySym::XK_KP_Tab => KeyPress::Tab,
XKeySym::XK_BackSpace => KeyPress::Backspace,
XKeySym::XK_Delete | XKeySym::XK_KP_Delete => KeyPress::Delete,
XKeySym::XK_Page_Up | XKeySym::XK_KP_Page_Up => KeyPress::PageUp,
XKeySym::XK_Page_Down | XKeySym::XK_KP_Page_Down => KeyPress::PageDown,
XKeySym::XK_Up | XKeySym::XK_KP_Up => KeyPress::Up,
XKeySym::XK_Down | XKeySym::XK_KP_Down => KeyPress::Down,
XKeySym::XK_Left | XKeySym::XK_KP_Left => KeyPress::Left,
XKeySym::XK_Right | XKeySym::XK_KP_Right => KeyPress::Right,
s => KeyPress::Utf8(s.as_utf8_string()?),
})
}
}
/// A u16 X key-code bitmask
pub type KeyCodeMask = u16;
/// A u8 X key-code enum value
pub type KeyCodeValue = u8;
/// A key press and held modifiers
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct KeyCode {
/// The held modifier mask
pub mask: KeyCodeMask,
/// The key code that was held
pub code: KeyCodeValue,
}
impl KeyCode {
/// Create a new [KeyCode] from this one that removes the given mask
pub fn ignoring_modifier(&self, mask: KeyCodeMask) -> KeyCode {
KeyCode {
mask: self.mask & !mask,
code: self.code,
}
}
}
/// Known mouse buttons for binding actions
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum MouseButton {
/// 1
Left,
/// 2
Middle,
/// 3
Right,
/// 4
ScrollUp,
/// 5
ScrollDown,
}
impl From<MouseButton> for u8 {
fn from(b: MouseButton) -> u8 {
match b {
MouseButton::Left => 1,
MouseButton::Middle => 2,
MouseButton::Right => 3,
MouseButton::ScrollUp => 4,
MouseButton::ScrollDown => 5,
}
}
}
impl TryFrom<u8> for MouseButton {
type Error = Error;
fn try_from(n: u8) -> Result<Self> {
match n {
1 => Ok(Self::Left),
2 => Ok(Self::Middle),
3 => Ok(Self::Right),
4 => Ok(Self::ScrollUp),
5 => Ok(Self::ScrollDown),
_ => Err(Error::UnknownMouseButton { button: n }),
}
}
}
/// Known modifier keys for bindings
#[derive(Debug, EnumIter, PartialEq, Eq, Hash, Clone, Copy, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModifierKey {
/// Control
Ctrl,
/// Alt
Alt,
/// Shift
Shift,
/// Meta / super / windows
Meta,
}
impl ModifierKey {
fn was_held(&self, mask: u16) -> bool {
mask & u16::from(*self) > 0
}
}
impl From<ModifierKey> for u16 {
fn from(m: ModifierKey) -> u16 {
(match m {
ModifierKey::Shift => 1 << 0,
ModifierKey::Ctrl => 1 << 2,
ModifierKey::Alt => 1 << 3,
ModifierKey::Meta => 1 << 6,
}) as u16
}
}
impl TryFrom<&str> for ModifierKey {
type Error = Error;
fn try_from(s: &str) -> std::result::Result<Self, Self::Error> {
match s {
"C" => Ok(Self::Ctrl),
"A" => Ok(Self::Alt),
"S" => Ok(Self::Shift),
"M" => Ok(Self::Meta),
_ => Err(Error::UnknownModifier { name: s.to_owned() }),
}
}
}
/// A mouse state specification indicating the button and modifiers held
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MouseState {
/// The [MouseButton] being held
pub button: MouseButton,
/// All [ModifierKey]s being held
pub modifiers: Vec<ModifierKey>,
}
impl MouseState {
/// Construct a new MouseState
pub fn new(button: MouseButton, mut modifiers: Vec<ModifierKey>) -> Self {
modifiers.sort();
Self { button, modifiers }
}
/// Parse raw mouse state values into a [MouseState]
pub fn from_detail_and_state(detail: u8, state: u16) -> Result<Self> {
Ok(Self {
button: MouseButton::try_from(detail)?,
modifiers: ModifierKey::iter().filter(|m| m.was_held(state)).collect(),
})
}
/// The xcb bitmask for this [MouseState]
pub fn mask(&self) -> u16 {
self.modifiers
.iter()
.fold(0, |acc, &val| acc | u16::from(val))
}
/// The xcb button ID for this [MouseState]
pub fn button(&self) -> u8 { | self.button.into()
}
}
/// The types of mouse events represented by a MouseEvent
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum MouseEventKind {
/// A button was pressed
Press,
/// A button was released
Release,
/// The mouse was moved while a button was held
Motion,
}
/// A mouse movement or button event
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MouseEvent {
/// The ID of the window that was contained the click
pub id: Xid,
/// Absolute coordinate of the event
pub rpt: Point,
/// Coordinate of the event relative to top-left of the window itself
pub wpt: Point,
/// The modifier and button code that was received
pub state: MouseState,
/// Was this press, release or motion?
pub kind: MouseEventKind,
}
impl MouseEvent {
/// Construct a new [MouseEvent] from raw data
pub fn new(
id: Xid,
rx: i16,
ry: i16,
ex: i16,
ey: i16,
state: MouseState,
kind: MouseEventKind,
) -> Self {
MouseEvent {
id,
rpt: Point::new(rx as u32, ry as u32),
wpt: Point::new(ex as u32, ey as u32),
state,
kind,
}
}
} | random_line_split | |
bindings.rs | //! Setting up and responding to user defined key/mouse bindings
use crate::{
core::{State, Xid},
pure::geometry::Point,
x::XConn,
Error, Result,
};
#[cfg(feature = "keysyms")]
use penrose_keysyms::XKeySym;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::{collections::HashMap, convert::TryFrom, fmt, process::Command};
use strum::{EnumIter, IntoEnumIterator};
use tracing::trace;
/// Run the xmodmap command to dump the system keymap table.
///
/// This is done in a form that we can load in and convert back to key
/// codes. This lets the user define key bindings in the way that they
/// would expect while also ensuring that it is east to debug any odd
/// issues with bindings by referring the user to the xmodmap output.
///
/// # Panics
/// This function will panic if it is unable to fetch keycodes using the xmodmap
/// binary on your system or if the output of `xmodmap -pke` is not valid
pub fn keycodes_from_xmodmap() -> Result<HashMap<String, u8>> {
let output = Command::new("xmodmap").arg("-pke").output()?;
let m = String::from_utf8(output.stdout)?
.lines()
.flat_map(|l| {
let mut words = l.split_whitespace(); // keycode <code> = <names ...>
let key_code: u8 = match words.nth(1) {
Some(word) => match word.parse() {
Ok(val) => val,
Err(e) => panic!("{}", e),
},
None => panic!("unexpected output format from xmodmap -pke"),
};
words.skip(1).map(move |name| (name.into(), key_code))
})
.collect();
Ok(m)
}
fn parse_binding(pattern: &str, known_codes: &HashMap<String, u8>) -> Result<KeyCode> {
let mut parts: Vec<&str> = pattern.split('-').collect();
let name = parts.remove(parts.len() - 1);
match known_codes.get(name) {
Some(code) => {
let mask = parts
.iter()
.map(|&s| ModifierKey::try_from(s))
.try_fold(0, |acc, v| v.map(|inner| acc | u16::from(inner)))?;
trace!(?pattern, mask, code, "parsed keybinding");
Ok(KeyCode { mask, code: *code })
}
None => Err(Error::UnknownKeyName {
name: name.to_owned(),
}),
}
}
/// Parse string format key bindings into [KeyCode] based [KeyBindings] using
/// the command line `xmodmap` utility.
///
/// See [keycodes_from_xmodmap] for details of how `xmodmap` is used.
pub fn parse_keybindings_with_xmodmap<S, X>(
str_bindings: HashMap<S, Box<dyn KeyEventHandler<X>>>,
) -> Result<KeyBindings<X>>
where
S: AsRef<str>,
X: XConn,
{
let m = keycodes_from_xmodmap()?;
str_bindings
.into_iter()
.map(|(s, v)| parse_binding(s.as_ref(), &m).map(|k| (k, v)))
.collect()
}
/// Some action to be run by a user key binding
pub trait KeyEventHandler<X>
where
X: XConn,
{
/// Call this handler with the current window manager state
fn call(&mut self, state: &mut State<X>, x: &X) -> Result<()>;
}
impl<X: XConn> fmt::Debug for Box<dyn KeyEventHandler<X>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyEventHandler").finish()
}
}
impl<F, X> KeyEventHandler<X> for F
where
F: FnMut(&mut State<X>, &X) -> Result<()>,
X: XConn,
{
fn call(&mut self, state: &mut State<X>, x: &X) -> Result<()> {
(self)(state, x)
}
}
/// User defined key bindings
pub type KeyBindings<X> = HashMap<KeyCode, Box<dyn KeyEventHandler<X>>>;
/// An action to be run in response to a mouse event
pub trait MouseEventHandler<X>
where
X: XConn,
{
/// Call this handler with the current window manager state and mouse state
fn call(&mut self, evt: &MouseEvent, state: &mut State<X>, x: &X) -> Result<()>;
}
impl<X: XConn> fmt::Debug for Box<dyn MouseEventHandler<X>> {
fn | (&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("KeyEventHandler").finish()
}
}
impl<F, X> MouseEventHandler<X> for F
where
F: FnMut(&MouseEvent, &mut State<X>, &X) -> Result<()>,
X: XConn,
{
fn call(&mut self, evt: &MouseEvent, state: &mut State<X>, x: &X) -> Result<()> {
(self)(evt, state, x)
}
}
/// User defined mouse bindings
pub type MouseBindings<X> = HashMap<(MouseEventKind, MouseState), Box<dyn MouseEventHandler<X>>>;
/// Abstraction layer for working with key presses
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum KeyPress {
/// A raw character key
Utf8(String),
/// Return / enter key
Return,
/// Escape
Escape,
/// Tab
Tab,
/// Backspace
Backspace,
/// Delete
Delete,
/// PageUp
PageUp,
/// PageDown
PageDown,
/// Up
Up,
/// Down
Down,
/// Left
Left,
/// Right
Right,
}
#[cfg(feature = "keysyms")]
impl TryFrom<XKeySym> for KeyPress {
type Error = std::string::FromUtf8Error;
fn try_from(s: XKeySym) -> std::result::Result<KeyPress, Self::Error> {
Ok(match s {
XKeySym::XK_Return | XKeySym::XK_KP_Enter | XKeySym::XK_ISO_Enter => KeyPress::Return,
XKeySym::XK_Escape => KeyPress::Escape,
XKeySym::XK_Tab | XKeySym::XK_ISO_Left_Tab | XKeySym::XK_KP_Tab => KeyPress::Tab,
XKeySym::XK_BackSpace => KeyPress::Backspace,
XKeySym::XK_Delete | XKeySym::XK_KP_Delete => KeyPress::Delete,
XKeySym::XK_Page_Up | XKeySym::XK_KP_Page_Up => KeyPress::PageUp,
XKeySym::XK_Page_Down | XKeySym::XK_KP_Page_Down => KeyPress::PageDown,
XKeySym::XK_Up | XKeySym::XK_KP_Up => KeyPress::Up,
XKeySym::XK_Down | XKeySym::XK_KP_Down => KeyPress::Down,
XKeySym::XK_Left | XKeySym::XK_KP_Left => KeyPress::Left,
XKeySym::XK_Right | XKeySym::XK_KP_Right => KeyPress::Right,
s => KeyPress::Utf8(s.as_utf8_string()?),
})
}
}
/// A u16 X key-code bitmask
pub type KeyCodeMask = u16;
/// A u8 X key-code enum value
pub type KeyCodeValue = u8;
/// A key press and held modifiers
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct KeyCode {
/// The held modifier mask
pub mask: KeyCodeMask,
/// The key code that was held
pub code: KeyCodeValue,
}
impl KeyCode {
/// Create a new [KeyCode] from this one that removes the given mask
pub fn ignoring_modifier(&self, mask: KeyCodeMask) -> KeyCode {
KeyCode {
mask: self.mask & !mask,
code: self.code,
}
}
}
/// Known mouse buttons for binding actions
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum MouseButton {
/// 1
Left,
/// 2
Middle,
/// 3
Right,
/// 4
ScrollUp,
/// 5
ScrollDown,
}
impl From<MouseButton> for u8 {
fn from(b: MouseButton) -> u8 {
match b {
MouseButton::Left => 1,
MouseButton::Middle => 2,
MouseButton::Right => 3,
MouseButton::ScrollUp => 4,
MouseButton::ScrollDown => 5,
}
}
}
impl TryFrom<u8> for MouseButton {
type Error = Error;
fn try_from(n: u8) -> Result<Self> {
match n {
1 => Ok(Self::Left),
2 => Ok(Self::Middle),
3 => Ok(Self::Right),
4 => Ok(Self::ScrollUp),
5 => Ok(Self::ScrollDown),
_ => Err(Error::UnknownMouseButton { button: n }),
}
}
}
/// Known modifier keys for bindings
#[derive(Debug, EnumIter, PartialEq, Eq, Hash, Clone, Copy, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModifierKey {
/// Control
Ctrl,
/// Alt
Alt,
/// Shift
Shift,
/// Meta / super / windows
Meta,
}
impl ModifierKey {
fn was_held(&self, mask: u16) -> bool {
mask & u16::from(*self) > 0
}
}
impl From<ModifierKey> for u16 {
fn from(m: ModifierKey) -> u16 {
(match m {
ModifierKey::Shift => 1 << 0,
ModifierKey::Ctrl => 1 << 2,
ModifierKey::Alt => 1 << 3,
ModifierKey::Meta => 1 << 6,
}) as u16
}
}
impl TryFrom<&str> for ModifierKey {
type Error = Error;
fn try_from(s: &str) -> std::result::Result<Self, Self::Error> {
match s {
"C" => Ok(Self::Ctrl),
"A" => Ok(Self::Alt),
"S" => Ok(Self::Shift),
"M" => Ok(Self::Meta),
_ => Err(Error::UnknownModifier { name: s.to_owned() }),
}
}
}
/// A mouse state specification indicating the button and modifiers held
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MouseState {
/// The [MouseButton] being held
pub button: MouseButton,
/// All [ModifierKey]s being held
pub modifiers: Vec<ModifierKey>,
}
impl MouseState {
/// Construct a new MouseState
pub fn new(button: MouseButton, mut modifiers: Vec<ModifierKey>) -> Self {
modifiers.sort();
Self { button, modifiers }
}
/// Parse raw mouse state values into a [MouseState]
pub fn from_detail_and_state(detail: u8, state: u16) -> Result<Self> {
Ok(Self {
button: MouseButton::try_from(detail)?,
modifiers: ModifierKey::iter().filter(|m| m.was_held(state)).collect(),
})
}
/// The xcb bitmask for this [MouseState]
pub fn mask(&self) -> u16 {
self.modifiers
.iter()
.fold(0, |acc, &val| acc | u16::from(val))
}
/// The xcb button ID for this [MouseState]
pub fn button(&self) -> u8 {
self.button.into()
}
}
/// The types of mouse events represented by a MouseEvent
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum MouseEventKind {
/// A button was pressed
Press,
/// A button was released
Release,
/// The mouse was moved while a button was held
Motion,
}
/// A mouse movement or button event
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct MouseEvent {
/// The ID of the window that was contained the click
pub id: Xid,
/// Absolute coordinate of the event
pub rpt: Point,
/// Coordinate of the event relative to top-left of the window itself
pub wpt: Point,
/// The modifier and button code that was received
pub state: MouseState,
/// Was this press, release or motion?
pub kind: MouseEventKind,
}
impl MouseEvent {
/// Construct a new [MouseEvent] from raw data
pub fn new(
id: Xid,
rx: i16,
ry: i16,
ex: i16,
ey: i16,
state: MouseState,
kind: MouseEventKind,
) -> Self {
MouseEvent {
id,
rpt: Point::new(rx as u32, ry as u32),
wpt: Point::new(ex as u32, ey as u32),
state,
kind,
}
}
}
| fmt | identifier_name |
pulseoxLoader.py | # Utilities
import os
import pdb
import glob
# Numerical
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Preprocessing / Filtering
from scipy.ndimage import gaussian_filter1d
from sklearn.preprocessing import normalize
class OxData:
def __init__(self, csv_path, label_path):
self.path = csv_path
self.data = None
self.label_path = label_path
def data_file_list(self):
files = []
for fpath in sorted(glob.glob(self.path)):
if fpath == self.label_path:
continue
files.append(os.path.split(fpath)[-1])
return files
### Creating data matrix ###
def get_data_matrix(self, **kwargs):
'''
@args:
path: kwargs:
'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold-out':[]
@return:
X - np array of all processed and normalized data
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold_out': [], 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
if self.data is not None and not args['reparse']:
return self.data
hold_outs = set(args['hold_out'])
data_candidates = []
for fpath in sorted(glob.glob(self.path)):
fname = os.path.split(fpath)[-1]
if fname in hold_outs:
continue # don't add this to the training matrix
if fpath == self.label_path:
continue
temp_data = self.csv_to_data(fpath, **kwargs)
data_candidates.append(temp_data)
self.data = np.concatenate(data_candidates)
self.data = normalize(self.data)
return self.data
def csv_to_data(self, fpath, **kwargs):
'''
convert the passed file path fpath to a data matrix X
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'normalize': False, 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
sp02, pulse, start_time = self.day_to_df(fpath)
# Preprocess for FFT
sp02_wave = self.preproc_df_fft(
sp02, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
pulse_wave = self.preproc_df_fft(
pulse, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
# Perform FFT, either sliding or jumping window
if args['sliding_fft']:
sp02_fft = self.gen_fft_sliding(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft_sliding(pulse_wave, args['fft_size'])
else:
sp02_fft = self.gen_fft(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft(pulse_wave, args['fft_size'])
# Can separate magnitudes and angles as follows:
# sp02_mag, sp02_angle = sp02_fft[:,:sp02_fft.sp02_fft[1]//2], sp02_fft[:,sp02_fft.shape[1]//2:]
# pulse_mag, pulse_angle = pulse_fft[:,:pulse_fft.pulse_fft[1]//2], pulse_fft[:,pulse_fft.shape[1]//2:]
# Normalize results and combine to final data matrix
temp_data = np.concatenate((sp02_fft, pulse_fft), axis=1)
if args['normalize']:
temp_data = normalize(temp_data)
return temp_data
### Label Generation ###
def labels_to_df(self, path, date):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
date: date string in MM/DD/YYYY format to select the date to draw data from
@return:
label_df: pandas dataframe containing the labels for a given days' meal
'''
df = pd.read_csv(path)
df = df[df['Date'] == date]
return df
def gen_ground_truth(self, label_df, pre=0, eat=1, post=2):
'''
@args:
label_df: pandas dataframe containing the labels for a given days' meal
optional pre, eat, post specify value for each state, default to 0, 1, 2 respectively
@return
labels: np array of size max(label_df['End_min']) containing values at
each minute index indicating eating state, 0: pre-eating, 1: eating, 2: post-eating
'''
ate = False
labels = np.zeros(np.max(label_df['End_min']), dtype=int)
for index, row in label_df.iterrows():
state = row['Label']
if state == 'eating':
ate = True
labels[row['Start_min']:row['End_min']] = eat
elif state == 'break' and not ate:
labels[row['Start_min']:row['End_min']] = pre
else:
labels[row['Start_min']:row['End_min']] = post
return labels
### Data Loading ###
def day_to_df(self, path):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
@return:
return: tuple(sp02, pulse, start_time),
sp02 = pd.DataFrame of SP02 data
pulse = pd.DataFrame of Pulse data
start_time = UTC start time in s
'''
df = pd.read_csv(path)
time = df['Session'][0]
df = df.drop(columns='Session')
df['DateTime'] -= time # All times start from 0 for session
sp02 = df[df['Event'] == 'SPO2']
sp02 = sp02.drop(columns='Event')
pulse = df[df['Event'] == 'Pulse']
pulse = pulse.drop(columns='Event')
pulse = pulse.reset_index(drop=True)
sp02 = sp02.reset_index(drop=True)
del df
return sp02, pulse, time
# Waveform Manipulations
def impute_wave(self, wave):
'''
@info:
impute missing values in waveform
@args:
wave: np array with floats corresponding to waveform
@return:
None, modifies waveform array in place
'''
n = wave.shape[0]
j = 0
step = None
for i in range(n-1):
if i >= j and wave[i+1] == 0:
j = i+1
while j < n and wave[j] == 0:
j += 1
if j < n:
step = float(wave[j] - wave[i]) / float(j-i+1)
elif wave[i] == 0:
wave[i] = wave[i-1]+step
### Waveform Manipulations ###
def moving_avg(self, wave, N):
'''
@info:
run moving average filter kernal of size N over the wave
@params:
wave: np array if size > N
n: int, the size of the filter kernal
@return:
nothing, modifies wave in place
'''
n = wave.shape[0]
if N > n or N % 2 != 1:
print("Filter kernal needs to be odd sized.")
return
running_sum = np.sum(wave[:N])
N2 = N//2
for i in range(N2, n - N2 - 1):
wave[i] = float(running_sum) / float(N)
running_sum -= wave[i-N2]
running_sum += wave[i+N2+1]
# cumsum = np.cumsum(np.insert(wave, 0, 0))
# return (cumsum[N:] - cumsum[:-N]) / float(N)
def preproc_df_fft(self, df, **kwargs):
|
### Generation of feature matrix from day-data ###
# TODO: Turn this function into Gen-Features, fator FFT part out, use it to generate feature array X
def gen_fft_sliding(self, wave, kernel_size):
"""
Generate FFT with a sliding window, rather than a jumping window
@params:
* wave: numpy array (n,), containing the waveform to generate the FFT for
* kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
@returns:
list (n-kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
"""
wave_length = wave.shape[0]
last_section = wave_length - kernel_size
result = np.zeros((last_section, kernel_size))
for i in range(last_section):
section = wave[i:i+kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[i, :] = row
return result
def gen_fft(self, wave, kernel_size=256, isPlot=False, plots=None):
'''
@params:
wave: numpy array (n,), containing the waveform to generate the FFT for
kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
isPlot: bool, if true, produce graphs of output
plots: list, intervals to be printed if isPlot is true
@returns:
list (n//kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
'''
if plots:
plots = set(plots)
else:
plots = set()
wave_length = wave.shape[0]
last_section = wave_length // kernel_size
result = np.zeros((last_section, kernel_size))
for interval in range(last_section):
section = wave[interval*kernel_size:(interval+1)*kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
if isPlot and interval in plots:
plt.clf()
plt.title("Raw Post-FFT Data, Interval: {}".format(interval))
plt.plot(t, sp.real, t, sp.imag)
plt.ylim(-100, 100)
plt.xlim(-5, tlen) # Second half of FFT is duplicate info
plt.show()
plt.clf()
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[interval, :] = row
return result
def plot_fft(self, magnitudes, angles, offset=1):
'''
Plot the given np arrays of magnitude and phase angle,
with an optional offset (i.e. to ignore DC component offset=1)
'''
ts = np.arange(magnitudes.shape[0])[offset:]
# Ignoring DC component which is very large
plt.plot(ts, magnitudes[offset:])
plt.title("Magnitudes Squared vs Frequency")
plt.ylabel("Magnitude Squared")
sample_freq = 1000
bin_freq = sample_freq//magnitudes.shape[0]
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
# plt.ylim(-100, 100)
plt.show()
plt.clf()
plt.plot(ts, angles[offset:])
plt.title("Phase Angles vs Frequency")
plt.ylabel("Phase Angle (Radians)")
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
plt.show()
plt.clf()
| '''
Pre-process the given dataframe df (obtained using the day_to_df function)
to space all points into time buckets so FFT output is meaningful. Then perform
imputation to create the envalope of the waveform, and run a amoving average filter
kernel of size avg_size on it.
plot=False, sigma=1, smoothing='gaussian'):
**kwargs:
plot=True/False -- plots garph (default False)
sigma=int() -- smoothness of gaussian filter (default 1), moving avg size of moving avg kernel
smoothing='gaussian'/'avg' -- whether to use gaussian or moving avg filter (default gassian)
'''
args = {'sigma': 1, 'smoothing': 'gaussian', 'plot': False}
for key, value in kwargs.items():
if key in args:
args[key] = value
result = np.zeros(np.max(df['DateTime'])+1)
# TODO: fix this, DateTimes come in duplicate pairs
result[df['DateTime']] = df['Data/Duration']
if args['plot']:
plt.clf()
wave = np.array(result, copy=True)
self.impute_wave(wave)
if args['smoothing'] == 'gaussian':
wave = gaussian_filter1d(wave, args['sigma'])
else:
self.moving_avg(wave, args['sigma'])
plt.clf()
plt.plot(np.arange(wave.shape[0]), result)
plt.plot(np.arange(wave.shape[0]), wave, 'r:')
plt.legend(["Raw Time Indexed", "Smoothed & Imputed"])
plt.xlabel("Time (ms)")
plt.ylabel(r"Magnitude (of $SpO_{2}$)")
plt.title(r"Time based indexing, Imputation, and Smoothing of $SpO_{2}$ Data")
plt.xlim(20, 1000)
plt.ylim(85, 105)
return wave
else:
self.impute_wave(result)
if args['smoothing'] == 'gaussian':
result = gaussian_filter1d(result, args['sigma'])
else:
self.moving_avg(result, args['sigma'])
return result | identifier_body |
pulseoxLoader.py | # Utilities
import os
import pdb
import glob
# Numerical
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Preprocessing / Filtering
from scipy.ndimage import gaussian_filter1d
from sklearn.preprocessing import normalize
class OxData:
def __init__(self, csv_path, label_path):
self.path = csv_path
self.data = None
self.label_path = label_path
def data_file_list(self):
files = []
for fpath in sorted(glob.glob(self.path)):
if fpath == self.label_path:
continue
files.append(os.path.split(fpath)[-1])
return files
### Creating data matrix ###
def get_data_matrix(self, **kwargs):
'''
@args:
path: kwargs:
'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold-out':[]
@return:
X - np array of all processed and normalized data
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold_out': [], 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
if self.data is not None and not args['reparse']:
return self.data
hold_outs = set(args['hold_out'])
data_candidates = []
for fpath in sorted(glob.glob(self.path)):
fname = os.path.split(fpath)[-1]
if fname in hold_outs:
continue # don't add this to the training matrix
if fpath == self.label_path:
continue
temp_data = self.csv_to_data(fpath, **kwargs)
data_candidates.append(temp_data)
self.data = np.concatenate(data_candidates)
self.data = normalize(self.data)
return self.data
def csv_to_data(self, fpath, **kwargs):
'''
convert the passed file path fpath to a data matrix X
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'normalize': False, 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
sp02, pulse, start_time = self.day_to_df(fpath)
# Preprocess for FFT
sp02_wave = self.preproc_df_fft(
sp02, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
pulse_wave = self.preproc_df_fft(
pulse, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
# Perform FFT, either sliding or jumping window
if args['sliding_fft']:
sp02_fft = self.gen_fft_sliding(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft_sliding(pulse_wave, args['fft_size'])
else:
sp02_fft = self.gen_fft(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft(pulse_wave, args['fft_size'])
# Can separate magnitudes and angles as follows:
# sp02_mag, sp02_angle = sp02_fft[:,:sp02_fft.sp02_fft[1]//2], sp02_fft[:,sp02_fft.shape[1]//2:]
# pulse_mag, pulse_angle = pulse_fft[:,:pulse_fft.pulse_fft[1]//2], pulse_fft[:,pulse_fft.shape[1]//2:]
# Normalize results and combine to final data matrix
temp_data = np.concatenate((sp02_fft, pulse_fft), axis=1)
if args['normalize']:
temp_data = normalize(temp_data)
return temp_data
### Label Generation ###
def labels_to_df(self, path, date):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
date: date string in MM/DD/YYYY format to select the date to draw data from
@return:
label_df: pandas dataframe containing the labels for a given days' meal
'''
df = pd.read_csv(path)
df = df[df['Date'] == date]
return df
def gen_ground_truth(self, label_df, pre=0, eat=1, post=2):
'''
@args:
label_df: pandas dataframe containing the labels for a given days' meal
optional pre, eat, post specify value for each state, default to 0, 1, 2 respectively
@return
labels: np array of size max(label_df['End_min']) containing values at
each minute index indicating eating state, 0: pre-eating, 1: eating, 2: post-eating
'''
ate = False
labels = np.zeros(np.max(label_df['End_min']), dtype=int)
for index, row in label_df.iterrows():
state = row['Label']
if state == 'eating':
ate = True
labels[row['Start_min']:row['End_min']] = eat
elif state == 'break' and not ate:
labels[row['Start_min']:row['End_min']] = pre
else:
labels[row['Start_min']:row['End_min']] = post
return labels
### Data Loading ###
def day_to_df(self, path):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
@return:
return: tuple(sp02, pulse, start_time),
sp02 = pd.DataFrame of SP02 data
pulse = pd.DataFrame of Pulse data
start_time = UTC start time in s
'''
df = pd.read_csv(path)
time = df['Session'][0]
df = df.drop(columns='Session')
df['DateTime'] -= time # All times start from 0 for session
sp02 = df[df['Event'] == 'SPO2']
sp02 = sp02.drop(columns='Event')
pulse = df[df['Event'] == 'Pulse']
pulse = pulse.drop(columns='Event')
pulse = pulse.reset_index(drop=True)
sp02 = sp02.reset_index(drop=True)
del df
return sp02, pulse, time
# Waveform Manipulations
def impute_wave(self, wave):
'''
@info:
impute missing values in waveform
@args:
wave: np array with floats corresponding to waveform
@return:
None, modifies waveform array in place
'''
n = wave.shape[0]
j = 0
step = None
for i in range(n-1):
if i >= j and wave[i+1] == 0:
j = i+1
while j < n and wave[j] == 0:
j += 1
if j < n:
step = float(wave[j] - wave[i]) / float(j-i+1)
elif wave[i] == 0:
wave[i] = wave[i-1]+step
### Waveform Manipulations ###
def moving_avg(self, wave, N):
'''
@info:
run moving average filter kernal of size N over the wave
@params:
wave: np array if size > N
n: int, the size of the filter kernal
@return:
nothing, modifies wave in place
'''
n = wave.shape[0]
if N > n or N % 2 != 1:
print("Filter kernal needs to be odd sized.")
return
running_sum = np.sum(wave[:N])
N2 = N//2
for i in range(N2, n - N2 - 1):
wave[i] = float(running_sum) / float(N)
running_sum -= wave[i-N2]
running_sum += wave[i+N2+1]
# cumsum = np.cumsum(np.insert(wave, 0, 0))
# return (cumsum[N:] - cumsum[:-N]) / float(N)
def preproc_df_fft(self, df, **kwargs):
'''
Pre-process the given dataframe df (obtained using the day_to_df function)
to space all points into time buckets so FFT output is meaningful. Then perform
imputation to create the envalope of the waveform, and run a amoving average filter
kernel of size avg_size on it.
plot=False, sigma=1, smoothing='gaussian'):
**kwargs:
plot=True/False -- plots garph (default False)
sigma=int() -- smoothness of gaussian filter (default 1), moving avg size of moving avg kernel
smoothing='gaussian'/'avg' -- whether to use gaussian or moving avg filter (default gassian)
'''
args = {'sigma': 1, 'smoothing': 'gaussian', 'plot': False}
for key, value in kwargs.items():
if key in args:
args[key] = value
result = np.zeros(np.max(df['DateTime'])+1)
# TODO: fix this, DateTimes come in duplicate pairs
result[df['DateTime']] = df['Data/Duration']
if args['plot']:
plt.clf()
wave = np.array(result, copy=True)
self.impute_wave(wave)
if args['smoothing'] == 'gaussian':
wave = gaussian_filter1d(wave, args['sigma'])
else:
self.moving_avg(wave, args['sigma'])
plt.clf()
plt.plot(np.arange(wave.shape[0]), result)
plt.plot(np.arange(wave.shape[0]), wave, 'r:')
plt.legend(["Raw Time Indexed", "Smoothed & Imputed"])
plt.xlabel("Time (ms)")
plt.ylabel(r"Magnitude (of $SpO_{2}$)")
plt.title(r"Time based indexing, Imputation, and Smoothing of $SpO_{2}$ Data")
plt.xlim(20, 1000)
plt.ylim(85, 105)
return wave
else:
self.impute_wave(result)
if args['smoothing'] == 'gaussian':
result = gaussian_filter1d(result, args['sigma'])
else:
self.moving_avg(result, args['sigma'])
return result
### Generation of feature matrix from day-data ###
# TODO: Turn this function into Gen-Features, fator FFT part out, use it to generate feature array X
def gen_fft_sliding(self, wave, kernel_size):
"""
Generate FFT with a sliding window, rather than a jumping window
@params:
* wave: numpy array (n,), containing the waveform to generate the FFT for
* kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
@returns:
list (n-kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
"""
wave_length = wave.shape[0]
last_section = wave_length - kernel_size
result = np.zeros((last_section, kernel_size))
for i in range(last_section):
section = wave[i:i+kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[i, :] = row
return result
def gen_fft(self, wave, kernel_size=256, isPlot=False, plots=None):
'''
@params:
wave: numpy array (n,), containing the waveform to generate the FFT for
kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
isPlot: bool, if true, produce graphs of output
plots: list, intervals to be printed if isPlot is true
@returns:
list (n//kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
'''
if plots:
plots = set(plots)
else:
plots = set()
wave_length = wave.shape[0]
last_section = wave_length // kernel_size
result = np.zeros((last_section, kernel_size))
for interval in range(last_section):
section = wave[interval*kernel_size:(interval+1)*kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
if isPlot and interval in plots:
plt.clf()
plt.title("Raw Post-FFT Data, Interval: {}".format(interval))
plt.plot(t, sp.real, t, sp.imag)
plt.ylim(-100, 100)
plt.xlim(-5, tlen) # Second half of FFT is duplicate info
plt.show()
plt.clf()
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[interval, :] = row
return result
def | (self, magnitudes, angles, offset=1):
'''
Plot the given np arrays of magnitude and phase angle,
with an optional offset (i.e. to ignore DC component offset=1)
'''
ts = np.arange(magnitudes.shape[0])[offset:]
# Ignoring DC component which is very large
plt.plot(ts, magnitudes[offset:])
plt.title("Magnitudes Squared vs Frequency")
plt.ylabel("Magnitude Squared")
sample_freq = 1000
bin_freq = sample_freq//magnitudes.shape[0]
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
# plt.ylim(-100, 100)
plt.show()
plt.clf()
plt.plot(ts, angles[offset:])
plt.title("Phase Angles vs Frequency")
plt.ylabel("Phase Angle (Radians)")
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
plt.show()
plt.clf()
| plot_fft | identifier_name |
pulseoxLoader.py | # Utilities
import os
import pdb
import glob
# Numerical
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Preprocessing / Filtering
from scipy.ndimage import gaussian_filter1d
from sklearn.preprocessing import normalize
class OxData:
def __init__(self, csv_path, label_path):
self.path = csv_path
self.data = None
self.label_path = label_path
def data_file_list(self):
files = []
for fpath in sorted(glob.glob(self.path)):
if fpath == self.label_path:
continue
files.append(os.path.split(fpath)[-1])
return files
### Creating data matrix ###
def get_data_matrix(self, **kwargs):
'''
@args:
path: kwargs:
'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold-out':[]
@return:
X - np array of all processed and normalized data
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold_out': [], 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
if self.data is not None and not args['reparse']:
return self.data
hold_outs = set(args['hold_out'])
data_candidates = []
for fpath in sorted(glob.glob(self.path)):
fname = os.path.split(fpath)[-1]
if fname in hold_outs:
continue # don't add this to the training matrix
if fpath == self.label_path:
continue
temp_data = self.csv_to_data(fpath, **kwargs)
data_candidates.append(temp_data)
self.data = np.concatenate(data_candidates)
self.data = normalize(self.data)
return self.data
def csv_to_data(self, fpath, **kwargs):
'''
convert the passed file path fpath to a data matrix X
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'normalize': False, 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
sp02, pulse, start_time = self.day_to_df(fpath)
# Preprocess for FFT
sp02_wave = self.preproc_df_fft(
sp02, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
pulse_wave = self.preproc_df_fft(
pulse, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
# Perform FFT, either sliding or jumping window
if args['sliding_fft']:
sp02_fft = self.gen_fft_sliding(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft_sliding(pulse_wave, args['fft_size'])
else:
sp02_fft = self.gen_fft(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft(pulse_wave, args['fft_size'])
# Can separate magnitudes and angles as follows:
# sp02_mag, sp02_angle = sp02_fft[:,:sp02_fft.sp02_fft[1]//2], sp02_fft[:,sp02_fft.shape[1]//2:]
# pulse_mag, pulse_angle = pulse_fft[:,:pulse_fft.pulse_fft[1]//2], pulse_fft[:,pulse_fft.shape[1]//2:]
# Normalize results and combine to final data matrix
temp_data = np.concatenate((sp02_fft, pulse_fft), axis=1)
if args['normalize']:
temp_data = normalize(temp_data)
return temp_data
### Label Generation ###
def labels_to_df(self, path, date):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
date: date string in MM/DD/YYYY format to select the date to draw data from
@return:
label_df: pandas dataframe containing the labels for a given days' meal
'''
df = pd.read_csv(path)
df = df[df['Date'] == date]
return df
def gen_ground_truth(self, label_df, pre=0, eat=1, post=2):
'''
@args:
label_df: pandas dataframe containing the labels for a given days' meal
optional pre, eat, post specify value for each state, default to 0, 1, 2 respectively
@return
labels: np array of size max(label_df['End_min']) containing values at
each minute index indicating eating state, 0: pre-eating, 1: eating, 2: post-eating
'''
ate = False
labels = np.zeros(np.max(label_df['End_min']), dtype=int)
for index, row in label_df.iterrows():
state = row['Label']
if state == 'eating':
ate = True
labels[row['Start_min']:row['End_min']] = eat
elif state == 'break' and not ate:
labels[row['Start_min']:row['End_min']] = pre
else:
labels[row['Start_min']:row['End_min']] = post
return labels
### Data Loading ###
def day_to_df(self, path):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
@return:
return: tuple(sp02, pulse, start_time),
sp02 = pd.DataFrame of SP02 data
pulse = pd.DataFrame of Pulse data
start_time = UTC start time in s
'''
df = pd.read_csv(path)
time = df['Session'][0]
df = df.drop(columns='Session')
df['DateTime'] -= time # All times start from 0 for session
sp02 = df[df['Event'] == 'SPO2']
sp02 = sp02.drop(columns='Event')
pulse = df[df['Event'] == 'Pulse']
pulse = pulse.drop(columns='Event')
pulse = pulse.reset_index(drop=True)
sp02 = sp02.reset_index(drop=True)
del df
return sp02, pulse, time
# Waveform Manipulations
def impute_wave(self, wave):
'''
@info:
impute missing values in waveform
@args:
wave: np array with floats corresponding to waveform
@return:
None, modifies waveform array in place
'''
n = wave.shape[0]
j = 0
step = None
for i in range(n-1):
if i >= j and wave[i+1] == 0:
j = i+1
while j < n and wave[j] == 0:
j += 1
if j < n:
step = float(wave[j] - wave[i]) / float(j-i+1)
elif wave[i] == 0:
wave[i] = wave[i-1]+step
### Waveform Manipulations ###
def moving_avg(self, wave, N):
'''
@info:
run moving average filter kernal of size N over the wave
@params:
wave: np array if size > N
n: int, the size of the filter kernal
@return:
nothing, modifies wave in place
'''
n = wave.shape[0]
if N > n or N % 2 != 1:
print("Filter kernal needs to be odd sized.")
return
running_sum = np.sum(wave[:N])
N2 = N//2
for i in range(N2, n - N2 - 1):
wave[i] = float(running_sum) / float(N)
running_sum -= wave[i-N2]
running_sum += wave[i+N2+1]
# cumsum = np.cumsum(np.insert(wave, 0, 0))
# return (cumsum[N:] - cumsum[:-N]) / float(N)
def preproc_df_fft(self, df, **kwargs):
'''
Pre-process the given dataframe df (obtained using the day_to_df function)
to space all points into time buckets so FFT output is meaningful. Then perform
imputation to create the envalope of the waveform, and run a amoving average filter
kernel of size avg_size on it.
plot=False, sigma=1, smoothing='gaussian'):
**kwargs:
plot=True/False -- plots garph (default False)
sigma=int() -- smoothness of gaussian filter (default 1), moving avg size of moving avg kernel
smoothing='gaussian'/'avg' -- whether to use gaussian or moving avg filter (default gassian)
'''
args = {'sigma': 1, 'smoothing': 'gaussian', 'plot': False}
for key, value in kwargs.items():
|
result = np.zeros(np.max(df['DateTime'])+1)
# TODO: fix this, DateTimes come in duplicate pairs
result[df['DateTime']] = df['Data/Duration']
if args['plot']:
plt.clf()
wave = np.array(result, copy=True)
self.impute_wave(wave)
if args['smoothing'] == 'gaussian':
wave = gaussian_filter1d(wave, args['sigma'])
else:
self.moving_avg(wave, args['sigma'])
plt.clf()
plt.plot(np.arange(wave.shape[0]), result)
plt.plot(np.arange(wave.shape[0]), wave, 'r:')
plt.legend(["Raw Time Indexed", "Smoothed & Imputed"])
plt.xlabel("Time (ms)")
plt.ylabel(r"Magnitude (of $SpO_{2}$)")
plt.title(r"Time based indexing, Imputation, and Smoothing of $SpO_{2}$ Data")
plt.xlim(20, 1000)
plt.ylim(85, 105)
return wave
else:
self.impute_wave(result)
if args['smoothing'] == 'gaussian':
result = gaussian_filter1d(result, args['sigma'])
else:
self.moving_avg(result, args['sigma'])
return result
### Generation of feature matrix from day-data ###
# TODO: Turn this function into Gen-Features, fator FFT part out, use it to generate feature array X
def gen_fft_sliding(self, wave, kernel_size):
"""
Generate FFT with a sliding window, rather than a jumping window
@params:
* wave: numpy array (n,), containing the waveform to generate the FFT for
* kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
@returns:
list (n-kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
"""
wave_length = wave.shape[0]
last_section = wave_length - kernel_size
result = np.zeros((last_section, kernel_size))
for i in range(last_section):
section = wave[i:i+kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[i, :] = row
return result
def gen_fft(self, wave, kernel_size=256, isPlot=False, plots=None):
'''
@params:
wave: numpy array (n,), containing the waveform to generate the FFT for
kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
isPlot: bool, if true, produce graphs of output
plots: list, intervals to be printed if isPlot is true
@returns:
list (n//kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
'''
if plots:
plots = set(plots)
else:
plots = set()
wave_length = wave.shape[0]
last_section = wave_length // kernel_size
result = np.zeros((last_section, kernel_size))
for interval in range(last_section):
section = wave[interval*kernel_size:(interval+1)*kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
if isPlot and interval in plots:
plt.clf()
plt.title("Raw Post-FFT Data, Interval: {}".format(interval))
plt.plot(t, sp.real, t, sp.imag)
plt.ylim(-100, 100)
plt.xlim(-5, tlen) # Second half of FFT is duplicate info
plt.show()
plt.clf()
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[interval, :] = row
return result
def plot_fft(self, magnitudes, angles, offset=1):
'''
Plot the given np arrays of magnitude and phase angle,
with an optional offset (i.e. to ignore DC component offset=1)
'''
ts = np.arange(magnitudes.shape[0])[offset:]
# Ignoring DC component which is very large
plt.plot(ts, magnitudes[offset:])
plt.title("Magnitudes Squared vs Frequency")
plt.ylabel("Magnitude Squared")
sample_freq = 1000
bin_freq = sample_freq//magnitudes.shape[0]
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
# plt.ylim(-100, 100)
plt.show()
plt.clf()
plt.plot(ts, angles[offset:])
plt.title("Phase Angles vs Frequency")
plt.ylabel("Phase Angle (Radians)")
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
plt.show()
plt.clf()
| if key in args:
args[key] = value | conditional_block |
pulseoxLoader.py | # Utilities
import os
import pdb
import glob
# Numerical
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Preprocessing / Filtering
from scipy.ndimage import gaussian_filter1d
from sklearn.preprocessing import normalize
class OxData:
def __init__(self, csv_path, label_path):
self.path = csv_path
self.data = None
self.label_path = label_path
def data_file_list(self):
files = []
for fpath in sorted(glob.glob(self.path)):
if fpath == self.label_path:
continue
files.append(os.path.split(fpath)[-1])
return files
### Creating data matrix ###
def get_data_matrix(self, **kwargs):
'''
@args:
path: kwargs:
'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold-out':[]
@return:
X - np array of all processed and normalized data
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'hold_out': [], 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
if self.data is not None and not args['reparse']:
return self.data
hold_outs = set(args['hold_out'])
data_candidates = []
for fpath in sorted(glob.glob(self.path)):
fname = os.path.split(fpath)[-1]
if fname in hold_outs:
continue # don't add this to the training matrix
| temp_data = self.csv_to_data(fpath, **kwargs)
data_candidates.append(temp_data)
self.data = np.concatenate(data_candidates)
self.data = normalize(self.data)
return self.data
def csv_to_data(self, fpath, **kwargs):
'''
convert the passed file path fpath to a data matrix X
'''
args = {'sigma': 1, 'smoothing': 'gaussian',
'plot': False, 'reparse': False,
'fft_size': 1024, 'normalize': False, 'sliding_fft':False}
for key, value in kwargs.items():
if key in args:
args[key] = value
sp02, pulse, start_time = self.day_to_df(fpath)
# Preprocess for FFT
sp02_wave = self.preproc_df_fft(
sp02, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
pulse_wave = self.preproc_df_fft(
pulse, smoothing=args['smoothing'], sigma=args['sigma'], plot=args['plot'])
# Perform FFT, either sliding or jumping window
if args['sliding_fft']:
sp02_fft = self.gen_fft_sliding(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft_sliding(pulse_wave, args['fft_size'])
else:
sp02_fft = self.gen_fft(sp02_wave, args['fft_size'])
pulse_fft = self.gen_fft(pulse_wave, args['fft_size'])
# Can separate magnitudes and angles as follows:
# sp02_mag, sp02_angle = sp02_fft[:,:sp02_fft.sp02_fft[1]//2], sp02_fft[:,sp02_fft.shape[1]//2:]
# pulse_mag, pulse_angle = pulse_fft[:,:pulse_fft.pulse_fft[1]//2], pulse_fft[:,pulse_fft.shape[1]//2:]
# Normalize results and combine to final data matrix
temp_data = np.concatenate((sp02_fft, pulse_fft), axis=1)
if args['normalize']:
temp_data = normalize(temp_data)
return temp_data
### Label Generation ###
def labels_to_df(self, path, date):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
date: date string in MM/DD/YYYY format to select the date to draw data from
@return:
label_df: pandas dataframe containing the labels for a given days' meal
'''
df = pd.read_csv(path)
df = df[df['Date'] == date]
return df
def gen_ground_truth(self, label_df, pre=0, eat=1, post=2):
'''
@args:
label_df: pandas dataframe containing the labels for a given days' meal
optional pre, eat, post specify value for each state, default to 0, 1, 2 respectively
@return
labels: np array of size max(label_df['End_min']) containing values at
each minute index indicating eating state, 0: pre-eating, 1: eating, 2: post-eating
'''
ate = False
labels = np.zeros(np.max(label_df['End_min']), dtype=int)
for index, row in label_df.iterrows():
state = row['Label']
if state == 'eating':
ate = True
labels[row['Start_min']:row['End_min']] = eat
elif state == 'break' and not ate:
labels[row['Start_min']:row['End_min']] = pre
else:
labels[row['Start_min']:row['End_min']] = post
return labels
### Data Loading ###
def day_to_df(self, path):
'''
@args:
path: os.path object representing path to OSCAR csv file to parse
@return:
return: tuple(sp02, pulse, start_time),
sp02 = pd.DataFrame of SP02 data
pulse = pd.DataFrame of Pulse data
start_time = UTC start time in s
'''
df = pd.read_csv(path)
time = df['Session'][0]
df = df.drop(columns='Session')
df['DateTime'] -= time # All times start from 0 for session
sp02 = df[df['Event'] == 'SPO2']
sp02 = sp02.drop(columns='Event')
pulse = df[df['Event'] == 'Pulse']
pulse = pulse.drop(columns='Event')
pulse = pulse.reset_index(drop=True)
sp02 = sp02.reset_index(drop=True)
del df
return sp02, pulse, time
# Waveform Manipulations
def impute_wave(self, wave):
'''
@info:
impute missing values in waveform
@args:
wave: np array with floats corresponding to waveform
@return:
None, modifies waveform array in place
'''
n = wave.shape[0]
j = 0
step = None
for i in range(n-1):
if i >= j and wave[i+1] == 0:
j = i+1
while j < n and wave[j] == 0:
j += 1
if j < n:
step = float(wave[j] - wave[i]) / float(j-i+1)
elif wave[i] == 0:
wave[i] = wave[i-1]+step
### Waveform Manipulations ###
def moving_avg(self, wave, N):
'''
@info:
run moving average filter kernal of size N over the wave
@params:
wave: np array if size > N
n: int, the size of the filter kernal
@return:
nothing, modifies wave in place
'''
n = wave.shape[0]
if N > n or N % 2 != 1:
print("Filter kernal needs to be odd sized.")
return
running_sum = np.sum(wave[:N])
N2 = N//2
for i in range(N2, n - N2 - 1):
wave[i] = float(running_sum) / float(N)
running_sum -= wave[i-N2]
running_sum += wave[i+N2+1]
# cumsum = np.cumsum(np.insert(wave, 0, 0))
# return (cumsum[N:] - cumsum[:-N]) / float(N)
def preproc_df_fft(self, df, **kwargs):
'''
Pre-process the given dataframe df (obtained using the day_to_df function)
to space all points into time buckets so FFT output is meaningful. Then perform
imputation to create the envalope of the waveform, and run a amoving average filter
kernel of size avg_size on it.
plot=False, sigma=1, smoothing='gaussian'):
**kwargs:
plot=True/False -- plots garph (default False)
sigma=int() -- smoothness of gaussian filter (default 1), moving avg size of moving avg kernel
smoothing='gaussian'/'avg' -- whether to use gaussian or moving avg filter (default gassian)
'''
args = {'sigma': 1, 'smoothing': 'gaussian', 'plot': False}
for key, value in kwargs.items():
if key in args:
args[key] = value
result = np.zeros(np.max(df['DateTime'])+1)
# TODO: fix this, DateTimes come in duplicate pairs
result[df['DateTime']] = df['Data/Duration']
if args['plot']:
plt.clf()
wave = np.array(result, copy=True)
self.impute_wave(wave)
if args['smoothing'] == 'gaussian':
wave = gaussian_filter1d(wave, args['sigma'])
else:
self.moving_avg(wave, args['sigma'])
plt.clf()
plt.plot(np.arange(wave.shape[0]), result)
plt.plot(np.arange(wave.shape[0]), wave, 'r:')
plt.legend(["Raw Time Indexed", "Smoothed & Imputed"])
plt.xlabel("Time (ms)")
plt.ylabel(r"Magnitude (of $SpO_{2}$)")
plt.title(r"Time based indexing, Imputation, and Smoothing of $SpO_{2}$ Data")
plt.xlim(20, 1000)
plt.ylim(85, 105)
return wave
else:
self.impute_wave(result)
if args['smoothing'] == 'gaussian':
result = gaussian_filter1d(result, args['sigma'])
else:
self.moving_avg(result, args['sigma'])
return result
### Generation of feature matrix from day-data ###
# TODO: Turn this function into Gen-Features, fator FFT part out, use it to generate feature array X
def gen_fft_sliding(self, wave, kernel_size):
"""
Generate FFT with a sliding window, rather than a jumping window
@params:
* wave: numpy array (n,), containing the waveform to generate the FFT for
* kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
@returns:
list (n-kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
"""
wave_length = wave.shape[0]
last_section = wave_length - kernel_size
result = np.zeros((last_section, kernel_size))
for i in range(last_section):
section = wave[i:i+kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[i, :] = row
return result
def gen_fft(self, wave, kernel_size=256, isPlot=False, plots=None):
'''
@params:
wave: numpy array (n,), containing the waveform to generate the FFT for
kernel_size: int, between 1 and n, size of the FFT window that will slide over the wave, should be power of 2
isPlot: bool, if true, produce graphs of output
plots: list, intervals to be printed if isPlot is true
@returns:
list (n//kernel_size, 2) containing, each row contains 2 np arrays of size
(kernel_size) containing magnitude and phase angle respectively, i.e. [[magnitude, phaseangle],...]
'''
if plots:
plots = set(plots)
else:
plots = set()
wave_length = wave.shape[0]
last_section = wave_length // kernel_size
result = np.zeros((last_section, kernel_size))
for interval in range(last_section):
section = wave[interval*kernel_size:(interval+1)*kernel_size]
sp = np.fft.fft(section)
t = np.arange(sp.shape[0])
tlen = len(t)//2
if isPlot and interval in plots:
plt.clf()
plt.title("Raw Post-FFT Data, Interval: {}".format(interval))
plt.plot(t, sp.real, t, sp.imag)
plt.ylim(-100, 100)
plt.xlim(-5, tlen) # Second half of FFT is duplicate info
plt.show()
plt.clf()
# Extract magnitude and angle info
real = sp.real[:tlen]
imag = sp.imag[:tlen]
magnitudes = np.sqrt((real)**2 + (imag)**2)
angles = np.arctan2(imag, real)
# print("mag:{}, ang:{}".format(magnitudes.shape, angles.shape))
row = np.concatenate((magnitudes, angles))
# print("mag:{}, ang:{}, row:{}".format(magnitudes.shape, angles.shape, row.shape))
result[interval, :] = row
return result
def plot_fft(self, magnitudes, angles, offset=1):
'''
Plot the given np arrays of magnitude and phase angle,
with an optional offset (i.e. to ignore DC component offset=1)
'''
ts = np.arange(magnitudes.shape[0])[offset:]
# Ignoring DC component which is very large
plt.plot(ts, magnitudes[offset:])
plt.title("Magnitudes Squared vs Frequency")
plt.ylabel("Magnitude Squared")
sample_freq = 1000
bin_freq = sample_freq//magnitudes.shape[0]
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
# plt.ylim(-100, 100)
plt.show()
plt.clf()
plt.plot(ts, angles[offset:])
plt.title("Phase Angles vs Frequency")
plt.ylabel("Phase Angle (Radians)")
plt.xlabel("Frequency in multiples of {} Hz".format(bin_freq))
plt.show()
plt.clf() | if fpath == self.label_path:
continue
| random_line_split |
pack.rs | //! SPDX-License-Identifier: Apache-2.0
//! Copyright (C) 2021 Arm Limited or its affiliates and Contributors. All rights reserved.
use serde::{Deserialize, Serialize};
use std::{
fmt, fs,
fs::File,
io,
io::{BufReader, Read, Write},
path::{Path, PathBuf},
};
use std::{fmt::Display, str::FromStr};
use crypto::digest::Digest;
use crypto::sha1::Sha1;
use log::info;
use zstd::stream::raw::DParameter;
use zstd::Decoder;
use super::constants::{
DEFAULT_WINDOW_LOG_MAX, PACKS_DIR, PACK_EXTENSION, PACK_HEADER_MAGIC, PACK_INDEX_EXTENSION,
};
use super::error::Error;
use super::repository::Repository;
use super::{algo::run_in_parallel, constants::DOT_PACK_INDEX_EXTENSION};
use crate::packidx::{FileEntry, ObjectChecksum, PackError, PackIndex};
use crate::{log::measure_ok, packidx::ObjectMetadata};
/// Pack and snapshots IDs can contain latin letter, digits or the following characters.
const EXTRA_ID_CHARS: &[char] = &['-', '_', '/'];
#[derive(Debug)]
/// Error used when parsing a [`SnapshotId`] fails.
pub enum IdError {
BadFormat(String),
InvalidPack(String),
InvalidSnapshot(String),
}
impl Display for IdError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::BadFormat(s) => write!(f, "Unrecognized identifier format '{}'!", s),
Self::InvalidPack(s) => write!(
f,
"Invalid pack identifier '{}'! Latin letters, digits, -, _ and / are allowed!",
s
),
Self::InvalidSnapshot(s) => write!(
f,
"Invalid snapshot identifier '{}'! Latin letters, digits, - and _ are allowed!",
s
),
}
}
}
impl std::error::Error for IdError {}
/// Identifies a pack file.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PackId {
Pack(String),
}
impl PackId {
/// from_index_path returns Some(PackId::Pack(index_path)) with the pack
/// index extension trimmed, if the input ends in the PACK_INDEX_EXTENSION.
pub fn from_index_path(index_path: String) -> Option<PackId> {
index_path
.strip_suffix(DOT_PACK_INDEX_EXTENSION)
.map(|s| PackId::Pack(s.to_owned()))
}
fn is_valid(s: &str) -> bool {
s.chars()
.all(|c| c.is_ascii_alphanumeric() || EXTRA_ID_CHARS.contains(&c))
}
}
impl FromStr for PackId {
type Err = IdError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if !PackId::is_valid(s) {
return Err(IdError::InvalidPack(s.to_owned()));
}
Ok(PackId::Pack(s.to_owned()))
}
}
impl Display for PackId {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
PackId::Pack(s) => write!(f, "{}", s),
}
}
}
/// Identifies a snapshot using a pack base filename [`Path::file_stem`] and a snapshot tag.
#[derive(PartialEq, Clone, Debug)]
pub struct SnapshotId {
pack: PackId,
tag: String,
}
impl SnapshotId {
/// Creates a [`SnapshotId`] from a pack and a snapshot tag
pub fn new(pack: PackId, tag: &str) -> Result<Self, IdError> {
if !Self::is_valid(tag) {
return Err(IdError::InvalidSnapshot(tag.to_owned()));
}
Ok(Self {
pack,
tag: tag.to_owned(),
})
}
pub fn pack(&self) -> &PackId {
&self.pack
}
pub fn tag(&self) -> &str {
&self.tag
}
fn is_valid(tag: &str) -> bool {
tag.chars()
.all(|c| c.is_ascii_alphanumeric() || EXTRA_ID_CHARS.contains(&c))
}
}
/// [`SnapshotId`] equality is a full equivalence relation.
impl Eq for SnapshotId {}
/// Prints the canonical form for [`SnapshotId`].
impl Display for SnapshotId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.pack, self.tag)
}
}
/// Parses a [`SnapshotId`] from a canonical form string 'pack_name:snapshot_name'.
impl FromStr for SnapshotId {
type Err = IdError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Some((pack, snapshot)) = s.trim_end().rsplit_once(':') {
if pack.is_empty() {
return Err(IdError::InvalidPack(pack.to_owned()));
}
if snapshot.is_empty() {
return Err(IdError::InvalidSnapshot(snapshot.to_owned()));
}
Ok(SnapshotId {
pack: PackId::from_str(pack)?,
tag: snapshot.to_owned(),
})
} else {
Err(IdError::BadFormat(s.to_owned()))
}
}
}
/// A magic constant that has no use other than to indicate a custom .pack header
/// stored in a Zstandard skippable frame.
const SKIPPABLE_MAGIC_MASK: u32 = 0x184D2A50;
/// Reads a Zstandard skippable frame from reader and writes the result to `buf`.
/// Returns the size of the frame, *not* the number of bytes read.
pub fn read_skippable_frame(mut reader: impl Read, buf: &mut Vec<u8>) -> io::Result<u64> {
fn read_u32_le(mut reader: impl Read) -> io::Result<u32> {
let mut bytes = [0u8; 4];
reader.read_exact(&mut bytes)?;
Ok(u32::from_le_bytes(bytes))
}
// Ensure this is a skippable frame.
let magic = read_u32_le(&mut reader)?;
if magic & SKIPPABLE_MAGIC_MASK != SKIPPABLE_MAGIC_MASK {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Not a Zstandard skippable frame!",
));
}
let frame_size = read_u32_le(&mut reader)?;
buf.resize(frame_size as usize, 0);
reader.read_exact(buf)?;
// Compute overall frame size.
Ok((std::mem::size_of::<u32>() * 2 + buf.len()) as u64)
}
/// Writes a Zstandard skippable frame with the given user data.
/// Returns the number of bytes written to writer (the size of the frame, including the magic number and header).
pub fn write_skippable_frame(mut writer: impl Write, buf: &[u8]) -> io::Result<u64> {
fn write_u32_le(mut writer: impl Write, value: u32) -> io::Result<()> {
writer.write_all(&value.to_le_bytes())
}
// Ensure this is a skippable frame.
write_u32_le(&mut writer, SKIPPABLE_MAGIC_MASK)?;
write_u32_le(&mut writer, buf.len() as u32)?;
writer.write_all(buf)?;
// Compute overall frame size.
Ok((std::mem::size_of::<u32>() * 2 + buf.len()) as u64)
}
/// The unidirectional stream of data stored in the pack.
enum PackReader {
Compressed(Decoder<'static, BufReader<File>>),
}
impl PackReader {
/// Consumes the specified number of bytes from the reader.
fn seek(&mut self, bytes: u64) -> io::Result<()> {
match self {
Self::Compressed(decoder) => {
io::copy(&mut decoder.by_ref().take(bytes), &mut io::sink()).map(|_| {})
}
}
}
// Reads the exact number of bytes into `buf`.
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
match self {
Self::Compressed(decoder) => decoder.read_exact(buf),
}
}
}
/// Represents a compressed Zstandard frame in the .pack file.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PackFrame {
/// The size of the frame, in bytes.
pub frame_size: u64,
/// The size of the data stream in the frame, once decompressed, in bytes.
pub decompressed_size: u64,
}
/// Represents the custom header in the beginning of a .pack file.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PackHeader {
/// Valid pack headers have this value set to [`PACK_HEADER_MAGIC`].
magic: u64,
/// The list of frames in the pack file, sorted by their byte offsets.
frames: Vec<PackFrame>,
}
impl PackHeader {
/// Create a new pack header.
pub fn new(frames: Vec<PackFrame>) -> Self {
Self {
magic: PACK_HEADER_MAGIC,
frames,
}
}
/// Verifies the header magic.
pub fn is_valid(&self) -> bool {
self.magic == PACK_HEADER_MAGIC
}
}
impl Default for PackHeader {
fn default() -> Self {
Self {
magic: PACK_HEADER_MAGIC,
frames: vec![],
}
}
}
/// Represents an pack file.
pub struct Pack {
/// The base filename ([`Path::file_stem`]) of the pack.
name: String,
/// The index for the pack.
index: PackIndex,
/// The header of the pack.
header: PackHeader,
/// PackReader instances for each frame in the .pack file.
frame_readers: Vec<PackReader>,
/// The file size of the pack (in bytes).
file_size: u64,
}
impl Pack {
/// Opens a pack file and its corresponding index.
///
/// # Arguments
///
/// * `pack_name` - The base filename ([`Path::file_stem`]) of the pack.
pub fn open<P>(repo: P, pack_name: &PackId) -> Result<Self, Error>
where
P: AsRef<Path>,
{
let PackId::Pack(pack_name) = pack_name;
let mut packs_data = repo.as_ref().join(&*Repository::data_dir());
packs_data.push(PACKS_DIR);
let mut pack_index_path = packs_data.join(pack_name);
pack_index_path.set_extension(PACK_INDEX_EXTENSION);
let mut pack_path = packs_data.join(pack_name);
pack_path.set_extension(PACK_EXTENSION);
info!("Reading pack index {:?}...", pack_index_path);
let pack_index = PackIndex::load(&pack_index_path)?;
info!("Opening pack file {:?}...", pack_path);
let (file_size, header, frame_readers) =
Self::open_pack(&pack_path).or_else(|_| Self::open_pack_legacy(&pack_path))?;
Ok(Pack {
name: pack_name.to_owned(),
index: pack_index,
header,
frame_readers,
file_size,
})
}
/// Opens the pack file for reading.
fn open_pack(pack_path: &Path) -> Result<(u64, PackHeader, Vec<PackReader>), Error> {
let file = File::open(&pack_path)?;
let file_size = file.metadata()?.len();
let mut reader = io::BufReader::new(file);
let mut header = vec![];
let header_size = read_skippable_frame(&mut reader, &mut header)?;
let header: PackHeader =
rmp_serde::decode::from_read(&header[..]).map_err(|_| Error::CorruptPack)?;
drop(reader);
if !header.is_valid() {
return Err(Error::CorruptPack);
}
let frame_offsets = compute_frame_offsets(&header.frames);
let frame_readers = frame_offsets
.iter()
.map(|offset| -> Result<_, Error> {
let mut reader = File::open(&pack_path)?;
io::Seek::seek(&mut reader, io::SeekFrom::Start(header_size + offset))?;
let mut reader = Decoder::new(reader)?;
reader.set_parameter(DParameter::WindowLogMax(DEFAULT_WINDOW_LOG_MAX))?;
// Wrap in a `PackReader`.
let reader = PackReader::Compressed(reader);
Ok(reader)
})
.collect::<Result<Vec<_>, _>>()?;
Ok((file_size, header, frame_readers))
}
/// Backwards-compatible open_pack for the legacy pack format (no skippable frame/no header).
fn open_pack_legacy(pack_path: &Path) -> Result<(u64, PackHeader, Vec<PackReader>), Error> {
let file = File::open(&pack_path)?;
let file_size = file.metadata()?.len();
// This manufactured pack header works for the current implementation. We might
// change how/whether we support the legacy pack format in the future...
let header = PackHeader::new(vec![PackFrame {
frame_size: file_size,
decompressed_size: u64::MAX,
}]);
let mut reader = Decoder::new(file)?;
reader.set_parameter(DParameter::WindowLogMax(DEFAULT_WINDOW_LOG_MAX))?;
let frame_reader = PackReader::Compressed(reader);
Ok((file_size, header, vec![frame_reader]))
}
/// The base filename ([`Path::file_stem`]) of the pack.
pub fn name(&self) -> &str {
&self.name
}
/// A reference to the pack index.
pub fn index(&self) -> &PackIndex {
&self.index
}
/// The size of the pack in bytes.
pub fn file_size(&self) -> u64 {
self.file_size
}
/// Extracts the specified entries from the pack into the specified directory.
/// This operation consumes the pack, since [`Pack`] objects contain a unidirectional
/// data stream that becomes unusable after it is read.
///
/// # Arguments
///
/// * `entries` - The list of entries to extract. These *must* be entries contained in the pack index.
/// * `output_dir` - The directory relative to which the files will be extracted.
/// * `verify` - Enable/disable checksum verification.
#[allow(unused_mut)]
#[allow(clippy::needless_collect)]
pub(crate) fn extract_entries<P>(
mut self,
entries: &[FileEntry],
output_dir: P,
verify: bool,
num_workers: u32,
) -> Result<(), Error>
where
P: AsRef<Path> + Sync,
{
if entries.is_empty() {
return Ok(());
}
let num_frames = self.header.frames.len();
assert_ne!(0, num_workers);
assert_ne!(0, num_frames);
if num_frames < num_workers as usize {
info!(
"Requested {} workers, but there are only {} frames!",
num_workers, num_frames
);
}
let num_workers = std::cmp::min(num_workers, num_frames as u32);
// Assign entries to the frames they reside in.
// The resulting entries will have offsets relative to their containing frame.
let frame_to_entries = assign_to_frames(&self.header.frames, entries)?;
// Compute and log total amount of seeking and decompression needed.
let bytes_to_decompress = frame_to_entries
.iter()
.flat_map(|entries| {
entries
.iter()
.map(|e| e.metadata.offset + e.metadata.offset)
.max()
})
.sum::<u64>();
info!(
"Decompressing {:.3} MiB of data...",
bytes_to_decompress as f64 / 1024f64 / 1024f64
);
// Collect required for run_in_parallel ExactSizeIterator argument.
let tasks = self
.frame_readers
.into_iter()
.zip(frame_to_entries.into_iter())
// Skip empty frames.
.filter(|(_, entries)| !entries.is_empty())
.collect::<Vec<_>>();
// Record start time
let start_time = std::time::Instant::now();
let results = run_in_parallel(
num_workers as usize,
tasks.into_iter(),
|(frame_reader, entries)| extract_files(frame_reader, &entries, &output_dir, verify),
);
// Collect stats
let stats = results
.into_iter()
.sum::<Result<ExtractStats, Error>>()?
// Convert the statistics into fractions, since summing the time per thread doesn't make much sense.
.fractions();
// Log statistics about the decompression performance
let real_time = std::time::Instant::now() - start_time;
info!(
"Decompression statistics ({:?})\n\
\tSeeking: {:.1}%\n\
\tObject decompression: {:.1}%\n\
\tVerification: {:.1}%\n\
\tWriting to disk: {:.1}%\n\
\tOther: {:.1}%",
real_time,
stats.seek_time * 100f64,
stats.object_time * 100f64,
stats.verify_time * 100f64,
stats.write_time * 100f64,
stats.other_time() * 100f64,
);
Ok(())
}
}
/// Verifies that the object has the expected checksum.
fn verify_object(buf: &[u8], exp_checksum: &ObjectChecksum) -> Result<(), Error> {
// Verify checksum
let mut checksum = [0u8; 20];
let mut hasher = Sha1::new();
hasher.input(buf);
hasher.result(&mut checksum);
if &checksum != exp_checksum |
Ok(())
}
/// Writes the object to the specified path, taking care
/// of adjusting file permissions.
fn write_object(buf: &[u8], path: &Path) -> Result<(), Error> {
fs::create_dir_all(path.parent().unwrap())?;
let mut f = File::create(path)?;
f.write_all(buf)?;
Ok(())
}
/// Returns a list of the frame offsets, computed
/// using the order and sizes of the given frames.
fn compute_frame_offsets(frames: &[PackFrame]) -> Vec<u64> {
let mut frame_offsets: Vec<_> = vec![0; frames.len()];
for i in 1..frame_offsets.len() {
frame_offsets[i] = frames[i - 1].frame_size + frame_offsets[i - 1];
}
frame_offsets
}
/// Returns a list of the data offsets, computed using the order and
/// decompressed sizes of the given frames.
fn compute_frame_decompressed_offset(frames: &[PackFrame]) -> Vec<u64> {
let mut frame_decompressed_offset: Vec<_> = vec![0; frames.len()];
for i in 1..frame_decompressed_offset.len() {
frame_decompressed_offset[i] =
frames[i - 1].decompressed_size + frame_decompressed_offset[i - 1];
}
frame_decompressed_offset
}
/// Groups and transforms the list of [`FileEntry`]-s taken from a pack index
/// (and with absolute offsets into the decompressed stream) into sets of
/// entries per frame, with adjusted (relative) offsets to that corresponding
/// Zstandard frame. Objects are assumed to not be split across two frames.
fn assign_to_frames(
frames: &[PackFrame],
entries: &[FileEntry],
) -> Result<Vec<Vec<FileEntry>>, Error> {
let frame_decompressed_offset: Vec<_> = compute_frame_decompressed_offset(frames);
// Figure out frame belonging of the objects,
// using the frame offset and the object offset.
let mut frames: Vec<Vec<FileEntry>> = (0..frames.len()).map(|_| Vec::new()).collect();
for entry in entries {
let frame_index = frame_decompressed_offset
.iter()
// Find the index of the frame containing the object (objects are
// assumed to not be split across two frames)
.rposition(|&x| x <= entry.metadata.offset)
.ok_or(Error::CorruptPack)?;
// Compute the offset relative to that frame.
let local_offset = entry.metadata.offset - frame_decompressed_offset[frame_index];
let local_entry = FileEntry::new(
entry.path.clone(),
entry.checksum,
ObjectMetadata {
offset: local_offset, // Replace global offset -> local offset
size: entry.metadata.size,
},
);
frames[frame_index].push(local_entry);
}
Ok(frames)
}
/// Used for timing the different parts of the extraction process.
#[derive(Default)]
struct ExtractStats {
total_time: f64,
seek_time: f64,
object_time: f64,
verify_time: f64,
write_time: f64,
}
impl ExtractStats {
fn other_time(&self) -> f64 {
self.total_time - (self.seek_time + self.object_time + self.verify_time + self.write_time)
}
/// Convert the statistics into fractions, taken relative to `self.total_time`.
fn fractions(&self) -> Self {
let norm_factor = 1f64 / self.total_time;
Self {
total_time: norm_factor * self.total_time,
seek_time: norm_factor * self.seek_time,
object_time: norm_factor * self.object_time,
verify_time: norm_factor * self.verify_time,
write_time: norm_factor * self.write_time,
}
}
}
impl std::ops::Add for ExtractStats {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
total_time: self.total_time + other.total_time,
seek_time: self.seek_time + other.seek_time,
object_time: self.object_time + other.object_time,
verify_time: self.verify_time + other.verify_time,
write_time: self.write_time + other.write_time,
}
}
}
impl std::iter::Sum<ExtractStats> for ExtractStats {
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = ExtractStats>,
{
let mut acc = ExtractStats::default();
for x in iter {
acc = acc + x;
}
acc
}
}
/// Extracts the given entries from the pack reader into the specified output directory.
/// Checksum verification can be toggled on/off.
fn extract_files(
mut reader: PackReader,
entries: &[FileEntry],
output_dir: impl AsRef<Path>,
verify: bool,
) -> Result<ExtractStats, Error> {
let mut entries: Vec<FileEntry> = entries.to_vec();
// Sort objects to allow for forward-only seeking
entries.sort_by(|x, y| {
let offset_x = x.metadata.offset;
let offset_y = y.metadata.offset;
offset_x.cmp(&offset_y)
});
// Used for timing
let mut stats = ExtractStats::default();
let total_time = measure_ok(|| -> Result<(), Error> {
// Decompression buffer
let mut buf = vec![];
let mut path_buf = PathBuf::new();
let mut pos = 0;
for entry in entries {
let metadata = &entry.metadata;
// Seek forward
let discard_bytes = metadata.offset - pos;
// Check if we need to read a new object.
// The current position in stream can be AFTER the object offset only
// if the previous and this object are the same. This is because the objects
// are sorted by offset, and the current position is set to the offset at the
// end of each object, after that object is consumed.
if pos <= metadata.offset {
stats.seek_time += measure_ok(|| reader.seek(discard_bytes))?.0.as_secs_f64();
// Resize buf
buf.resize(metadata.size as usize, 0);
// Read object
stats.object_time += measure_ok(|| reader.read_exact(&mut buf[..]))?
.0
.as_secs_f64();
pos = metadata.offset + metadata.size;
if verify {
stats.verify_time += measure_ok(|| verify_object(&buf[..], &entry.checksum))?
.0
.as_secs_f64();
}
}
// Output path
path_buf.clear();
path_buf.push(&output_dir);
path_buf.push(&entry.path);
stats.write_time += measure_ok(|| write_object(&buf[..], &path_buf))?
.0
.as_secs_f64();
}
Ok(())
})?
.0;
stats.total_time = total_time.as_secs_f64();
Ok(stats)
}
#[cfg(test)]
mod tests {
use super::*;
fn make_md(offset: u64, size: u64) -> ObjectMetadata {
ObjectMetadata { offset, size }
}
#[test]
fn pack_id_validation_works() {
// VALID
assert!(PackId::is_valid("ABCD"));
assert!(PackId::is_valid("abcd"));
assert!(PackId::is_valid("____"));
assert!(PackId::is_valid("----"));
assert!(PackId::is_valid("ABCD-132_TAG"));
// NOT VALID
// spaces
assert!(!PackId::is_valid("Some Text"));
// non-latin alphabets
assert!(!PackId::is_valid("това-е-тест"));
// non-letter symbols other than - and _
assert!(!PackId::is_valid("QWERTY-^!$^%^@!#"));
}
#[test]
fn snapshot_tag_validation_works() {
// VALID
assert!(SnapshotId::is_valid("ABCD"));
assert!(SnapshotId::is_valid("abcd"));
assert!(SnapshotId::is_valid("____"));
assert!(SnapshotId::is_valid("----"));
assert!(SnapshotId::is_valid("ABCD-132_TAG"));
// NOT VALID
// spaces
assert!(!SnapshotId::is_valid("Some Text"));
// non-latin alphabets
assert!(!SnapshotId::is_valid("това-е-тест"));
// non-letter symbols other than - and _
assert!(!SnapshotId::is_valid("QWERTY-^!$^%^@!#"));
}
#[test]
fn assign_to_frames_single_works() {
let frames = [PackFrame {
frame_size: 100,
decompressed_size: 1000,
}];
let entries = [
FileEntry::new("A".into(), [0; 20], make_md(50, 1)),
FileEntry::new("B".into(), [1; 20], make_md(50, 1)),
];
let result = assign_to_frames(&frames, &entries).unwrap();
assert_eq!(1, result.len());
assert_eq!(&entries, result[0].as_slice());
}
#[test]
fn assign_to_frames_multiple_works() {
let frames = [
PackFrame {
frame_size: 100,
decompressed_size: 1000,
},
PackFrame {
frame_size: 100,
decompressed_size: 1000,
},
];
let entries = [
FileEntry::new("A".into(), [0; 20], make_md(800, 200)),
FileEntry::new("B".into(), [1; 20], make_md(1200, 200)),
];
let frame_1_entries = [
// Offset is same
FileEntry::new("A".into(), [0; 20], make_md(800, 200)),
];
let frame_2_entries = [
// Offset 1200 -> 200
FileEntry::new("B".into(), [1; 20], make_md(200, 200)),
];
let result = assign_to_frames(&frames, &entries).unwrap();
assert_eq!(2, result.len());
assert_eq!(&frame_1_entries, result[0].as_slice());
assert_eq!(&frame_2_entries, result[1].as_slice());
}
}
| {
return Err(PackError::ChecksumMismatch(*exp_checksum, checksum).into());
} | conditional_block |
pack.rs | //! SPDX-License-Identifier: Apache-2.0
//! Copyright (C) 2021 Arm Limited or its affiliates and Contributors. All rights reserved.
use serde::{Deserialize, Serialize};
use std::{
fmt, fs,
fs::File,
io,
io::{BufReader, Read, Write},
path::{Path, PathBuf},
};
use std::{fmt::Display, str::FromStr};
use crypto::digest::Digest;
use crypto::sha1::Sha1;
use log::info;
use zstd::stream::raw::DParameter;
use zstd::Decoder;
use super::constants::{
DEFAULT_WINDOW_LOG_MAX, PACKS_DIR, PACK_EXTENSION, PACK_HEADER_MAGIC, PACK_INDEX_EXTENSION,
};
use super::error::Error;
use super::repository::Repository;
use super::{algo::run_in_parallel, constants::DOT_PACK_INDEX_EXTENSION};
use crate::packidx::{FileEntry, ObjectChecksum, PackError, PackIndex};
use crate::{log::measure_ok, packidx::ObjectMetadata};
/// Pack and snapshots IDs can contain latin letter, digits or the following characters.
const EXTRA_ID_CHARS: &[char] = &['-', '_', '/'];
#[derive(Debug)]
/// Error used when parsing a [`SnapshotId`] fails.
pub enum IdError {
BadFormat(String),
InvalidPack(String),
InvalidSnapshot(String),
}
impl Display for IdError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::BadFormat(s) => write!(f, "Unrecognized identifier format '{}'!", s),
Self::InvalidPack(s) => write!(
f,
"Invalid pack identifier '{}'! Latin letters, digits, -, _ and / are allowed!",
s
),
Self::InvalidSnapshot(s) => write!(
f,
"Invalid snapshot identifier '{}'! Latin letters, digits, - and _ are allowed!",
s
),
}
}
}
impl std::error::Error for IdError {}
/// Identifies a pack file.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PackId {
Pack(String),
}
impl PackId {
/// from_index_path returns Some(PackId::Pack(index_path)) with the pack
/// index extension trimmed, if the input ends in the PACK_INDEX_EXTENSION.
pub fn from_index_path(index_path: String) -> Option<PackId> {
index_path
.strip_suffix(DOT_PACK_INDEX_EXTENSION)
.map(|s| PackId::Pack(s.to_owned()))
}
fn is_valid(s: &str) -> bool {
s.chars()
.all(|c| c.is_ascii_alphanumeric() || EXTRA_ID_CHARS.contains(&c))
}
}
impl FromStr for PackId {
type Err = IdError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if !PackId::is_valid(s) {
return Err(IdError::InvalidPack(s.to_owned()));
}
Ok(PackId::Pack(s.to_owned()))
}
}
impl Display for PackId {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
PackId::Pack(s) => write!(f, "{}", s),
}
}
}
/// Identifies a snapshot using a pack base filename [`Path::file_stem`] and a snapshot tag.
#[derive(PartialEq, Clone, Debug)]
pub struct SnapshotId {
pack: PackId,
tag: String,
}
impl SnapshotId {
/// Creates a [`SnapshotId`] from a pack and a snapshot tag
pub fn new(pack: PackId, tag: &str) -> Result<Self, IdError> {
if !Self::is_valid(tag) {
return Err(IdError::InvalidSnapshot(tag.to_owned()));
}
Ok(Self {
pack,
tag: tag.to_owned(),
})
}
pub fn pack(&self) -> &PackId {
&self.pack
}
pub fn tag(&self) -> &str {
&self.tag
}
fn is_valid(tag: &str) -> bool {
tag.chars()
.all(|c| c.is_ascii_alphanumeric() || EXTRA_ID_CHARS.contains(&c))
}
}
/// [`SnapshotId`] equality is a full equivalence relation.
impl Eq for SnapshotId {}
/// Prints the canonical form for [`SnapshotId`].
impl Display for SnapshotId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.pack, self.tag)
}
}
/// Parses a [`SnapshotId`] from a canonical form string 'pack_name:snapshot_name'.
impl FromStr for SnapshotId {
type Err = IdError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Some((pack, snapshot)) = s.trim_end().rsplit_once(':') {
if pack.is_empty() {
return Err(IdError::InvalidPack(pack.to_owned()));
}
if snapshot.is_empty() {
return Err(IdError::InvalidSnapshot(snapshot.to_owned()));
}
Ok(SnapshotId {
pack: PackId::from_str(pack)?,
tag: snapshot.to_owned(),
})
} else {
Err(IdError::BadFormat(s.to_owned()))
}
}
}
/// A magic constant that has no use other than to indicate a custom .pack header
/// stored in a Zstandard skippable frame.
const SKIPPABLE_MAGIC_MASK: u32 = 0x184D2A50;
/// Reads a Zstandard skippable frame from reader and writes the result to `buf`.
/// Returns the size of the frame, *not* the number of bytes read.
pub fn read_skippable_frame(mut reader: impl Read, buf: &mut Vec<u8>) -> io::Result<u64> {
fn read_u32_le(mut reader: impl Read) -> io::Result<u32> {
let mut bytes = [0u8; 4];
reader.read_exact(&mut bytes)?;
Ok(u32::from_le_bytes(bytes))
}
// Ensure this is a skippable frame.
let magic = read_u32_le(&mut reader)?;
if magic & SKIPPABLE_MAGIC_MASK != SKIPPABLE_MAGIC_MASK {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Not a Zstandard skippable frame!",
));
}
let frame_size = read_u32_le(&mut reader)?;
buf.resize(frame_size as usize, 0);
reader.read_exact(buf)?;
// Compute overall frame size.
Ok((std::mem::size_of::<u32>() * 2 + buf.len()) as u64)
}
/// Writes a Zstandard skippable frame with the given user data.
/// Returns the number of bytes written to writer (the size of the frame, including the magic number and header).
pub fn write_skippable_frame(mut writer: impl Write, buf: &[u8]) -> io::Result<u64> {
fn write_u32_le(mut writer: impl Write, value: u32) -> io::Result<()> {
writer.write_all(&value.to_le_bytes())
}
// Ensure this is a skippable frame.
write_u32_le(&mut writer, SKIPPABLE_MAGIC_MASK)?;
write_u32_le(&mut writer, buf.len() as u32)?;
writer.write_all(buf)?;
// Compute overall frame size.
Ok((std::mem::size_of::<u32>() * 2 + buf.len()) as u64)
}
/// The unidirectional stream of data stored in the pack.
enum PackReader {
Compressed(Decoder<'static, BufReader<File>>),
}
impl PackReader {
/// Consumes the specified number of bytes from the reader.
fn seek(&mut self, bytes: u64) -> io::Result<()> {
match self {
Self::Compressed(decoder) => {
io::copy(&mut decoder.by_ref().take(bytes), &mut io::sink()).map(|_| {})
}
}
}
// Reads the exact number of bytes into `buf`.
fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
match self {
Self::Compressed(decoder) => decoder.read_exact(buf),
}
}
}
/// Represents a compressed Zstandard frame in the .pack file.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PackFrame {
/// The size of the frame, in bytes.
pub frame_size: u64,
/// The size of the data stream in the frame, once decompressed, in bytes.
pub decompressed_size: u64,
}
/// Represents the custom header in the beginning of a .pack file.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PackHeader {
/// Valid pack headers have this value set to [`PACK_HEADER_MAGIC`].
magic: u64,
/// The list of frames in the pack file, sorted by their byte offsets.
frames: Vec<PackFrame>,
}
impl PackHeader {
/// Create a new pack header.
pub fn new(frames: Vec<PackFrame>) -> Self {
Self {
magic: PACK_HEADER_MAGIC,
frames,
}
}
/// Verifies the header magic.
pub fn is_valid(&self) -> bool {
self.magic == PACK_HEADER_MAGIC
}
}
impl Default for PackHeader {
fn default() -> Self {
Self {
magic: PACK_HEADER_MAGIC,
frames: vec![],
}
}
}
/// Represents an pack file.
pub struct Pack {
/// The base filename ([`Path::file_stem`]) of the pack.
name: String,
/// The index for the pack.
index: PackIndex,
/// The header of the pack.
header: PackHeader,
/// PackReader instances for each frame in the .pack file.
frame_readers: Vec<PackReader>,
/// The file size of the pack (in bytes).
file_size: u64,
}
impl Pack {
/// Opens a pack file and its corresponding index.
///
/// # Arguments
///
/// * `pack_name` - The base filename ([`Path::file_stem`]) of the pack.
pub fn open<P>(repo: P, pack_name: &PackId) -> Result<Self, Error>
where
P: AsRef<Path>,
{
let PackId::Pack(pack_name) = pack_name;
let mut packs_data = repo.as_ref().join(&*Repository::data_dir());
packs_data.push(PACKS_DIR);
let mut pack_index_path = packs_data.join(pack_name);
pack_index_path.set_extension(PACK_INDEX_EXTENSION);
let mut pack_path = packs_data.join(pack_name);
pack_path.set_extension(PACK_EXTENSION);
info!("Reading pack index {:?}...", pack_index_path);
let pack_index = PackIndex::load(&pack_index_path)?;
info!("Opening pack file {:?}...", pack_path);
let (file_size, header, frame_readers) =
Self::open_pack(&pack_path).or_else(|_| Self::open_pack_legacy(&pack_path))?;
Ok(Pack {
name: pack_name.to_owned(),
index: pack_index,
header,
frame_readers,
file_size,
})
}
/// Opens the pack file for reading.
fn open_pack(pack_path: &Path) -> Result<(u64, PackHeader, Vec<PackReader>), Error> {
let file = File::open(&pack_path)?;
let file_size = file.metadata()?.len();
let mut reader = io::BufReader::new(file);
let mut header = vec![];
let header_size = read_skippable_frame(&mut reader, &mut header)?;
let header: PackHeader =
rmp_serde::decode::from_read(&header[..]).map_err(|_| Error::CorruptPack)?;
drop(reader);
if !header.is_valid() {
return Err(Error::CorruptPack);
}
let frame_offsets = compute_frame_offsets(&header.frames);
let frame_readers = frame_offsets
.iter()
.map(|offset| -> Result<_, Error> {
let mut reader = File::open(&pack_path)?;
io::Seek::seek(&mut reader, io::SeekFrom::Start(header_size + offset))?;
let mut reader = Decoder::new(reader)?;
reader.set_parameter(DParameter::WindowLogMax(DEFAULT_WINDOW_LOG_MAX))?;
// Wrap in a `PackReader`.
let reader = PackReader::Compressed(reader);
Ok(reader)
})
.collect::<Result<Vec<_>, _>>()?;
Ok((file_size, header, frame_readers))
}
/// Backwards-compatible open_pack for the legacy pack format (no skippable frame/no header).
fn open_pack_legacy(pack_path: &Path) -> Result<(u64, PackHeader, Vec<PackReader>), Error> {
let file = File::open(&pack_path)?;
let file_size = file.metadata()?.len();
// This manufactured pack header works for the current implementation. We might
// change how/whether we support the legacy pack format in the future...
let header = PackHeader::new(vec![PackFrame {
frame_size: file_size,
decompressed_size: u64::MAX,
}]);
let mut reader = Decoder::new(file)?;
reader.set_parameter(DParameter::WindowLogMax(DEFAULT_WINDOW_LOG_MAX))?;
let frame_reader = PackReader::Compressed(reader);
Ok((file_size, header, vec![frame_reader]))
}
/// The base filename ([`Path::file_stem`]) of the pack.
pub fn name(&self) -> &str {
&self.name
}
/// A reference to the pack index.
pub fn index(&self) -> &PackIndex {
&self.index
}
/// The size of the pack in bytes.
pub fn file_size(&self) -> u64 {
self.file_size
}
/// Extracts the specified entries from the pack into the specified directory.
/// This operation consumes the pack, since [`Pack`] objects contain a unidirectional
/// data stream that becomes unusable after it is read.
///
/// # Arguments
///
/// * `entries` - The list of entries to extract. These *must* be entries contained in the pack index.
/// * `output_dir` - The directory relative to which the files will be extracted.
/// * `verify` - Enable/disable checksum verification.
#[allow(unused_mut)]
#[allow(clippy::needless_collect)]
pub(crate) fn extract_entries<P>(
mut self,
entries: &[FileEntry],
output_dir: P,
verify: bool,
num_workers: u32,
) -> Result<(), Error>
where
P: AsRef<Path> + Sync,
{
if entries.is_empty() {
return Ok(());
}
let num_frames = self.header.frames.len();
assert_ne!(0, num_workers);
assert_ne!(0, num_frames);
if num_frames < num_workers as usize {
info!(
"Requested {} workers, but there are only {} frames!",
num_workers, num_frames
);
}
let num_workers = std::cmp::min(num_workers, num_frames as u32);
// Assign entries to the frames they reside in.
// The resulting entries will have offsets relative to their containing frame.
let frame_to_entries = assign_to_frames(&self.header.frames, entries)?;
// Compute and log total amount of seeking and decompression needed.
let bytes_to_decompress = frame_to_entries
.iter()
.flat_map(|entries| {
entries
.iter()
.map(|e| e.metadata.offset + e.metadata.offset)
.max()
})
.sum::<u64>();
info!(
"Decompressing {:.3} MiB of data...",
bytes_to_decompress as f64 / 1024f64 / 1024f64
);
// Collect required for run_in_parallel ExactSizeIterator argument.
let tasks = self
.frame_readers
.into_iter()
.zip(frame_to_entries.into_iter())
// Skip empty frames.
.filter(|(_, entries)| !entries.is_empty())
.collect::<Vec<_>>();
// Record start time
let start_time = std::time::Instant::now();
let results = run_in_parallel(
num_workers as usize,
tasks.into_iter(),
|(frame_reader, entries)| extract_files(frame_reader, &entries, &output_dir, verify),
);
// Collect stats
let stats = results
.into_iter()
.sum::<Result<ExtractStats, Error>>()?
// Convert the statistics into fractions, since summing the time per thread doesn't make much sense.
.fractions();
// Log statistics about the decompression performance
let real_time = std::time::Instant::now() - start_time;
info!(
"Decompression statistics ({:?})\n\
\tSeeking: {:.1}%\n\
\tObject decompression: {:.1}%\n\
\tVerification: {:.1}%\n\
\tWriting to disk: {:.1}%\n\
\tOther: {:.1}%",
real_time,
stats.seek_time * 100f64,
stats.object_time * 100f64,
stats.verify_time * 100f64,
stats.write_time * 100f64,
stats.other_time() * 100f64,
);
Ok(())
}
}
/// Verifies that the object has the expected checksum.
fn verify_object(buf: &[u8], exp_checksum: &ObjectChecksum) -> Result<(), Error> {
// Verify checksum
let mut checksum = [0u8; 20];
let mut hasher = Sha1::new();
hasher.input(buf);
hasher.result(&mut checksum);
if &checksum != exp_checksum {
return Err(PackError::ChecksumMismatch(*exp_checksum, checksum).into());
}
Ok(())
}
/// Writes the object to the specified path, taking care
/// of adjusting file permissions.
fn write_object(buf: &[u8], path: &Path) -> Result<(), Error> {
fs::create_dir_all(path.parent().unwrap())?;
let mut f = File::create(path)?;
f.write_all(buf)?;
Ok(())
}
/// Returns a list of the frame offsets, computed
/// using the order and sizes of the given frames.
fn | (frames: &[PackFrame]) -> Vec<u64> {
let mut frame_offsets: Vec<_> = vec![0; frames.len()];
for i in 1..frame_offsets.len() {
frame_offsets[i] = frames[i - 1].frame_size + frame_offsets[i - 1];
}
frame_offsets
}
/// Returns a list of the data offsets, computed using the order and
/// decompressed sizes of the given frames.
fn compute_frame_decompressed_offset(frames: &[PackFrame]) -> Vec<u64> {
let mut frame_decompressed_offset: Vec<_> = vec![0; frames.len()];
for i in 1..frame_decompressed_offset.len() {
frame_decompressed_offset[i] =
frames[i - 1].decompressed_size + frame_decompressed_offset[i - 1];
}
frame_decompressed_offset
}
/// Groups and transforms the list of [`FileEntry`]-s taken from a pack index
/// (and with absolute offsets into the decompressed stream) into sets of
/// entries per frame, with adjusted (relative) offsets to that corresponding
/// Zstandard frame. Objects are assumed to not be split across two frames.
fn assign_to_frames(
frames: &[PackFrame],
entries: &[FileEntry],
) -> Result<Vec<Vec<FileEntry>>, Error> {
let frame_decompressed_offset: Vec<_> = compute_frame_decompressed_offset(frames);
// Figure out frame belonging of the objects,
// using the frame offset and the object offset.
let mut frames: Vec<Vec<FileEntry>> = (0..frames.len()).map(|_| Vec::new()).collect();
for entry in entries {
let frame_index = frame_decompressed_offset
.iter()
// Find the index of the frame containing the object (objects are
// assumed to not be split across two frames)
.rposition(|&x| x <= entry.metadata.offset)
.ok_or(Error::CorruptPack)?;
// Compute the offset relative to that frame.
let local_offset = entry.metadata.offset - frame_decompressed_offset[frame_index];
let local_entry = FileEntry::new(
entry.path.clone(),
entry.checksum,
ObjectMetadata {
offset: local_offset, // Replace global offset -> local offset
size: entry.metadata.size,
},
);
frames[frame_index].push(local_entry);
}
Ok(frames)
}
/// Used for timing the different parts of the extraction process.
#[derive(Default)]
struct ExtractStats {
total_time: f64,
seek_time: f64,
object_time: f64,
verify_time: f64,
write_time: f64,
}
impl ExtractStats {
fn other_time(&self) -> f64 {
self.total_time - (self.seek_time + self.object_time + self.verify_time + self.write_time)
}
/// Convert the statistics into fractions, taken relative to `self.total_time`.
fn fractions(&self) -> Self {
let norm_factor = 1f64 / self.total_time;
Self {
total_time: norm_factor * self.total_time,
seek_time: norm_factor * self.seek_time,
object_time: norm_factor * self.object_time,
verify_time: norm_factor * self.verify_time,
write_time: norm_factor * self.write_time,
}
}
}
impl std::ops::Add for ExtractStats {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
total_time: self.total_time + other.total_time,
seek_time: self.seek_time + other.seek_time,
object_time: self.object_time + other.object_time,
verify_time: self.verify_time + other.verify_time,
write_time: self.write_time + other.write_time,
}
}
}
impl std::iter::Sum<ExtractStats> for ExtractStats {
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = ExtractStats>,
{
let mut acc = ExtractStats::default();
for x in iter {
acc = acc + x;
}
acc
}
}
/// Extracts the given entries from the pack reader into the specified output directory.
/// Checksum verification can be toggled on/off.
fn extract_files(
mut reader: PackReader,
entries: &[FileEntry],
output_dir: impl AsRef<Path>,
verify: bool,
) -> Result<ExtractStats, Error> {
let mut entries: Vec<FileEntry> = entries.to_vec();
// Sort objects to allow for forward-only seeking
entries.sort_by(|x, y| {
let offset_x = x.metadata.offset;
let offset_y = y.metadata.offset;
offset_x.cmp(&offset_y)
});
// Used for timing
let mut stats = ExtractStats::default();
let total_time = measure_ok(|| -> Result<(), Error> {
// Decompression buffer
let mut buf = vec![];
let mut path_buf = PathBuf::new();
let mut pos = 0;
for entry in entries {
let metadata = &entry.metadata;
// Seek forward
let discard_bytes = metadata.offset - pos;
// Check if we need to read a new object.
// The current position in stream can be AFTER the object offset only
// if the previous and this object are the same. This is because the objects
// are sorted by offset, and the current position is set to the offset at the
// end of each object, after that object is consumed.
if pos <= metadata.offset {
stats.seek_time += measure_ok(|| reader.seek(discard_bytes))?.0.as_secs_f64();
// Resize buf
buf.resize(metadata.size as usize, 0);
// Read object
stats.object_time += measure_ok(|| reader.read_exact(&mut buf[..]))?
.0
.as_secs_f64();
pos = metadata.offset + metadata.size;
if verify {
stats.verify_time += measure_ok(|| verify_object(&buf[..], &entry.checksum))?
.0
.as_secs_f64();
}
}
// Output path
path_buf.clear();
path_buf.push(&output_dir);
path_buf.push(&entry.path);
stats.write_time += measure_ok(|| write_object(&buf[..], &path_buf))?
.0
.as_secs_f64();
}
Ok(())
})?
.0;
stats.total_time = total_time.as_secs_f64();
Ok(stats)
}
#[cfg(test)]
mod tests {
use super::*;
fn make_md(offset: u64, size: u64) -> ObjectMetadata {
ObjectMetadata { offset, size }
}
#[test]
fn pack_id_validation_works() {
// VALID
assert!(PackId::is_valid("ABCD"));
assert!(PackId::is_valid("abcd"));
assert!(PackId::is_valid("____"));
assert!(PackId::is_valid("----"));
assert!(PackId::is_valid("ABCD-132_TAG"));
// NOT VALID
// spaces
assert!(!PackId::is_valid("Some Text"));
// non-latin alphabets
assert!(!PackId::is_valid("това-е-тест"));
// non-letter symbols other than - and _
assert!(!PackId::is_valid("QWERTY-^!$^%^@!#"));
}
#[test]
fn snapshot_tag_validation_works() {
// VALID
assert!(SnapshotId::is_valid("ABCD"));
assert!(SnapshotId::is_valid("abcd"));
assert!(SnapshotId::is_valid("____"));
assert!(SnapshotId::is_valid("----"));
assert!(SnapshotId::is_valid("ABCD-132_TAG"));
// NOT VALID
// spaces
assert!(!SnapshotId::is_valid("Some Text"));
// non-latin alphabets
assert!(!SnapshotId::is_valid("това-е-тест"));
// non-letter symbols other than - and _
assert!(!SnapshotId::is_valid("QWERTY-^!$^%^@!#"));
}
#[test]
fn assign_to_frames_single_works() {
let frames = [PackFrame {
frame_size: 100,
decompressed_size: 1000,
}];
let entries = [
FileEntry::new("A".into(), [0; 20], make_md(50, 1)),
FileEntry::new("B".into(), [1; 20], make_md(50, 1)),
];
let result = assign_to_frames(&frames, &entries).unwrap();
assert_eq!(1, result.len());
assert_eq!(&entries, result[0].as_slice());
}
#[test]
fn assign_to_frames_multiple_works() {
let frames = [
PackFrame {
frame_size: 100,
decompressed_size: 1000,
},
PackFrame {
frame_size: 100,
decompressed_size: 1000,
},
];
let entries = [
FileEntry::new("A".into(), [0; 20], make_md(800, 200)),
FileEntry::new("B".into(), [1; 20], make_md(1200, 200)),
];
let frame_1_entries = [
// Offset is same
FileEntry::new("A".into(), [0; 20], make_md(800, 200)),
];
let frame_2_entries = [
// Offset 1200 -> 200
FileEntry::new("B".into(), [1; 20], make_md(200, 200)),
];
let result = assign_to_frames(&frames, &entries).unwrap();
assert_eq!(2, result.len());
assert_eq!(&frame_1_entries, result[0].as_slice());
assert_eq!(&frame_2_entries, result[1].as_slice());
}
}
| compute_frame_offsets | identifier_name |
pack.rs | //! SPDX-License-Identifier: Apache-2.0
//! Copyright (C) 2021 Arm Limited or its affiliates and Contributors. All rights reserved.
use serde::{Deserialize, Serialize};
use std::{
fmt, fs,
fs::File,
io,
io::{BufReader, Read, Write},
path::{Path, PathBuf},
};
use std::{fmt::Display, str::FromStr};
use crypto::digest::Digest;
use crypto::sha1::Sha1;
use log::info;
use zstd::stream::raw::DParameter;
use zstd::Decoder;
use super::constants::{
DEFAULT_WINDOW_LOG_MAX, PACKS_DIR, PACK_EXTENSION, PACK_HEADER_MAGIC, PACK_INDEX_EXTENSION,
};
use super::error::Error;
use super::repository::Repository;
use super::{algo::run_in_parallel, constants::DOT_PACK_INDEX_EXTENSION};
use crate::packidx::{FileEntry, ObjectChecksum, PackError, PackIndex};
use crate::{log::measure_ok, packidx::ObjectMetadata};
/// Pack and snapshots IDs can contain latin letter, digits or the following characters.
const EXTRA_ID_CHARS: &[char] = &['-', '_', '/'];
#[derive(Debug)]
/// Error used when parsing a [`SnapshotId`] fails.
pub enum IdError {
    /// The input does not have the `<pack>:<snapshot>` shape.
    BadFormat(String),
    /// The pack component contains a disallowed character.
    InvalidPack(String),
    /// The snapshot component contains a disallowed character.
    InvalidSnapshot(String),
}
/// Human-readable messages describing each parse failure.
impl Display for IdError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Self::BadFormat(s) => write!(f, "Unrecognized identifier format '{}'!", s),
            Self::InvalidPack(s) => write!(
                f,
                "Invalid pack identifier '{}'! Latin letters, digits, -, _ and / are allowed!",
                s
            ),
            // NOTE(review): SnapshotId::is_valid also accepts '/', which this
            // message omits -- confirm which is intended.
            Self::InvalidSnapshot(s) => write!(
                f,
                "Invalid snapshot identifier '{}'! Latin letters, digits, - and _ are allowed!",
                s
            ),
        }
    }
}
impl std::error::Error for IdError {}
/// Identifies a pack file.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PackId {
    Pack(String),
}
impl PackId {
    /// from_index_path returns Some(PackId::Pack(index_path)) with the pack
    /// index extension trimmed, if the input ends in the PACK_INDEX_EXTENSION.
    pub fn from_index_path(index_path: String) -> Option<PackId> {
        index_path
            .strip_suffix(DOT_PACK_INDEX_EXTENSION)
            .map(|s| PackId::Pack(s.to_owned()))
    }
    /// A pack identifier may contain only ASCII alphanumerics and the
    /// characters in [`EXTRA_ID_CHARS`] ('-', '_' and '/').
    fn is_valid(s: &str) -> bool {
        s.chars()
            .all(|c| c.is_ascii_alphanumeric() || EXTRA_ID_CHARS.contains(&c))
    }
}
/// Parses a [`PackId`] from a string, validating the character set.
impl FromStr for PackId {
    type Err = IdError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if !PackId::is_valid(s) {
            return Err(IdError::InvalidPack(s.to_owned()));
        }
        Ok(PackId::Pack(s.to_owned()))
    }
}
/// Prints the bare pack name.
impl Display for PackId {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            PackId::Pack(s) => write!(f, "{}", s),
        }
    }
}
/// Identifies a snapshot using a pack base filename [`Path::file_stem`] and a snapshot tag.
#[derive(PartialEq, Clone, Debug)]
pub struct SnapshotId {
    /// The pack this snapshot belongs to.
    pack: PackId,
    /// The snapshot tag within the pack.
    tag: String,
}
impl SnapshotId {
    /// Creates a [`SnapshotId`] from a pack and a snapshot tag
    pub fn new(pack: PackId, tag: &str) -> Result<Self, IdError> {
        if !Self::is_valid(tag) {
            return Err(IdError::InvalidSnapshot(tag.to_owned()));
        }
        Ok(Self {
            pack,
            tag: tag.to_owned(),
        })
    }
    /// The pack this snapshot belongs to.
    pub fn pack(&self) -> &PackId {
        &self.pack
    }
    /// The snapshot tag.
    pub fn tag(&self) -> &str {
        &self.tag
    }
    /// A tag may contain only ASCII alphanumerics and [`EXTRA_ID_CHARS`].
    fn is_valid(tag: &str) -> bool {
        tag.chars()
            .all(|c| c.is_ascii_alphanumeric() || EXTRA_ID_CHARS.contains(&c))
    }
}
/// [`SnapshotId`] equality is a full equivalence relation.
impl Eq for SnapshotId {}
/// Prints the canonical form for [`SnapshotId`].
impl Display for SnapshotId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        write!(f, "{}:{}", self.pack, self.tag)
    }
}
/// Parses a [`SnapshotId`] from a canonical form string 'pack_name:snapshot_name'.
impl FromStr for SnapshotId {
    type Err = IdError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Split on the *last* ':' in the (right-trimmed) input.
        if let Some((pack, snapshot)) = s.trim_end().rsplit_once(':') {
            if pack.is_empty() {
                return Err(IdError::InvalidPack(pack.to_owned()));
            }
            if snapshot.is_empty() {
                return Err(IdError::InvalidSnapshot(snapshot.to_owned()));
            }
            Ok(SnapshotId {
                pack: PackId::from_str(pack)?,
                // NOTE(review): unlike SnapshotId::new, the tag is not
                // re-validated here -- confirm whether that is intentional.
                tag: snapshot.to_owned(),
            })
        } else {
            Err(IdError::BadFormat(s.to_owned()))
        }
    }
}
/// A magic constant that has no use other than to indicate a custom .pack header
/// stored in a Zstandard skippable frame.
const SKIPPABLE_MAGIC_MASK: u32 = 0x184D2A50;
/// Reads a Zstandard skippable frame from reader and writes the result to `buf`.
/// Returns the size of the frame, *not* the number of bytes read.
pub fn read_skippable_frame(mut reader: impl Read, buf: &mut Vec<u8>) -> io::Result<u64> {
    // Reads a 4-byte little-endian integer, as used by the Zstandard framing.
    fn read_u32_le(mut reader: impl Read) -> io::Result<u32> {
        let mut bytes = [0u8; 4];
        reader.read_exact(&mut bytes)?;
        Ok(u32::from_le_bytes(bytes))
    }
    // Ensure this is a skippable frame.
    // NOTE(review): this bit test accepts any magic whose bits are a superset
    // of 0x184D2A50, not only the 0x184D2A50..=0x184D2A5F range defined by
    // the Zstandard format -- confirm whether that looseness is intended.
    let magic = read_u32_le(&mut reader)?;
    if magic & SKIPPABLE_MAGIC_MASK != SKIPPABLE_MAGIC_MASK {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "Not a Zstandard skippable frame!",
        ));
    }
    // The 4 bytes after the magic hold the payload length.
    let frame_size = read_u32_le(&mut reader)?;
    buf.resize(frame_size as usize, 0);
    reader.read_exact(buf)?;
    // Compute overall frame size.
    Ok((std::mem::size_of::<u32>() * 2 + buf.len()) as u64)
}
/// Writes a Zstandard skippable frame containing the given user data.
/// Returns the number of bytes written to writer (the size of the frame,
/// including the magic number and length header).
pub fn write_skippable_frame(mut writer: impl Write, buf: &[u8]) -> io::Result<u64> {
    // Frame layout: 4-byte little-endian magic, 4-byte little-endian
    // payload length, then the payload itself.
    writer.write_all(&SKIPPABLE_MAGIC_MASK.to_le_bytes())?;
    writer.write_all(&(buf.len() as u32).to_le_bytes())?;
    writer.write_all(buf)?;
    let header_len = 2 * std::mem::size_of::<u32>();
    Ok((header_len + buf.len()) as u64)
}
/// The unidirectional stream of data stored in the pack.
enum PackReader {
    /// A Zstandard decoder over the pack file.
    Compressed(Decoder<'static, BufReader<File>>),
}
impl PackReader {
    /// Consumes the specified number of bytes from the reader.
    /// Seeking is forward-only: the bytes are decompressed and discarded.
    fn seek(&mut self, bytes: u64) -> io::Result<()> {
        match self {
            Self::Compressed(decoder) => {
                io::copy(&mut decoder.by_ref().take(bytes), &mut io::sink()).map(|_| {})
            }
        }
    }
    // Reads the exact number of bytes into `buf`.
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        match self {
            Self::Compressed(decoder) => decoder.read_exact(buf),
        }
    }
}
/// Represents a compressed Zstandard frame in the .pack file.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PackFrame {
    /// The size of the frame, in bytes.
    pub frame_size: u64,
    /// The size of the data stream in the frame, once decompressed, in bytes.
    pub decompressed_size: u64,
}
/// Represents the custom header in the beginning of a .pack file.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PackHeader {
    /// Valid pack headers have this value set to [`PACK_HEADER_MAGIC`].
    magic: u64,
    /// The list of frames in the pack file, sorted by their byte offsets.
    frames: Vec<PackFrame>,
}
impl PackHeader {
    /// Create a new pack header.
    pub fn new(frames: Vec<PackFrame>) -> Self {
        Self {
            magic: PACK_HEADER_MAGIC,
            frames,
        }
    }
    /// Verifies the header magic.
    pub fn is_valid(&self) -> bool {
        self.magic == PACK_HEADER_MAGIC
    }
}
/// An empty (zero-frame) header with a valid magic.
impl Default for PackHeader {
    fn default() -> Self {
        Self {
            magic: PACK_HEADER_MAGIC,
            frames: vec![],
        }
    }
}
/// Represents a pack file.
pub struct Pack {
    /// The base filename ([`Path::file_stem`]) of the pack.
    name: String,
    /// The index for the pack.
    index: PackIndex,
    /// The header of the pack.
    header: PackHeader,
    /// PackReader instances for each frame in the .pack file.
    frame_readers: Vec<PackReader>,
    /// The file size of the pack (in bytes).
    file_size: u64,
}
impl Pack {
    /// Opens a pack file and its corresponding index.
    ///
    /// # Arguments
    ///
    /// * `repo` - Path to the repository root.
    /// * `pack_name` - The base filename ([`Path::file_stem`]) of the pack.
    pub fn open<P>(repo: P, pack_name: &PackId) -> Result<Self, Error>
    where
        P: AsRef<Path>,
    {
        let PackId::Pack(pack_name) = pack_name;
        // Both the .pack and its index live under <repo>/<data_dir>/<PACKS_DIR>.
        let mut packs_data = repo.as_ref().join(&*Repository::data_dir());
        packs_data.push(PACKS_DIR);
        let mut pack_index_path = packs_data.join(pack_name);
        pack_index_path.set_extension(PACK_INDEX_EXTENSION);
        let mut pack_path = packs_data.join(pack_name);
        pack_path.set_extension(PACK_EXTENSION);
        info!("Reading pack index {:?}...", pack_index_path);
        let pack_index = PackIndex::load(&pack_index_path)?;
        info!("Opening pack file {:?}...", pack_path);
        // Try the current format first; fall back to the legacy
        // (headerless, single-frame) format on failure.
        let (file_size, header, frame_readers) =
            Self::open_pack(&pack_path).or_else(|_| Self::open_pack_legacy(&pack_path))?;
        Ok(Pack {
            name: pack_name.to_owned(),
            index: pack_index,
            header,
            frame_readers,
            file_size,
        })
    }
    /// Opens the pack file for reading and prepares one decoder per frame.
    fn open_pack(pack_path: &Path) -> Result<(u64, PackHeader, Vec<PackReader>), Error> {
        let file = File::open(&pack_path)?;
        let file_size = file.metadata()?.len();
        let mut reader = io::BufReader::new(file);
        let mut header = vec![];
        // The custom header is stored in a Zstandard skippable frame at the
        // start of the file, serialized with MessagePack.
        let header_size = read_skippable_frame(&mut reader, &mut header)?;
        let header: PackHeader =
            rmp_serde::decode::from_read(&header[..]).map_err(|_| Error::CorruptPack)?;
        drop(reader);
        if !header.is_valid() {
            return Err(Error::CorruptPack);
        }
        // Each frame gets an independent reader, seeked to the frame start,
        // so that frames can be decompressed in parallel.
        let frame_offsets = compute_frame_offsets(&header.frames);
        let frame_readers = frame_offsets
            .iter()
            .map(|offset| -> Result<_, Error> {
                let mut reader = File::open(&pack_path)?;
                io::Seek::seek(&mut reader, io::SeekFrom::Start(header_size + offset))?;
                let mut reader = Decoder::new(reader)?;
                reader.set_parameter(DParameter::WindowLogMax(DEFAULT_WINDOW_LOG_MAX))?;
                // Wrap in a `PackReader`.
                let reader = PackReader::Compressed(reader);
                Ok(reader)
            })
            .collect::<Result<Vec<_>, _>>()?;
        Ok((file_size, header, frame_readers))
    }
    /// Backwards-compatible open_pack for the legacy pack format (no skippable frame/no header).
    fn open_pack_legacy(pack_path: &Path) -> Result<(u64, PackHeader, Vec<PackReader>), Error> {
        let file = File::open(&pack_path)?;
        let file_size = file.metadata()?.len();
        // This manufactured pack header works for the current implementation. We might
        // change how/whether we support the legacy pack format in the future...
        let header = PackHeader::new(vec![PackFrame {
            frame_size: file_size,
            decompressed_size: u64::MAX,
        }]);
        let mut reader = Decoder::new(file)?;
        reader.set_parameter(DParameter::WindowLogMax(DEFAULT_WINDOW_LOG_MAX))?;
        let frame_reader = PackReader::Compressed(reader);
        Ok((file_size, header, vec![frame_reader]))
    }
    /// The base filename ([`Path::file_stem`]) of the pack.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// A reference to the pack index.
    pub fn index(&self) -> &PackIndex {
        &self.index
    }
    /// The size of the pack in bytes.
    pub fn file_size(&self) -> u64 {
        self.file_size
    }
    /// Extracts the specified entries from the pack into the specified directory.
    /// This operation consumes the pack, since [`Pack`] objects contain a unidirectional
    /// data stream that becomes unusable after it is read.
    ///
    /// # Arguments
    ///
    /// * `entries` - The list of entries to extract. These *must* be entries contained in the pack index.
    /// * `output_dir` - The directory relative to which the files will be extracted.
    /// * `verify` - Enable/disable checksum verification.
    /// * `num_workers` - Requested parallelism; capped at the number of frames.
    #[allow(unused_mut)]
    #[allow(clippy::needless_collect)]
    pub(crate) fn extract_entries<P>(
        mut self,
        entries: &[FileEntry],
        output_dir: P,
        verify: bool,
        num_workers: u32,
    ) -> Result<(), Error>
    where
        P: AsRef<Path> + Sync,
    {
        if entries.is_empty() {
            return Ok(());
        }
        let num_frames = self.header.frames.len();
        assert_ne!(0, num_workers);
        assert_ne!(0, num_frames);
        if num_frames < num_workers as usize {
            info!(
                "Requested {} workers, but there are only {} frames!",
                num_workers, num_frames
            );
        }
        let num_workers = std::cmp::min(num_workers, num_frames as u32);
        // Assign entries to the frames they reside in.
        // The resulting entries will have offsets relative to their containing frame.
        let frame_to_entries = assign_to_frames(&self.header.frames, entries)?;
        // Compute and log total amount of seeking and decompression needed.
        // Per frame, the number of bytes that must be decompressed is bounded
        // by the end offset of the object reaching furthest into the frame.
        let bytes_to_decompress = frame_to_entries
            .iter()
            .flat_map(|entries| {
                entries
                    .iter()
                    // FIX: was `offset + offset`; the end of an object is its
                    // offset plus its *size*.
                    .map(|e| e.metadata.offset + e.metadata.size)
                    .max()
            })
            .sum::<u64>();
        info!(
            "Decompressing {:.3} MiB of data...",
            bytes_to_decompress as f64 / 1024f64 / 1024f64
        );
        // Collect required for run_in_parallel ExactSizeIterator argument.
        let tasks = self
            .frame_readers
            .into_iter()
            .zip(frame_to_entries.into_iter())
            // Skip empty frames.
            .filter(|(_, entries)| !entries.is_empty())
            .collect::<Vec<_>>();
        // Record start time
        let start_time = std::time::Instant::now();
        let results = run_in_parallel(
            num_workers as usize,
            tasks.into_iter(),
            |(frame_reader, entries)| extract_files(frame_reader, &entries, &output_dir, verify),
        );
        // Collect stats
        let stats = results
            .into_iter()
            .sum::<Result<ExtractStats, Error>>()?
            // Convert the statistics into fractions, since summing the time per thread doesn't make much sense.
            .fractions();
        // Log statistics about the decompression performance
        let real_time = std::time::Instant::now() - start_time;
        info!(
            "Decompression statistics ({:?})\n\
            \tSeeking: {:.1}%\n\
            \tObject decompression: {:.1}%\n\
            \tVerification: {:.1}%\n\
            \tWriting to disk: {:.1}%\n\
            \tOther: {:.1}%",
            real_time,
            stats.seek_time * 100f64,
            stats.object_time * 100f64,
            stats.verify_time * 100f64,
            stats.write_time * 100f64,
            stats.other_time() * 100f64,
        );
        Ok(())
    }
}
/// Verifies that the object has the expected SHA-1 checksum.
fn verify_object(buf: &[u8], exp_checksum: &ObjectChecksum) -> Result<(), Error> {
    // Compute the SHA-1 digest of the object contents.
    let mut hasher = Sha1::new();
    hasher.input(buf);
    let mut actual = [0u8; 20];
    hasher.result(&mut actual);
    // Reject the object unless the digest matches the recorded checksum.
    if &actual == exp_checksum {
        Ok(())
    } else {
        Err(PackError::ChecksumMismatch(*exp_checksum, actual).into())
    }
}
/// Writes the object to the specified path, creating any missing
/// parent directories first.
fn write_object(buf: &[u8], path: &Path) -> Result<(), Error> {
    let parent = path.parent().unwrap();
    fs::create_dir_all(parent)?;
    File::create(path)?.write_all(buf)?;
    Ok(())
}
/// Returns a list of the frame byte offsets: a prefix sum of the frame
/// sizes, so frame `i` starts where frames `0..i` end.
fn compute_frame_offsets(frames: &[PackFrame]) -> Vec<u64> {
    let mut offset = 0u64;
    frames
        .iter()
        .map(|frame| {
            let start = offset;
            offset += frame.frame_size;
            start
        })
        .collect()
}
/// Returns a list of the decompressed-stream offsets: a prefix sum of the
/// decompressed frame sizes, so frame `i`'s data starts where frames
/// `0..i`'s data ends.
fn compute_frame_decompressed_offset(frames: &[PackFrame]) -> Vec<u64> {
    let mut offset = 0u64;
    frames
        .iter()
        .map(|frame| {
            let start = offset;
            offset += frame.decompressed_size;
            start
        })
        .collect()
}
/// Groups and transforms the list of [`FileEntry`]-s taken from a pack index
/// (and with absolute offsets into the decompressed stream) into sets of
/// entries per frame, with adjusted (relative) offsets to that corresponding
/// Zstandard frame. Objects are assumed to not be split across two frames.
///
/// Returns one (possibly empty) `Vec<FileEntry>` per input frame, or
/// [`Error::CorruptPack`] if no frame start precedes an entry's offset.
fn assign_to_frames(
    frames: &[PackFrame],
    entries: &[FileEntry],
) -> Result<Vec<Vec<FileEntry>>, Error> {
    let frame_decompressed_offset: Vec<_> = compute_frame_decompressed_offset(frames);
    // Figure out frame belonging of the objects,
    // using the frame offset and the object offset.
    // (This binding shadows the input slice; only its length is used below.)
    let mut frames: Vec<Vec<FileEntry>> = (0..frames.len()).map(|_| Vec::new()).collect();
    for entry in entries {
        let frame_index = frame_decompressed_offset
            .iter()
            // Find the index of the frame containing the object (objects are
            // assumed to not be split across two frames)
            .rposition(|&x| x <= entry.metadata.offset)
            .ok_or(Error::CorruptPack)?;
        // Compute the offset relative to that frame.
        let local_offset = entry.metadata.offset - frame_decompressed_offset[frame_index];
        let local_entry = FileEntry::new(
            entry.path.clone(),
            entry.checksum,
            ObjectMetadata {
                offset: local_offset, // Replace global offset -> local offset
                size: entry.metadata.size,
            },
        );
        frames[frame_index].push(local_entry);
    }
    Ok(frames)
}
/// Used for timing the different parts of the extraction process.
/// All fields are wall-clock times in seconds (or fractions of the total
/// after [`ExtractStats::fractions`] is applied).
#[derive(Default)]
struct ExtractStats {
    /// Total time spent in `extract_files`.
    total_time: f64,
    /// Time spent skipping forward in the decompressed stream.
    seek_time: f64,
    /// Time spent decompressing objects.
    object_time: f64,
    /// Time spent verifying object checksums.
    verify_time: f64,
    /// Time spent writing objects to disk.
    write_time: f64,
}
impl ExtractStats {
    /// Time not accounted for by the four measured phases.
    fn other_time(&self) -> f64 {
        self.total_time - (self.seek_time + self.object_time + self.verify_time + self.write_time)
    }
    /// Convert the statistics into fractions, taken relative to `self.total_time`.
    fn fractions(&self) -> Self {
        let norm_factor = 1f64 / self.total_time;
        Self {
            total_time: norm_factor * self.total_time,
            seek_time: norm_factor * self.seek_time,
            object_time: norm_factor * self.object_time,
            verify_time: norm_factor * self.verify_time,
            write_time: norm_factor * self.write_time,
        }
    }
}
/// Component-wise addition, used to combine per-thread statistics.
impl std::ops::Add for ExtractStats {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        let mut combined = self;
        combined.total_time += rhs.total_time;
        combined.seek_time += rhs.seek_time;
        combined.object_time += rhs.object_time;
        combined.verify_time += rhs.verify_time;
        combined.write_time += rhs.write_time;
        combined
    }
}
/// Sums a sequence of `ExtractStats` by folding with `+`, starting from
/// the all-zero default.
impl std::iter::Sum<ExtractStats> for ExtractStats {
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = ExtractStats>,
    {
        iter.fold(ExtractStats::default(), |acc, stats| acc + stats)
    }
}
/// Extracts the given entries from the pack reader into the specified output directory.
/// Checksum verification can be toggled on/off.
///
/// The reader is unidirectional, so entries are first sorted by offset and
/// the stream is only ever advanced forwards. Returns per-phase timing
/// statistics.
fn extract_files(
    mut reader: PackReader,
    entries: &[FileEntry],
    output_dir: impl AsRef<Path>,
    verify: bool,
) -> Result<ExtractStats, Error> {
    let mut entries: Vec<FileEntry> = entries.to_vec();
    // Sort objects to allow for forward-only seeking
    entries.sort_by(|x, y| {
        let offset_x = x.metadata.offset;
        let offset_y = y.metadata.offset;
        offset_x.cmp(&offset_y)
    });
    // Used for timing
    let mut stats = ExtractStats::default();
    let total_time = measure_ok(|| -> Result<(), Error> {
        // Decompression buffer
        let mut buf = vec![];
        let mut path_buf = PathBuf::new();
        // Current position in the decompressed stream.
        let mut pos = 0;
        for entry in entries {
            let metadata = &entry.metadata;
            // Check if we need to read a new object.
            // The current position in stream can be AFTER the object offset only
            // if the previous and this object are the same. This is because the objects
            // are sorted by offset, and the current position is set to the offset at the
            // end of each object, after that object is consumed.
            if pos <= metadata.offset {
                // Seek forward.
                // FIX: compute the distance only when pos <= offset; the
                // unconditional `offset - pos` underflowed (u64) for
                // duplicate objects, panicking in debug builds.
                let discard_bytes = metadata.offset - pos;
                stats.seek_time += measure_ok(|| reader.seek(discard_bytes))?.0.as_secs_f64();
                // Resize buf
                buf.resize(metadata.size as usize, 0);
                // Read object
                stats.object_time += measure_ok(|| reader.read_exact(&mut buf[..]))?
                    .0
                    .as_secs_f64();
                pos = metadata.offset + metadata.size;
                if verify {
                    stats.verify_time += measure_ok(|| verify_object(&buf[..], &entry.checksum))?
                        .0
                        .as_secs_f64();
                }
            }
            // Output path. If the object was a duplicate of the previous one,
            // `buf` still holds its contents.
            path_buf.clear();
            path_buf.push(&output_dir);
            path_buf.push(&entry.path);
            stats.write_time += measure_ok(|| write_object(&buf[..], &path_buf))?
                .0
                .as_secs_f64();
        }
        Ok(())
    })?
    .0;
    stats.total_time = total_time.as_secs_f64();
    Ok(stats)
}
#[cfg(test)]
mod tests {
use super::*;
fn make_md(offset: u64, size: u64) -> ObjectMetadata {
ObjectMetadata { offset, size }
}
#[test]
fn pack_id_validation_works() {
// VALID
assert!(PackId::is_valid("ABCD"));
assert!(PackId::is_valid("abcd"));
assert!(PackId::is_valid("____"));
assert!(PackId::is_valid("----"));
assert!(PackId::is_valid("ABCD-132_TAG"));
// NOT VALID
// spaces | // non-letter symbols other than - and _
assert!(!PackId::is_valid("QWERTY-^!$^%^@!#"));
}
#[test]
fn snapshot_tag_validation_works() {
// VALID
assert!(SnapshotId::is_valid("ABCD"));
assert!(SnapshotId::is_valid("abcd"));
assert!(SnapshotId::is_valid("____"));
assert!(SnapshotId::is_valid("----"));
assert!(SnapshotId::is_valid("ABCD-132_TAG"));
// NOT VALID
// spaces
assert!(!SnapshotId::is_valid("Some Text"));
// non-latin alphabets
assert!(!SnapshotId::is_valid("това-е-тест"));
// non-letter symbols other than - and _
assert!(!SnapshotId::is_valid("QWERTY-^!$^%^@!#"));
}
#[test]
fn assign_to_frames_single_works() {
let frames = [PackFrame {
frame_size: 100,
decompressed_size: 1000,
}];
let entries = [
FileEntry::new("A".into(), [0; 20], make_md(50, 1)),
FileEntry::new("B".into(), [1; 20], make_md(50, 1)),
];
let result = assign_to_frames(&frames, &entries).unwrap();
assert_eq!(1, result.len());
assert_eq!(&entries, result[0].as_slice());
}
#[test]
fn assign_to_frames_multiple_works() {
let frames = [
PackFrame {
frame_size: 100,
decompressed_size: 1000,
},
PackFrame {
frame_size: 100,
decompressed_size: 1000,
},
];
let entries = [
FileEntry::new("A".into(), [0; 20], make_md(800, 200)),
FileEntry::new("B".into(), [1; 20], make_md(1200, 200)),
];
let frame_1_entries = [
// Offset is same
FileEntry::new("A".into(), [0; 20], make_md(800, 200)),
];
let frame_2_entries = [
// Offset 1200 -> 200
FileEntry::new("B".into(), [1; 20], make_md(200, 200)),
];
let result = assign_to_frames(&frames, &entries).unwrap();
assert_eq!(2, result.len());
assert_eq!(&frame_1_entries, result[0].as_slice());
assert_eq!(&frame_2_entries, result[1].as_slice());
}
} | assert!(!PackId::is_valid("Some Text"));
// non-latin alphabets
assert!(!PackId::is_valid("това-е-тест")); | random_line_split |
app.py | # BlueGraph: unifying Python framework for graph analytics and co-occurrence analysis.
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main embedding service app."""
import json
import os
import shutil
import re
import time
from flask import Flask, request
from kgforge.core import KnowledgeGraphForge
from bluegraph import PandasPGFrame
from bluegraph.downstream import EmbeddingPipeline
from bluegraph.core import GraphElementEmbedder
def _retrieve_token(request):
"""Retrieve NEXUS token from the request header."""
auth_string = request.headers.get('Authorization')
try:
match = re.match("Bearer (.+)", auth_string)
except TypeError:
match = None
if match:
return match.groups()[0]
def digest_model_data(model_resource):
    """Flatten a Nexus model resource into a plain metadata dict.

    Only the fields exposed by the /models endpoints are kept: id, name,
    description, distribution filename and the store timestamps.
    """
    store_meta = model_resource._store_metadata
    return dict(
        id=model_resource.id,
        name=model_resource.name,
        description=model_resource.description,
        filename=model_resource.distribution.name,
        created=store_meta._createdAt,
        modified=store_meta._updatedAt,
    )
def _retrieve_models(local=True):
    """Retrieve all models from the catalog.

    Populates the module-level ``app.models`` dict, mapping model names to
    ``{"data": <metadata dict>, "object": <EmbeddingPipeline>}``.

    Parameters
    ----------
    local : bool
        If True, models are loaded from ``app.config["DOWNLOAD_DIR"]``;
        otherwise they are fetched from the Nexus-hosted catalog via
        ``app.forge`` and unpacked into the same directory.
    """
    # Check if the download folder exists
    def _get_meta_data(model_name, file):
        # Build the same metadata shape as digest_model_data produces,
        # using the file's ctime/mtime in place of Nexus store timestamps.
        return {
            "data": {
                "id": model_name,
                "name": model_name,
                "description": model_name,
                "filename": os.path.join(
                    app.config["DOWNLOAD_DIR"], file),
                "created": time.ctime(os.path.getctime(
                    os.path.join(
                        app.config["DOWNLOAD_DIR"],
                        file))),
                "modified": time.ctime(os.path.getmtime(
                    os.path.join(
                        app.config["DOWNLOAD_DIR"],
                        file)))
            }
        }
    if not os.path.exists(app.config["DOWNLOAD_DIR"]):
        os.makedirs(app.config["DOWNLOAD_DIR"])
    if not local:
        # Fetch from a Nexus-hosted catalog
        resources = app.forge.search({"type": "EmbeddingModel"})
        for resource in resources:
            app.models[resource.name] = {
                "data": digest_model_data(resource),
            }
            app.forge.download(
                resource, "distribution.contentUrl",
                app.config["DOWNLOAD_DIR"])
            pipeline_path = os.path.join(
                app.config["DOWNLOAD_DIR"],
                resource.distribution.name)
            app.models[resource.name]["object"] = EmbeddingPipeline.load(
                pipeline_path,
                embedder_interface=GraphElementEmbedder,
                embedder_ext="zip")
        # Clear the downloads dir
        for f in os.listdir(app.config["DOWNLOAD_DIR"]):
            try:
                os.remove(os.path.join(app.config["DOWNLOAD_DIR"], f))
            except Exception:
                # Entry is a directory -- remove it recursively instead.
                shutil.rmtree(os.path.join(app.config["DOWNLOAD_DIR"], f))
    else:
        # Fetch from a local dir
        for (_, dirs, files) in os.walk(app.config["DOWNLOAD_DIR"]):
            for path in dirs + files:
                # Skip hidden files and directories.
                if path[0] != ".":
                    match = re.match(r"(.*)\.zip", path)
                    if match:
                        model_name = match.groups()[0]
                    else:
                        model_name = path
                    app.models[model_name] = _get_meta_data(model_name, path)
                    pipeline_path = os.path.join(
                        app.config["DOWNLOAD_DIR"], path)
                    app.models[model_name]["object"] = EmbeddingPipeline.load(
                        pipeline_path,
                        embedder_interface=GraphElementEmbedder,
                        embedder_ext="zip")
            # Only the top level of DOWNLOAD_DIR is scanned.
            break
break
# Application setup: configuration is read from configs/app_config.py.
app = Flask(__name__)
app.config.from_pyfile('configs/app_config.py')
if app.config["LOCAL"] is False:
    # Remote mode: a Nexus token must be provided via the environment.
    TOKEN = os.environ["NEXUS_TOKEN"]
    app.forge = KnowledgeGraphForge(
        app.config["FORGE_CONFIG"],
        token=TOKEN)
else:
    # Local mode: no Nexus access is required.
    app.forge = None
# Model catalog, populated at startup: name -> {"data": ..., "object": ...}.
app.models = {}
_retrieve_models(app.config["LOCAL"])
# --------------- Handlers ----------------
def _respond_success():
return (
json.dumps({"success": True}), 200,
{'ContentType': 'application/json'}
)
def _respond_not_found(message=None):
if message is None:
message = "Model is not found in the catalog"
return (
json.dumps({
'success': False,
'message': message
}), 404,
{'ContentType': 'application/json'}
)
def _respond_not_allowed(message=None):
if message is None:
message = "Request method is not allowed"
return (
json.dumps({
'success': False,
'message': message
}), 405,
{'ContentType': 'application/json'}
)
def _preprocess_data(data, data_type, auth=None):
"""Preprocess input data according to the specified type.
Possoble data types are:
- "raw" use data as is provided in the request
- "json_pgframe" create a PandasPGFrame from the provided JSON repr
- "nexus_dataset" download a JSON dataset from Nexus and
create a PandasPGFrame from this representation
# - collection of Nexus resources to build a PG from
# - (then i guess we need a bucket/org/project/token)
"""
if data_type == "raw":
# Use passed data as is
return data
elif data_type == "json_pgframe":
return PandasPGFrame.from_json(data)
elif data_type == "nexus_dataset":
if auth is None:
raise ValueError(
"To use Nexus-hosted property graph as the dataset "
"authentication token should be provided in the "
"request header")
forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"], endpoint=data["endpoint"],
bucket=data["bucket"], token=auth)
resource = forge.retrieve(data["resource_id"])
forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
downloaded_file = os.path.join(
app.config["DOWNLOAD_DIR"], resource.distribution.name)
graph = PandasPGFrame.load_json(downloaded_file)
os.remove(downloaded_file)
return graph
else:
raise ValueError("Unknown data type")
@app.route("/models/<model_name>", methods=["GET"]) # , "GET", "DELETE"])
def handle_model_request(model_name):
"""Handle request of model data."""
if model_name in app.models:
return (
json.dumps(app.models[model_name]["data"]), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
@app.route("/models/", methods=["GET"]) # , "DELETE"])
def handle_models_request():
"""Handle request of all existing models."""
# TODO: add sort and filter by creation/modification date
return (
json.dumps({"models": {
k: d["data"] for k, d in app.models.items()
}}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/embedding/", methods=["GET", "POST"])
def handle_embeddings_request(model_name):
"""Handle request of embedding vectors for provided resources."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
if request.method == "GET":
params = request.args.to_dict(flat=False)
indices = params["resource_ids"]
embeddings = pipeline.retrieve_embeddings(indices)
return (
json.dumps({
"vectors": dict(zip(indices, embeddings))
}), 200,
{'ContentType': 'application/json'}
)
else:
if pipeline.is_inductive():
auth_token = _retrieve_token(request)
content = request.get_json()
data = content["data"]
data_type = (
content["data_type"]
if "data_type" in content else "raw"
)
preprocessor_kwargs = (
content["preprocessor_kwargs"]
if "preprocessor_kwargs" in content else None
)
embedder_kwargs = (
content["embedder_kwargs"]
if "embedder_kwargs" in content else None
)
data = _preprocess_data(data, data_type, auth_token)
vectors = pipeline.run_prediction(
data, preprocessor_kwargs, embedder_kwargs)
if not isinstance(vectors, list):
vectors = vectors.tolist()
return (
json.dumps({"vectors": vectors}), 200,
{'ContentType': 'application/json'}
)
else:
_respond_not_allowed(
"Model is transductive, prediction of "
"embedding for unseen data is not supported")
else:
return _respond_not_found()
@app.route("/models/<model_name>/neighbors/", methods=["GET", "POST"])
def handle_similar_points_request(model_name):
"""Handle request of similar points to provided resources."""
if model_name not in app.models:
return _respond_not_found()
pipeline = app.models[model_name]["object"]
params = request.args.to_dict(flat=False)
k = int(params["k"][0])
values = (
params["values"][0] == "True" if "values" in params else False
)
if request.method == 'GET':
indices = params["resource_ids"]
similar_points, dist = pipeline.get_similar_points(
existing_indices=indices, k=k)
if values:
result = {
indices[i]: {
p: float(dist[i][j]) for j, p in enumerate(points)
} if points is not None else None
for i, points in enumerate(similar_points)
}
else:
result = {
indices[i]: list(points) if points is not None else None
for i, points in enumerate(similar_points)
}
else:
content = request.get_json()
vectors = content["vectors"]
similar_points, dist = pipeline.get_similar_points(
vectors=vectors, k=k)
if values:
|
else:
result = [
el.tolist() for el in similar_points
]
return (
json.dumps({"neighbors": result}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/<component_name>/")
def handle_info_request(model_name, component_name):
"""Handle request of details on different model components."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
info = None
if component_name == "preprocessor":
if pipeline.preprocessor is not None:
info = pipeline.preprocessor.info()
info["interface"] = pipeline.preprocessor.__class__.__name__
else:
return _respond_not_found(
"Model does not contain a preprocessor")
elif component_name == "embedder":
if pipeline.embedder is not None:
info = pipeline.embedder.info()
else:
return _respond_not_found(
"Model does not contain an embedder")
elif component_name == "similarity-processor":
info = pipeline.similarity_processor.info()
info["interface"] = pipeline.similarity_processor.__class__.__name__
# Convert all the values to str
for k in info.keys():
info[k] = str(info[k])
return (
json.dumps(info), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
if __name__ == '__main__':
app.run(host='0.0.0.0')
| result = [
{point: dist[i].tolist()[j] for j, point in enumerate(el)}
for i, el in enumerate(similar_points)
] | conditional_block |
app.py | # BlueGraph: unifying Python framework for graph analytics and co-occurrence analysis.
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main embedding service app."""
import json
import os
import shutil
import re
import time
from flask import Flask, request
from kgforge.core import KnowledgeGraphForge
from bluegraph import PandasPGFrame
from bluegraph.downstream import EmbeddingPipeline
from bluegraph.core import GraphElementEmbedder
def _retrieve_token(request):
"""Retrieve NEXUS token from the request header."""
auth_string = request.headers.get('Authorization')
try:
match = re.match("Bearer (.+)", auth_string)
except TypeError:
match = None
if match:
return match.groups()[0]
def digest_model_data(model_resource):
"""Digest model meta-data."""
model_data = {
"id": model_resource.id,
"name": model_resource.name,
"description": model_resource.description,
"filename": model_resource.distribution.name,
"created": model_resource._store_metadata._createdAt,
"modified": model_resource._store_metadata._updatedAt
}
return model_data
def _retrieve_models(local=True):
"""Retrieve all models from the catalog."""
# Check if the download folder exists
def _get_meta_data(model_name, file):
return {
"data": {
"id": model_name,
"name": model_name,
"description": model_name,
"filename": os.path.join(
app.config["DOWNLOAD_DIR"], file),
"created": time.ctime(os.path.getctime(
os.path.join(
app.config["DOWNLOAD_DIR"],
file))),
"modified": time.ctime(os.path.getmtime(
os.path.join(
app.config["DOWNLOAD_DIR"],
file)))
}
}
if not os.path.exists(app.config["DOWNLOAD_DIR"]):
os.makedirs(app.config["DOWNLOAD_DIR"])
if not local:
# Fetch from a Nexus-hosted catalog
resources = app.forge.search({"type": "EmbeddingModel"})
for resource in resources:
app.models[resource.name] = {
"data": digest_model_data(resource),
}
app.forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
pipeline_path = os.path.join(
app.config["DOWNLOAD_DIR"],
resource.distribution.name)
app.models[resource.name]["object"] = EmbeddingPipeline.load(
pipeline_path,
embedder_interface=GraphElementEmbedder,
embedder_ext="zip")
# Clear the downloads dir
for f in os.listdir(app.config["DOWNLOAD_DIR"]):
try:
os.remove(os.path.join(app.config["DOWNLOAD_DIR"], f))
except Exception:
shutil.rmtree(os.path.join(app.config["DOWNLOAD_DIR"], f))
else:
# Fetch from a local dir
for (_, dirs, files) in os.walk(app.config["DOWNLOAD_DIR"]):
for path in dirs + files:
if path[0] != ".":
match = re.match(r"(.*)\.zip", path)
if match:
model_name = match.groups()[0]
else:
model_name = path
app.models[model_name] = _get_meta_data(model_name, path)
pipeline_path = os.path.join(
app.config["DOWNLOAD_DIR"], path)
app.models[model_name]["object"] = EmbeddingPipeline.load(
pipeline_path,
embedder_interface=GraphElementEmbedder,
embedder_ext="zip")
break
app = Flask(__name__)
app.config.from_pyfile('configs/app_config.py')
if app.config["LOCAL"] is False:
TOKEN = os.environ["NEXUS_TOKEN"]
app.forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"],
token=TOKEN)
else:
app.forge = None
app.models = {}
_retrieve_models(app.config["LOCAL"])
# --------------- Handlers ----------------
def _respond_success():
return (
json.dumps({"success": True}), 200,
{'ContentType': 'application/json'}
)
def _respond_not_found(message=None):
if message is None:
message = "Model is not found in the catalog"
return (
json.dumps({
'success': False,
'message': message
}), 404,
{'ContentType': 'application/json'}
)
def _respond_not_allowed(message=None):
if message is None:
message = "Request method is not allowed"
return (
json.dumps({
'success': False,
'message': message
}), 405,
{'ContentType': 'application/json'}
)
def _preprocess_data(data, data_type, auth=None):
"""Preprocess input data according to the specified type.
Possoble data types are:
- "raw" use data as is provided in the request
- "json_pgframe" create a PandasPGFrame from the provided JSON repr
- "nexus_dataset" download a JSON dataset from Nexus and
create a PandasPGFrame from this representation
# - collection of Nexus resources to build a PG from
# - (then i guess we need a bucket/org/project/token)
"""
if data_type == "raw":
# Use passed data as is
return data
elif data_type == "json_pgframe":
return PandasPGFrame.from_json(data)
elif data_type == "nexus_dataset":
if auth is None:
raise ValueError(
"To use Nexus-hosted property graph as the dataset "
"authentication token should be provided in the "
"request header")
forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"], endpoint=data["endpoint"],
bucket=data["bucket"], token=auth)
resource = forge.retrieve(data["resource_id"])
forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
downloaded_file = os.path.join(
app.config["DOWNLOAD_DIR"], resource.distribution.name)
graph = PandasPGFrame.load_json(downloaded_file)
os.remove(downloaded_file)
return graph
else:
raise ValueError("Unknown data type")
@app.route("/models/<model_name>", methods=["GET"]) # , "GET", "DELETE"])
def handle_model_request(model_name):
"""Handle request of model data."""
if model_name in app.models:
return (
json.dumps(app.models[model_name]["data"]), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
@app.route("/models/", methods=["GET"]) # , "DELETE"])
def handle_models_request():
"""Handle request of all existing models."""
# TODO: add sort and filter by creation/modification date
return (
json.dumps({"models": {
k: d["data"] for k, d in app.models.items()
}}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/embedding/", methods=["GET", "POST"])
def handle_embeddings_request(model_name):
"""Handle request of embedding vectors for provided resources."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
if request.method == "GET":
params = request.args.to_dict(flat=False)
indices = params["resource_ids"]
embeddings = pipeline.retrieve_embeddings(indices)
return (
json.dumps({
"vectors": dict(zip(indices, embeddings))
}), 200,
{'ContentType': 'application/json'}
)
else:
if pipeline.is_inductive():
auth_token = _retrieve_token(request)
content = request.get_json()
data = content["data"]
data_type = (
content["data_type"]
if "data_type" in content else "raw"
)
preprocessor_kwargs = (
content["preprocessor_kwargs"]
if "preprocessor_kwargs" in content else None
)
embedder_kwargs = (
content["embedder_kwargs"]
if "embedder_kwargs" in content else None
)
data = _preprocess_data(data, data_type, auth_token)
vectors = pipeline.run_prediction(
data, preprocessor_kwargs, embedder_kwargs)
if not isinstance(vectors, list):
vectors = vectors.tolist()
return (
json.dumps({"vectors": vectors}), 200,
{'ContentType': 'application/json'}
)
else:
_respond_not_allowed(
"Model is transductive, prediction of "
"embedding for unseen data is not supported")
else:
return _respond_not_found()
@app.route("/models/<model_name>/neighbors/", methods=["GET", "POST"])
def handle_similar_points_request(model_name):
|
@app.route("/models/<model_name>/<component_name>/")
def handle_info_request(model_name, component_name):
"""Handle request of details on different model components."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
info = None
if component_name == "preprocessor":
if pipeline.preprocessor is not None:
info = pipeline.preprocessor.info()
info["interface"] = pipeline.preprocessor.__class__.__name__
else:
return _respond_not_found(
"Model does not contain a preprocessor")
elif component_name == "embedder":
if pipeline.embedder is not None:
info = pipeline.embedder.info()
else:
return _respond_not_found(
"Model does not contain an embedder")
elif component_name == "similarity-processor":
info = pipeline.similarity_processor.info()
info["interface"] = pipeline.similarity_processor.__class__.__name__
# Convert all the values to str
for k in info.keys():
info[k] = str(info[k])
return (
json.dumps(info), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
if __name__ == '__main__':
app.run(host='0.0.0.0')
| """Handle request of similar points to provided resources."""
if model_name not in app.models:
return _respond_not_found()
pipeline = app.models[model_name]["object"]
params = request.args.to_dict(flat=False)
k = int(params["k"][0])
values = (
params["values"][0] == "True" if "values" in params else False
)
if request.method == 'GET':
indices = params["resource_ids"]
similar_points, dist = pipeline.get_similar_points(
existing_indices=indices, k=k)
if values:
result = {
indices[i]: {
p: float(dist[i][j]) for j, p in enumerate(points)
} if points is not None else None
for i, points in enumerate(similar_points)
}
else:
result = {
indices[i]: list(points) if points is not None else None
for i, points in enumerate(similar_points)
}
else:
content = request.get_json()
vectors = content["vectors"]
similar_points, dist = pipeline.get_similar_points(
vectors=vectors, k=k)
if values:
result = [
{point: dist[i].tolist()[j] for j, point in enumerate(el)}
for i, el in enumerate(similar_points)
]
else:
result = [
el.tolist() for el in similar_points
]
return (
json.dumps({"neighbors": result}), 200,
{'ContentType': 'application/json'}
) | identifier_body |
app.py | # BlueGraph: unifying Python framework for graph analytics and co-occurrence analysis.
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main embedding service app."""
import json
import os
import shutil
import re
import time
from flask import Flask, request
from kgforge.core import KnowledgeGraphForge
from bluegraph import PandasPGFrame
from bluegraph.downstream import EmbeddingPipeline
from bluegraph.core import GraphElementEmbedder
def _retrieve_token(request):
"""Retrieve NEXUS token from the request header."""
auth_string = request.headers.get('Authorization')
try:
match = re.match("Bearer (.+)", auth_string)
except TypeError:
match = None
if match:
return match.groups()[0]
def digest_model_data(model_resource):
"""Digest model meta-data."""
model_data = {
"id": model_resource.id,
"name": model_resource.name,
"description": model_resource.description,
"filename": model_resource.distribution.name,
"created": model_resource._store_metadata._createdAt,
"modified": model_resource._store_metadata._updatedAt
}
return model_data
def _retrieve_models(local=True):
"""Retrieve all models from the catalog."""
# Check if the download folder exists
def _get_meta_data(model_name, file):
return {
"data": {
"id": model_name,
"name": model_name,
"description": model_name,
"filename": os.path.join(
app.config["DOWNLOAD_DIR"], file),
"created": time.ctime(os.path.getctime(
os.path.join(
app.config["DOWNLOAD_DIR"],
file))),
"modified": time.ctime(os.path.getmtime(
os.path.join(
app.config["DOWNLOAD_DIR"],
file)))
}
}
if not os.path.exists(app.config["DOWNLOAD_DIR"]):
os.makedirs(app.config["DOWNLOAD_DIR"])
if not local:
# Fetch from a Nexus-hosted catalog
resources = app.forge.search({"type": "EmbeddingModel"})
for resource in resources:
app.models[resource.name] = {
"data": digest_model_data(resource),
}
app.forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
pipeline_path = os.path.join(
app.config["DOWNLOAD_DIR"],
resource.distribution.name)
app.models[resource.name]["object"] = EmbeddingPipeline.load(
pipeline_path,
embedder_interface=GraphElementEmbedder,
embedder_ext="zip")
# Clear the downloads dir
for f in os.listdir(app.config["DOWNLOAD_DIR"]):
try:
os.remove(os.path.join(app.config["DOWNLOAD_DIR"], f))
except Exception:
shutil.rmtree(os.path.join(app.config["DOWNLOAD_DIR"], f))
else:
# Fetch from a local dir
for (_, dirs, files) in os.walk(app.config["DOWNLOAD_DIR"]):
for path in dirs + files:
if path[0] != ".":
match = re.match(r"(.*)\.zip", path)
if match:
model_name = match.groups()[0]
else:
model_name = path
app.models[model_name] = _get_meta_data(model_name, path)
pipeline_path = os.path.join(
app.config["DOWNLOAD_DIR"], path)
app.models[model_name]["object"] = EmbeddingPipeline.load(
pipeline_path,
embedder_interface=GraphElementEmbedder,
embedder_ext="zip")
break
app = Flask(__name__)
app.config.from_pyfile('configs/app_config.py')
if app.config["LOCAL"] is False:
TOKEN = os.environ["NEXUS_TOKEN"]
app.forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"],
token=TOKEN)
else:
app.forge = None
app.models = {}
_retrieve_models(app.config["LOCAL"])
# --------------- Handlers ----------------
def _respond_success():
return (
json.dumps({"success": True}), 200,
{'ContentType': 'application/json'}
)
def _respond_not_found(message=None):
if message is None:
message = "Model is not found in the catalog"
return (
json.dumps({
'success': False,
'message': message
}), 404,
{'ContentType': 'application/json'}
)
def _respond_not_allowed(message=None):
if message is None:
message = "Request method is not allowed"
return (
json.dumps({
'success': False,
'message': message
}), 405,
{'ContentType': 'application/json'}
)
def _preprocess_data(data, data_type, auth=None):
"""Preprocess input data according to the specified type.
Possoble data types are:
- "raw" use data as is provided in the request
- "json_pgframe" create a PandasPGFrame from the provided JSON repr
- "nexus_dataset" download a JSON dataset from Nexus and
create a PandasPGFrame from this representation
# - collection of Nexus resources to build a PG from
# - (then i guess we need a bucket/org/project/token)
"""
if data_type == "raw":
# Use passed data as is
return data
elif data_type == "json_pgframe":
return PandasPGFrame.from_json(data)
elif data_type == "nexus_dataset":
if auth is None:
raise ValueError(
"To use Nexus-hosted property graph as the dataset "
"authentication token should be provided in the "
"request header")
forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"], endpoint=data["endpoint"],
bucket=data["bucket"], token=auth)
resource = forge.retrieve(data["resource_id"])
forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
downloaded_file = os.path.join(
app.config["DOWNLOAD_DIR"], resource.distribution.name)
graph = PandasPGFrame.load_json(downloaded_file)
os.remove(downloaded_file)
return graph
else:
raise ValueError("Unknown data type")
@app.route("/models/<model_name>", methods=["GET"]) # , "GET", "DELETE"])
def handle_model_request(model_name):
"""Handle request of model data."""
if model_name in app.models:
return (
json.dumps(app.models[model_name]["data"]), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
@app.route("/models/", methods=["GET"]) # , "DELETE"])
def handle_models_request():
"""Handle request of all existing models."""
# TODO: add sort and filter by creation/modification date
return (
json.dumps({"models": {
k: d["data"] for k, d in app.models.items()
}}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/embedding/", methods=["GET", "POST"])
def handle_embeddings_request(model_name):
"""Handle request of embedding vectors for provided resources."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
if request.method == "GET":
params = request.args.to_dict(flat=False)
indices = params["resource_ids"]
embeddings = pipeline.retrieve_embeddings(indices)
return (
json.dumps({
"vectors": dict(zip(indices, embeddings))
}), 200,
{'ContentType': 'application/json'}
)
else:
if pipeline.is_inductive():
auth_token = _retrieve_token(request)
content = request.get_json()
data = content["data"]
data_type = (
content["data_type"]
if "data_type" in content else "raw"
)
preprocessor_kwargs = (
content["preprocessor_kwargs"]
if "preprocessor_kwargs" in content else None
)
embedder_kwargs = (
content["embedder_kwargs"]
if "embedder_kwargs" in content else None
)
data = _preprocess_data(data, data_type, auth_token)
vectors = pipeline.run_prediction(
data, preprocessor_kwargs, embedder_kwargs)
if not isinstance(vectors, list):
vectors = vectors.tolist()
return (
json.dumps({"vectors": vectors}), 200,
{'ContentType': 'application/json'}
)
else:
_respond_not_allowed(
"Model is transductive, prediction of "
"embedding for unseen data is not supported")
else:
return _respond_not_found()
@app.route("/models/<model_name>/neighbors/", methods=["GET", "POST"])
def handle_similar_points_request(model_name):
"""Handle request of similar points to provided resources."""
if model_name not in app.models:
return _respond_not_found()
pipeline = app.models[model_name]["object"]
params = request.args.to_dict(flat=False)
k = int(params["k"][0])
values = (
params["values"][0] == "True" if "values" in params else False
)
if request.method == 'GET':
indices = params["resource_ids"]
similar_points, dist = pipeline.get_similar_points(
existing_indices=indices, k=k)
if values:
result = {
indices[i]: {
p: float(dist[i][j]) for j, p in enumerate(points)
} if points is not None else None
for i, points in enumerate(similar_points)
}
else:
result = { | else:
content = request.get_json()
vectors = content["vectors"]
similar_points, dist = pipeline.get_similar_points(
vectors=vectors, k=k)
if values:
result = [
{point: dist[i].tolist()[j] for j, point in enumerate(el)}
for i, el in enumerate(similar_points)
]
else:
result = [
el.tolist() for el in similar_points
]
return (
json.dumps({"neighbors": result}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/<component_name>/")
def handle_info_request(model_name, component_name):
"""Handle request of details on different model components."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
info = None
if component_name == "preprocessor":
if pipeline.preprocessor is not None:
info = pipeline.preprocessor.info()
info["interface"] = pipeline.preprocessor.__class__.__name__
else:
return _respond_not_found(
"Model does not contain a preprocessor")
elif component_name == "embedder":
if pipeline.embedder is not None:
info = pipeline.embedder.info()
else:
return _respond_not_found(
"Model does not contain an embedder")
elif component_name == "similarity-processor":
info = pipeline.similarity_processor.info()
info["interface"] = pipeline.similarity_processor.__class__.__name__
# Convert all the values to str
for k in info.keys():
info[k] = str(info[k])
return (
json.dumps(info), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
if __name__ == '__main__':
app.run(host='0.0.0.0') | indices[i]: list(points) if points is not None else None
for i, points in enumerate(similar_points)
} | random_line_split |
app.py | # BlueGraph: unifying Python framework for graph analytics and co-occurrence analysis.
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main embedding service app."""
import json
import os
import shutil
import re
import time
from flask import Flask, request
from kgforge.core import KnowledgeGraphForge
from bluegraph import PandasPGFrame
from bluegraph.downstream import EmbeddingPipeline
from bluegraph.core import GraphElementEmbedder
def _retrieve_token(request):
"""Retrieve NEXUS token from the request header."""
auth_string = request.headers.get('Authorization')
try:
match = re.match("Bearer (.+)", auth_string)
except TypeError:
match = None
if match:
return match.groups()[0]
def digest_model_data(model_resource):
"""Digest model meta-data."""
model_data = {
"id": model_resource.id,
"name": model_resource.name,
"description": model_resource.description,
"filename": model_resource.distribution.name,
"created": model_resource._store_metadata._createdAt,
"modified": model_resource._store_metadata._updatedAt
}
return model_data
def _retrieve_models(local=True):
"""Retrieve all models from the catalog."""
# Check if the download folder exists
def _get_meta_data(model_name, file):
return {
"data": {
"id": model_name,
"name": model_name,
"description": model_name,
"filename": os.path.join(
app.config["DOWNLOAD_DIR"], file),
"created": time.ctime(os.path.getctime(
os.path.join(
app.config["DOWNLOAD_DIR"],
file))),
"modified": time.ctime(os.path.getmtime(
os.path.join(
app.config["DOWNLOAD_DIR"],
file)))
}
}
if not os.path.exists(app.config["DOWNLOAD_DIR"]):
os.makedirs(app.config["DOWNLOAD_DIR"])
if not local:
# Fetch from a Nexus-hosted catalog
resources = app.forge.search({"type": "EmbeddingModel"})
for resource in resources:
app.models[resource.name] = {
"data": digest_model_data(resource),
}
app.forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
pipeline_path = os.path.join(
app.config["DOWNLOAD_DIR"],
resource.distribution.name)
app.models[resource.name]["object"] = EmbeddingPipeline.load(
pipeline_path,
embedder_interface=GraphElementEmbedder,
embedder_ext="zip")
# Clear the downloads dir
for f in os.listdir(app.config["DOWNLOAD_DIR"]):
try:
os.remove(os.path.join(app.config["DOWNLOAD_DIR"], f))
except Exception:
shutil.rmtree(os.path.join(app.config["DOWNLOAD_DIR"], f))
else:
# Fetch from a local dir
for (_, dirs, files) in os.walk(app.config["DOWNLOAD_DIR"]):
for path in dirs + files:
if path[0] != ".":
match = re.match(r"(.*)\.zip", path)
if match:
model_name = match.groups()[0]
else:
model_name = path
app.models[model_name] = _get_meta_data(model_name, path)
pipeline_path = os.path.join(
app.config["DOWNLOAD_DIR"], path)
app.models[model_name]["object"] = EmbeddingPipeline.load(
pipeline_path,
embedder_interface=GraphElementEmbedder,
embedder_ext="zip")
break
app = Flask(__name__)
app.config.from_pyfile('configs/app_config.py')
if app.config["LOCAL"] is False:
TOKEN = os.environ["NEXUS_TOKEN"]
app.forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"],
token=TOKEN)
else:
app.forge = None
app.models = {}
_retrieve_models(app.config["LOCAL"])
# --------------- Handlers ----------------
def _respond_success():
return (
json.dumps({"success": True}), 200,
{'ContentType': 'application/json'}
)
def _respond_not_found(message=None):
if message is None:
message = "Model is not found in the catalog"
return (
json.dumps({
'success': False,
'message': message
}), 404,
{'ContentType': 'application/json'}
)
def _respond_not_allowed(message=None):
if message is None:
message = "Request method is not allowed"
return (
json.dumps({
'success': False,
'message': message
}), 405,
{'ContentType': 'application/json'}
)
def _preprocess_data(data, data_type, auth=None):
"""Preprocess input data according to the specified type.
Possoble data types are:
- "raw" use data as is provided in the request
- "json_pgframe" create a PandasPGFrame from the provided JSON repr
- "nexus_dataset" download a JSON dataset from Nexus and
create a PandasPGFrame from this representation
# - collection of Nexus resources to build a PG from
# - (then i guess we need a bucket/org/project/token)
"""
if data_type == "raw":
# Use passed data as is
return data
elif data_type == "json_pgframe":
return PandasPGFrame.from_json(data)
elif data_type == "nexus_dataset":
if auth is None:
raise ValueError(
"To use Nexus-hosted property graph as the dataset "
"authentication token should be provided in the "
"request header")
forge = KnowledgeGraphForge(
app.config["FORGE_CONFIG"], endpoint=data["endpoint"],
bucket=data["bucket"], token=auth)
resource = forge.retrieve(data["resource_id"])
forge.download(
resource, "distribution.contentUrl",
app.config["DOWNLOAD_DIR"])
downloaded_file = os.path.join(
app.config["DOWNLOAD_DIR"], resource.distribution.name)
graph = PandasPGFrame.load_json(downloaded_file)
os.remove(downloaded_file)
return graph
else:
raise ValueError("Unknown data type")
@app.route("/models/<model_name>", methods=["GET"]) # , "GET", "DELETE"])
def handle_model_request(model_name):
"""Handle request of model data."""
if model_name in app.models:
return (
json.dumps(app.models[model_name]["data"]), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
@app.route("/models/", methods=["GET"]) # , "DELETE"])
def handle_models_request():
"""Handle request of all existing models."""
# TODO: add sort and filter by creation/modification date
return (
json.dumps({"models": {
k: d["data"] for k, d in app.models.items()
}}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/embedding/", methods=["GET", "POST"])
def | (model_name):
"""Handle request of embedding vectors for provided resources."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
if request.method == "GET":
params = request.args.to_dict(flat=False)
indices = params["resource_ids"]
embeddings = pipeline.retrieve_embeddings(indices)
return (
json.dumps({
"vectors": dict(zip(indices, embeddings))
}), 200,
{'ContentType': 'application/json'}
)
else:
if pipeline.is_inductive():
auth_token = _retrieve_token(request)
content = request.get_json()
data = content["data"]
data_type = (
content["data_type"]
if "data_type" in content else "raw"
)
preprocessor_kwargs = (
content["preprocessor_kwargs"]
if "preprocessor_kwargs" in content else None
)
embedder_kwargs = (
content["embedder_kwargs"]
if "embedder_kwargs" in content else None
)
data = _preprocess_data(data, data_type, auth_token)
vectors = pipeline.run_prediction(
data, preprocessor_kwargs, embedder_kwargs)
if not isinstance(vectors, list):
vectors = vectors.tolist()
return (
json.dumps({"vectors": vectors}), 200,
{'ContentType': 'application/json'}
)
else:
_respond_not_allowed(
"Model is transductive, prediction of "
"embedding for unseen data is not supported")
else:
return _respond_not_found()
@app.route("/models/<model_name>/neighbors/", methods=["GET", "POST"])
def handle_similar_points_request(model_name):
"""Handle request of similar points to provided resources."""
if model_name not in app.models:
return _respond_not_found()
pipeline = app.models[model_name]["object"]
params = request.args.to_dict(flat=False)
k = int(params["k"][0])
values = (
params["values"][0] == "True" if "values" in params else False
)
if request.method == 'GET':
indices = params["resource_ids"]
similar_points, dist = pipeline.get_similar_points(
existing_indices=indices, k=k)
if values:
result = {
indices[i]: {
p: float(dist[i][j]) for j, p in enumerate(points)
} if points is not None else None
for i, points in enumerate(similar_points)
}
else:
result = {
indices[i]: list(points) if points is not None else None
for i, points in enumerate(similar_points)
}
else:
content = request.get_json()
vectors = content["vectors"]
similar_points, dist = pipeline.get_similar_points(
vectors=vectors, k=k)
if values:
result = [
{point: dist[i].tolist()[j] for j, point in enumerate(el)}
for i, el in enumerate(similar_points)
]
else:
result = [
el.tolist() for el in similar_points
]
return (
json.dumps({"neighbors": result}), 200,
{'ContentType': 'application/json'}
)
@app.route("/models/<model_name>/<component_name>/")
def handle_info_request(model_name, component_name):
"""Handle request of details on different model components."""
if model_name in app.models:
pipeline = app.models[model_name]["object"]
info = None
if component_name == "preprocessor":
if pipeline.preprocessor is not None:
info = pipeline.preprocessor.info()
info["interface"] = pipeline.preprocessor.__class__.__name__
else:
return _respond_not_found(
"Model does not contain a preprocessor")
elif component_name == "embedder":
if pipeline.embedder is not None:
info = pipeline.embedder.info()
else:
return _respond_not_found(
"Model does not contain an embedder")
elif component_name == "similarity-processor":
info = pipeline.similarity_processor.info()
info["interface"] = pipeline.similarity_processor.__class__.__name__
# Convert all the values to str
for k in info.keys():
info[k] = str(info[k])
return (
json.dumps(info), 200,
{'ContentType': 'application/json'}
)
else:
return _respond_not_found()
if __name__ == '__main__':
app.run(host='0.0.0.0')
| handle_embeddings_request | identifier_name |
RealSolver.py | '''
Kaden Archibald
Created: Oct 11, 2018
Revised: Jul 13, 2019
Version: IPython 7.2.0 (Anaconda distribution) with Python 3.7.1
Module for numerically solving arbitrary real-valued
functions in a single vairable.
'''
import const
#import data
''' Driver function to hide object intialization from end programmer. '''
def realSolver(function, firstGuess, secondGuess = None, fPrime = None):
temp = RealSolver(function, firstGuess, secondGuess, fPrime)
temp.findRoot()
return temp.root
class RealSolver:
'''
Create an object that contains a function to be solved, one (or two) initial
guesses, a function for the derivative, auxiliary precalculated arguements for a function,
and data about the root solving problem itself: a maximum error, a maximum number of
iterations, and a string array to log major operations.
'''
def __init__(self, function, firstGuess, secondGuess = None, fPrime = None):
# General Data
self.f = function # Function to be solved numerically
self.firstGuess = firstGuess # First initial estimate of the root
self.secondGuess = secondGuess # Second initial guess of the root
if not secondGuess is None:
self.isBracketed = True # True if initialized with two estimates...
else:
self.isBracketed = False # ...False otherwise
self.fPrime = fPrime # First derivative of the function
# Meta Data
self.root = None # Store the final result
self.isCorrect = False # True if the root if verified to be correct
self.opLog = [] # Log major operations of the algorithm
# Begin the Root Finding Procedure
self.realMethods = [self.newtonRaphson, self.onePointIteration, self.bisection]
self.opLog.append('Initialization Succesful\n')
'''
Apply the first root finding algorithm and then reevaluate the function at the
returned root. If the functions returns (close enough to) zero, terminate the
loop and return that value.
If the algorithm diverges, or if the algorithm returns a value that is not
the root, continue to the next algorithm.
For general functions, algorithms such as Newton's Method and the Bisection Method
usually converge, so all alogrithms failing will usually mean that the
function has no real roots.
'''
def findRoot(self, methodList = []) -> None:
# Every algorithm will return an estimate of the root.
# Keep a dictionary of which method returned which estimate.
self.rootLog = {}
# Find the root from every method
if not methodList:
methodList = self.realMethods
for method in methodList:
name = method.__name__
self.opLog.append(name + ' started')
potentialRoot = method()
self.rootLog[name] = potentialRoot
if not potentialRoot is None:
if self.verifyRoot(potentialRoot, name):
self.root = potentialRoot
#break
else:
self.opLog.append(name + ' halted\n')
#data.writeStr(self.opLog)
'''
Verify that a root really is zero before terminating. This method ensures that no number
that is not actually a root will ever be returned. This is valuable becuase some
algorithms can converge to incorrect values, especially if the initial guess was an optima
or endpoint of a funciton.
'''
def verifyRoot(self, potentialRoot: float, name: str) -> bool:
isVerified = False
try:
if abs(self.f(potentialRoot)) <= const.maxError:
self.opLog.append(name + ' verified\n')
isVerified = True
else:
self.opLog.append(name + ' converged to incorrect value\n')
except ValueError:
self.opLog.append(name + ' returned an undefined value\n')
except TypeError:
if self.root is None:
self.opLog.append('root not updated in meta data\n')
return isVerified
'''
Employ Newton's Method of numerically converging on a root using
a function's derivative:
X(i+1) = X(i) - f(X(i))/g(X(i))
Where f is the target function, g is that function's derivative, and i
is the current iteration.
'''
def newtonRaphson(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
deltax = 1e-10 # Step size for secant line
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Find the slope of the line secant to the curve at this point by using
# numerical differentiation. If the parameter fPrime (the functions derivative)
# was provided, then use that to find the tangent line.
try:
if self.fPrime is None:
# Secant Line
slope = (self.f(lastRoot + deltax) - self.f(lastRoot))/deltax
else:
# Tangent Line
slope = self.fPrime(lastRoot)
# Find the new root and the error from the last root
root = lastRoot - self.f(lastRoot)/slope
actualError = abs((lastRoot - root)/root)
# Special case for roots that are close to zero because the method used
# to find the actual error diverges for small root values
if abs(root) < const.maxError:
break
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Starting with two initial guesses, conduct a binary search.
'''
def bisection(self) -> float:
# Do not execute without a second guess
if not self.isBracketed:
self.opLog.append('closed method not attempted')
return None
count = 0 # Count Iterations
upperBound = self.secondGuess # Start Here
lowerBound = self.firstGuess # End Here
while count < const.maxIterations:
# Assume the root is the average of the two bounds.
root = (upperBound + lowerBound)/2
# Check the sign of the new root.
try:
signCheck = self.f(root) * self.f(lowerBound)
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
# Check where this value lands. If the root is not found, check the sign of the
| if abs(self.f(root)) < const.maxError:
break
else:
if signCheck > 0:
lowerBound = root
if signCheck < 0:
upperBound = root
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Transform the root equation into iterator form:
0 = f(x) --> x = g(x)
Then iterate with the transformed equation:
X(i+1) = g(X(i))
'''
def onePointIteration(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Add the old estimate to each side of the function to find a new estimate.
try:
root = self.f(lastRoot) + lastRoot
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
except OverflowError:
self.opLog.append('overflow')
return None
actualError = abs((lastRoot - root)/root)
count += 1
self.opLog.append('One Point Iteration Converged at ' + str(root))
return root
'''
Return a string representation of the object.
'''
def toString(self) -> str:
thisStr = ''
thisStr += 'Function: ' + self.f.__name__ + '\n'
thisStr += 'Root: ' + str(self.root) + '\n'
thisStr += 'Operation Log: \n'
for item in self.opLog:
thisStr += '\t' + item + '\n'
thisStr += 'Root Log: \n'
for (key, value) in self.rootLog.items():
thisStr += '\t' + key + ': ' + str(value) + '\n'
return thisStr | # estimated root to decide which bound to discard.
try:
| random_line_split |
RealSolver.py | '''
Kaden Archibald
Created: Oct 11, 2018
Revised: Jul 13, 2019
Version: IPython 7.2.0 (Anaconda distribution) with Python 3.7.1
Module for numerically solving arbitrary real-valued
functions in a single vairable.
'''
import const
#import data
''' Driver function to hide object intialization from end programmer. '''
def realSolver(function, firstGuess, secondGuess = None, fPrime = None):
temp = RealSolver(function, firstGuess, secondGuess, fPrime)
temp.findRoot()
return temp.root
class RealSolver:
'''
Create an object that contains a function to be solved, one (or two) initial
guesses, a function for the derivative, auxiliary precalculated arguements for a function,
and data about the root solving problem itself: a maximum error, a maximum number of
iterations, and a string array to log major operations.
'''
def __init__(self, function, firstGuess, secondGuess = None, fPrime = None):
# General Data
self.f = function # Function to be solved numerically
self.firstGuess = firstGuess # First initial estimate of the root
self.secondGuess = secondGuess # Second initial guess of the root
if not secondGuess is None:
self.isBracketed = True # True if initialized with two estimates...
else:
self.isBracketed = False # ...False otherwise
self.fPrime = fPrime # First derivative of the function
# Meta Data
self.root = None # Store the final result
self.isCorrect = False # True if the root if verified to be correct
self.opLog = [] # Log major operations of the algorithm
# Begin the Root Finding Procedure
self.realMethods = [self.newtonRaphson, self.onePointIteration, self.bisection]
self.opLog.append('Initialization Succesful\n')
'''
Apply the first root finding algorithm and then reevaluate the function at the
returned root. If the functions returns (close enough to) zero, terminate the
loop and return that value.
If the algorithm diverges, or if the algorithm returns a value that is not
the root, continue to the next algorithm.
For general functions, algorithms such as Newton's Method and the Bisection Method
usually converge, so all alogrithms failing will usually mean that the
function has no real roots.
'''
def findRoot(self, methodList = []) -> None:
# Every algorithm will return an estimate of the root.
# Keep a dictionary of which method returned which estimate.
self.rootLog = {}
# Find the root from every method
if not methodList:
methodList = self.realMethods
for method in methodList:
name = method.__name__
self.opLog.append(name + ' started')
potentialRoot = method()
self.rootLog[name] = potentialRoot
if not potentialRoot is None:
if self.verifyRoot(potentialRoot, name):
self.root = potentialRoot
#break
else:
self.opLog.append(name + ' halted\n')
#data.writeStr(self.opLog)
'''
Verify that a root really is zero before terminating. This method ensures that no number
that is not actually a root will ever be returned. This is valuable becuase some
algorithms can converge to incorrect values, especially if the initial guess was an optima
or endpoint of a funciton.
'''
def verifyRoot(self, potentialRoot: float, name: str) -> bool:
|
'''
Employ Newton's Method of numerically converging on a root using
a function's derivative:
X(i+1) = X(i) - f(X(i))/g(X(i))
Where f is the target function, g is that function's derivative, and i
is the current iteration.
'''
def newtonRaphson(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
deltax = 1e-10 # Step size for secant line
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Find the slope of the line secant to the curve at this point by using
# numerical differentiation. If the parameter fPrime (the functions derivative)
# was provided, then use that to find the tangent line.
try:
if self.fPrime is None:
# Secant Line
slope = (self.f(lastRoot + deltax) - self.f(lastRoot))/deltax
else:
# Tangent Line
slope = self.fPrime(lastRoot)
# Find the new root and the error from the last root
root = lastRoot - self.f(lastRoot)/slope
actualError = abs((lastRoot - root)/root)
# Special case for roots that are close to zero because the method used
# to find the actual error diverges for small root values
if abs(root) < const.maxError:
break
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Starting with two initial guesses, conduct a binary search.
'''
def bisection(self) -> float:
# Do not execute without a second guess
if not self.isBracketed:
self.opLog.append('closed method not attempted')
return None
count = 0 # Count Iterations
upperBound = self.secondGuess # Start Here
lowerBound = self.firstGuess # End Here
while count < const.maxIterations:
# Assume the root is the average of the two bounds.
root = (upperBound + lowerBound)/2
# Check the sign of the new root.
try:
signCheck = self.f(root) * self.f(lowerBound)
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
# Check where this value lands. If the root is not found, check the sign of the
# estimated root to decide which bound to discard.
try:
if abs(self.f(root)) < const.maxError:
break
else:
if signCheck > 0:
lowerBound = root
if signCheck < 0:
upperBound = root
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Transform the root equation into iterator form:
0 = f(x) --> x = g(x)
Then iterate with the transformed equation:
X(i+1) = g(X(i))
'''
def onePointIteration(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Add the old estimate to each side of the function to find a new estimate.
try:
root = self.f(lastRoot) + lastRoot
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
except OverflowError:
self.opLog.append('overflow')
return None
actualError = abs((lastRoot - root)/root)
count += 1
self.opLog.append('One Point Iteration Converged at ' + str(root))
return root
'''
Return a string representation of the object.
'''
def toString(self) -> str:
thisStr = ''
thisStr += 'Function: ' + self.f.__name__ + '\n'
thisStr += 'Root: ' + str(self.root) + '\n'
thisStr += 'Operation Log: \n'
for item in self.opLog:
thisStr += '\t' + item + '\n'
thisStr += 'Root Log: \n'
for (key, value) in self.rootLog.items():
thisStr += '\t' + key + ': ' + str(value) + '\n'
return thisStr
| isVerified = False
try:
if abs(self.f(potentialRoot)) <= const.maxError:
self.opLog.append(name + ' verified\n')
isVerified = True
else:
self.opLog.append(name + ' converged to incorrect value\n')
except ValueError:
self.opLog.append(name + ' returned an undefined value\n')
except TypeError:
if self.root is None:
self.opLog.append('root not updated in meta data\n')
return isVerified | identifier_body |
RealSolver.py | '''
Kaden Archibald
Created: Oct 11, 2018
Revised: Jul 13, 2019
Version: IPython 7.2.0 (Anaconda distribution) with Python 3.7.1
Module for numerically solving arbitrary real-valued
functions in a single vairable.
'''
import const
#import data
''' Driver function to hide object intialization from end programmer. '''
def realSolver(function, firstGuess, secondGuess = None, fPrime = None):
temp = RealSolver(function, firstGuess, secondGuess, fPrime)
temp.findRoot()
return temp.root
class | :
'''
Create an object that contains a function to be solved, one (or two) initial
guesses, a function for the derivative, auxiliary precalculated arguements for a function,
and data about the root solving problem itself: a maximum error, a maximum number of
iterations, and a string array to log major operations.
'''
def __init__(self, function, firstGuess, secondGuess = None, fPrime = None):
# General Data
self.f = function # Function to be solved numerically
self.firstGuess = firstGuess # First initial estimate of the root
self.secondGuess = secondGuess # Second initial guess of the root
if not secondGuess is None:
self.isBracketed = True # True if initialized with two estimates...
else:
self.isBracketed = False # ...False otherwise
self.fPrime = fPrime # First derivative of the function
# Meta Data
self.root = None # Store the final result
self.isCorrect = False # True if the root if verified to be correct
self.opLog = [] # Log major operations of the algorithm
# Begin the Root Finding Procedure
self.realMethods = [self.newtonRaphson, self.onePointIteration, self.bisection]
self.opLog.append('Initialization Succesful\n')
'''
Apply the first root finding algorithm and then reevaluate the function at the
returned root. If the functions returns (close enough to) zero, terminate the
loop and return that value.
If the algorithm diverges, or if the algorithm returns a value that is not
the root, continue to the next algorithm.
For general functions, algorithms such as Newton's Method and the Bisection Method
usually converge, so all alogrithms failing will usually mean that the
function has no real roots.
'''
def findRoot(self, methodList = []) -> None:
# Every algorithm will return an estimate of the root.
# Keep a dictionary of which method returned which estimate.
self.rootLog = {}
# Find the root from every method
if not methodList:
methodList = self.realMethods
for method in methodList:
name = method.__name__
self.opLog.append(name + ' started')
potentialRoot = method()
self.rootLog[name] = potentialRoot
if not potentialRoot is None:
if self.verifyRoot(potentialRoot, name):
self.root = potentialRoot
#break
else:
self.opLog.append(name + ' halted\n')
#data.writeStr(self.opLog)
'''
Verify that a root really is zero before terminating. This method ensures that no number
that is not actually a root will ever be returned. This is valuable becuase some
algorithms can converge to incorrect values, especially if the initial guess was an optima
or endpoint of a funciton.
'''
def verifyRoot(self, potentialRoot: float, name: str) -> bool:
isVerified = False
try:
if abs(self.f(potentialRoot)) <= const.maxError:
self.opLog.append(name + ' verified\n')
isVerified = True
else:
self.opLog.append(name + ' converged to incorrect value\n')
except ValueError:
self.opLog.append(name + ' returned an undefined value\n')
except TypeError:
if self.root is None:
self.opLog.append('root not updated in meta data\n')
return isVerified
'''
Employ Newton's Method of numerically converging on a root using
a function's derivative:
X(i+1) = X(i) - f(X(i))/g(X(i))
Where f is the target function, g is that function's derivative, and i
is the current iteration.
'''
def newtonRaphson(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
deltax = 1e-10 # Step size for secant line
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Find the slope of the line secant to the curve at this point by using
# numerical differentiation. If the parameter fPrime (the functions derivative)
# was provided, then use that to find the tangent line.
try:
if self.fPrime is None:
# Secant Line
slope = (self.f(lastRoot + deltax) - self.f(lastRoot))/deltax
else:
# Tangent Line
slope = self.fPrime(lastRoot)
# Find the new root and the error from the last root
root = lastRoot - self.f(lastRoot)/slope
actualError = abs((lastRoot - root)/root)
# Special case for roots that are close to zero because the method used
# to find the actual error diverges for small root values
if abs(root) < const.maxError:
break
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Starting with two initial guesses, conduct a binary search.
'''
def bisection(self) -> float:
# Do not execute without a second guess
if not self.isBracketed:
self.opLog.append('closed method not attempted')
return None
count = 0 # Count Iterations
upperBound = self.secondGuess # Start Here
lowerBound = self.firstGuess # End Here
while count < const.maxIterations:
# Assume the root is the average of the two bounds.
root = (upperBound + lowerBound)/2
# Check the sign of the new root.
try:
signCheck = self.f(root) * self.f(lowerBound)
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
# Check where this value lands. If the root is not found, check the sign of the
# estimated root to decide which bound to discard.
try:
if abs(self.f(root)) < const.maxError:
break
else:
if signCheck > 0:
lowerBound = root
if signCheck < 0:
upperBound = root
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Transform the root equation into iterator form:
0 = f(x) --> x = g(x)
Then iterate with the transformed equation:
X(i+1) = g(X(i))
'''
def onePointIteration(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Add the old estimate to each side of the function to find a new estimate.
try:
root = self.f(lastRoot) + lastRoot
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
except OverflowError:
self.opLog.append('overflow')
return None
actualError = abs((lastRoot - root)/root)
count += 1
self.opLog.append('One Point Iteration Converged at ' + str(root))
return root
'''
Return a string representation of the object.
'''
def toString(self) -> str:
thisStr = ''
thisStr += 'Function: ' + self.f.__name__ + '\n'
thisStr += 'Root: ' + str(self.root) + '\n'
thisStr += 'Operation Log: \n'
for item in self.opLog:
thisStr += '\t' + item + '\n'
thisStr += 'Root Log: \n'
for (key, value) in self.rootLog.items():
thisStr += '\t' + key + ': ' + str(value) + '\n'
return thisStr
| RealSolver | identifier_name |
RealSolver.py | '''
Kaden Archibald
Created: Oct 11, 2018
Revised: Jul 13, 2019
Version: IPython 7.2.0 (Anaconda distribution) with Python 3.7.1
Module for numerically solving arbitrary real-valued
functions in a single vairable.
'''
import const
#import data
''' Driver function to hide object intialization from end programmer. '''
def realSolver(function, firstGuess, secondGuess = None, fPrime = None):
temp = RealSolver(function, firstGuess, secondGuess, fPrime)
temp.findRoot()
return temp.root
class RealSolver:
'''
Create an object that contains a function to be solved, one (or two) initial
guesses, a function for the derivative, auxiliary precalculated arguements for a function,
and data about the root solving problem itself: a maximum error, a maximum number of
iterations, and a string array to log major operations.
'''
def __init__(self, function, firstGuess, secondGuess = None, fPrime = None):
# General Data
self.f = function # Function to be solved numerically
self.firstGuess = firstGuess # First initial estimate of the root
self.secondGuess = secondGuess # Second initial guess of the root
if not secondGuess is None:
self.isBracketed = True # True if initialized with two estimates...
else:
self.isBracketed = False # ...False otherwise
self.fPrime = fPrime # First derivative of the function
# Meta Data
self.root = None # Store the final result
self.isCorrect = False # True if the root if verified to be correct
self.opLog = [] # Log major operations of the algorithm
# Begin the Root Finding Procedure
self.realMethods = [self.newtonRaphson, self.onePointIteration, self.bisection]
self.opLog.append('Initialization Succesful\n')
'''
Apply the first root finding algorithm and then reevaluate the function at the
returned root. If the functions returns (close enough to) zero, terminate the
loop and return that value.
If the algorithm diverges, or if the algorithm returns a value that is not
the root, continue to the next algorithm.
For general functions, algorithms such as Newton's Method and the Bisection Method
usually converge, so all alogrithms failing will usually mean that the
function has no real roots.
'''
def findRoot(self, methodList = []) -> None:
# Every algorithm will return an estimate of the root.
# Keep a dictionary of which method returned which estimate.
self.rootLog = {}
# Find the root from every method
if not methodList:
methodList = self.realMethods
for method in methodList:
name = method.__name__
self.opLog.append(name + ' started')
potentialRoot = method()
self.rootLog[name] = potentialRoot
if not potentialRoot is None:
if self.verifyRoot(potentialRoot, name):
self.root = potentialRoot
#break
else:
self.opLog.append(name + ' halted\n')
#data.writeStr(self.opLog)
'''
Verify that a root really is zero before terminating. This method ensures that no number
that is not actually a root will ever be returned. This is valuable becuase some
algorithms can converge to incorrect values, especially if the initial guess was an optima
or endpoint of a funciton.
'''
def verifyRoot(self, potentialRoot: float, name: str) -> bool:
isVerified = False
try:
if abs(self.f(potentialRoot)) <= const.maxError:
self.opLog.append(name + ' verified\n')
isVerified = True
else:
|
except ValueError:
self.opLog.append(name + ' returned an undefined value\n')
except TypeError:
if self.root is None:
self.opLog.append('root not updated in meta data\n')
return isVerified
'''
Employ Newton's Method of numerically converging on a root using
a function's derivative:
X(i+1) = X(i) - f(X(i))/g(X(i))
Where f is the target function, g is that function's derivative, and i
is the current iteration.
'''
def newtonRaphson(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
deltax = 1e-10 # Step size for secant line
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Find the slope of the line secant to the curve at this point by using
# numerical differentiation. If the parameter fPrime (the functions derivative)
# was provided, then use that to find the tangent line.
try:
if self.fPrime is None:
# Secant Line
slope = (self.f(lastRoot + deltax) - self.f(lastRoot))/deltax
else:
# Tangent Line
slope = self.fPrime(lastRoot)
# Find the new root and the error from the last root
root = lastRoot - self.f(lastRoot)/slope
actualError = abs((lastRoot - root)/root)
# Special case for roots that are close to zero because the method used
# to find the actual error diverges for small root values
if abs(root) < const.maxError:
break
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Starting with two initial guesses, conduct a binary search.
'''
def bisection(self) -> float:
# Do not execute without a second guess
if not self.isBracketed:
self.opLog.append('closed method not attempted')
return None
count = 0 # Count Iterations
upperBound = self.secondGuess # Start Here
lowerBound = self.firstGuess # End Here
while count < const.maxIterations:
# Assume the root is the average of the two bounds.
root = (upperBound + lowerBound)/2
# Check the sign of the new root.
try:
signCheck = self.f(root) * self.f(lowerBound)
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
# Check where this value lands. If the root is not found, check the sign of the
# estimated root to decide which bound to discard.
try:
if abs(self.f(root)) < const.maxError:
break
else:
if signCheck > 0:
lowerBound = root
if signCheck < 0:
upperBound = root
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
count += 1
self.opLog.append('converged at ' + str(root))
return root
'''
Transform the root equation into iterator form:
0 = f(x) --> x = g(x)
Then iterate with the transformed equation:
X(i+1) = g(X(i))
'''
def onePointIteration(self) -> float:
count = 0 # Count Iterations
root = self.firstGuess # Store the result
actualError = 10 * const.maxError # Error when comparing iteratons
while actualError > const.maxError and count < const.maxIterations:
# Store the last root.
lastRoot = root
# Add the old estimate to each side of the function to find a new estimate.
try:
root = self.f(lastRoot) + lastRoot
except (ValueError, ZeroDivisionError):
self.opLog.append('invalid function evaluation')
return None
except OverflowError:
self.opLog.append('overflow')
return None
actualError = abs((lastRoot - root)/root)
count += 1
self.opLog.append('One Point Iteration Converged at ' + str(root))
return root
'''
Return a string representation of the object.
'''
def toString(self) -> str:
thisStr = ''
thisStr += 'Function: ' + self.f.__name__ + '\n'
thisStr += 'Root: ' + str(self.root) + '\n'
thisStr += 'Operation Log: \n'
for item in self.opLog:
thisStr += '\t' + item + '\n'
thisStr += 'Root Log: \n'
for (key, value) in self.rootLog.items():
thisStr += '\t' + key + ': ' + str(value) + '\n'
return thisStr
| self.opLog.append(name + ' converged to incorrect value\n') | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.